Backed out changeset 510dd46a9de7 (bug 1696251) for causing Android build bustages.

This commit is contained in:
Butkovits Atila 2021-09-21 05:16:50 +03:00
parent 4b46b9b45b
commit c06a3dd9fa
67 changed files with 13662 additions and 13264 deletions

View File

@ -12,8 +12,10 @@ import os
from mach.decorators import (
Command,
CommandArgument,
CommandProvider,
)
from mozbuild.base import (
MachCommandBase,
MachCommandConditions as conditions,
BinaryNotFoundException,
)
@ -25,216 +27,218 @@ def is_valgrind_build(cls):
return "MOZ_VALGRIND" in defines and "MOZ_MEMORY" not in defines
@Command(
"valgrind-test",
category="testing",
conditions=[conditions.is_firefox_or_thunderbird, is_valgrind_build],
description="Run the Valgrind test job (memory-related errors).",
)
@CommandArgument(
"--suppressions",
default=[],
action="append",
metavar="FILENAME",
help="Specify a suppression file for Valgrind to use. Use "
"--suppression multiple times to specify multiple suppression "
"files.",
)
def valgrind_test(command_context, suppressions):
"""
Run Valgrind tests.
"""
@CommandProvider
class MachCommands(MachCommandBase):
@Command(
"valgrind-test",
category="testing",
conditions=[conditions.is_firefox_or_thunderbird, is_valgrind_build],
description="Run the Valgrind test job (memory-related errors).",
)
@CommandArgument(
"--suppressions",
default=[],
action="append",
metavar="FILENAME",
help="Specify a suppression file for Valgrind to use. Use "
"--suppression multiple times to specify multiple suppression "
"files.",
)
def valgrind_test(self, command_context, suppressions):
"""
Run Valgrind tests.
"""
from mozfile import TemporaryDirectory
from mozhttpd import MozHttpd
from mozprofile import FirefoxProfile, Preferences
from mozprofile.permissions import ServerLocations
from mozrunner import FirefoxRunner
from mozrunner.utils import findInPath
from six import string_types
from valgrind.output_handler import OutputHandler
from mozfile import TemporaryDirectory
from mozhttpd import MozHttpd
from mozprofile import FirefoxProfile, Preferences
from mozprofile.permissions import ServerLocations
from mozrunner import FirefoxRunner
from mozrunner.utils import findInPath
from six import string_types
from valgrind.output_handler import OutputHandler
build_dir = os.path.join(command_context.topsrcdir, "build")
build_dir = os.path.join(command_context.topsrcdir, "build")
# XXX: currently we just use the PGO inputs for Valgrind runs. This may
# change in the future.
httpd = MozHttpd(docroot=os.path.join(build_dir, "pgo"))
httpd.start(block=False)
# XXX: currently we just use the PGO inputs for Valgrind runs. This may
# change in the future.
httpd = MozHttpd(docroot=os.path.join(build_dir, "pgo"))
httpd.start(block=False)
with TemporaryDirectory() as profilePath:
# TODO: refactor this into mozprofile
profile_data_dir = os.path.join(
command_context.topsrcdir, "testing", "profiles"
)
with open(os.path.join(profile_data_dir, "profiles.json"), "r") as fh:
base_profiles = json.load(fh)["valgrind"]
prefpaths = [
os.path.join(profile_data_dir, profile, "user.js")
for profile in base_profiles
]
prefs = {}
for path in prefpaths:
prefs.update(Preferences.read_prefs(path))
interpolation = {
"server": "%s:%d" % httpd.httpd.server_address,
}
for k, v in prefs.items():
if isinstance(v, string_types):
v = v.format(**interpolation)
prefs[k] = Preferences.cast(v)
quitter = os.path.join(
command_context.topsrcdir, "tools", "quitter", "quitter@mozilla.org.xpi"
)
locations = ServerLocations()
locations.add_host(
host="127.0.0.1", port=httpd.httpd.server_port, options="primary"
)
profile = FirefoxProfile(
profile=profilePath,
preferences=prefs,
addons=[quitter],
locations=locations,
)
firefox_args = [httpd.get_url()]
env = os.environ.copy()
env["G_SLICE"] = "always-malloc"
env["MOZ_CC_RUN_DURING_SHUTDOWN"] = "1"
env["MOZ_CRASHREPORTER_NO_REPORT"] = "1"
env["MOZ_DISABLE_NONLOCAL_CONNECTIONS"] = "1"
env["XPCOM_DEBUG_BREAK"] = "warn"
outputHandler = OutputHandler(command_context.log)
kp_kwargs = {
"processOutputLine": [outputHandler],
"universal_newlines": True,
}
valgrind = "valgrind"
if not os.path.exists(valgrind):
valgrind = findInPath(valgrind)
valgrind_args = [
valgrind,
"--sym-offsets=yes",
"--smc-check=all-non-file",
"--vex-iropt-register-updates=allregs-at-mem-access",
"--gen-suppressions=all",
"--num-callers=36",
"--leak-check=full",
"--show-possibly-lost=no",
"--track-origins=yes",
"--trace-children=yes",
"-v", # Enable verbosity to get the list of used suppressions
# Avoid excessive delays in the presence of spinlocks.
# See bug 1309851.
"--fair-sched=yes",
# Keep debuginfo after library unmap. See bug 1382280.
"--keep-debuginfo=yes",
# Reduce noise level on rustc and/or LLVM compiled code.
# See bug 1365915
"--expensive-definedness-checks=yes",
# Compensate for the compiler inlining `new` but not `delete`
# or vice versa.
"--show-mismatched-frees=no",
]
for s in suppressions:
valgrind_args.append("--suppressions=" + s)
supps_dir = os.path.join(build_dir, "valgrind")
supps_file1 = os.path.join(supps_dir, "cross-architecture.sup")
valgrind_args.append("--suppressions=" + supps_file1)
if mozinfo.os == "linux":
machtype = {
"x86_64": "x86_64-pc-linux-gnu",
"x86": "i386-pc-linux-gnu",
}.get(mozinfo.processor)
if machtype:
supps_file2 = os.path.join(supps_dir, machtype + ".sup")
if os.path.isfile(supps_file2):
valgrind_args.append("--suppressions=" + supps_file2)
exitcode = None
timeout = 1800
binary_not_found_exception = None
try:
runner = FirefoxRunner(
profile=profile,
binary=command_context.get_binary_path(),
cmdargs=firefox_args,
env=env,
process_args=kp_kwargs,
with TemporaryDirectory() as profilePath:
# TODO: refactor this into mozprofile
profile_data_dir = os.path.join(
command_context.topsrcdir, "testing", "profiles"
)
runner.start(debug_args=valgrind_args)
exitcode = runner.wait(timeout=timeout)
except BinaryNotFoundException as e:
binary_not_found_exception = e
finally:
errs = outputHandler.error_count
supps = outputHandler.suppression_count
if errs != supps:
status = 1 # turns the TBPL job orange
command_context.log(
logging.ERROR,
"valgrind-fail-parsing",
{"errs": errs, "supps": supps},
"TEST-UNEXPECTED-FAIL | valgrind-test | error parsing: {errs} errors "
"seen, but {supps} generated suppressions seen",
)
with open(os.path.join(profile_data_dir, "profiles.json"), "r") as fh:
base_profiles = json.load(fh)["valgrind"]
elif errs == 0:
status = 0
command_context.log(
logging.INFO,
"valgrind-pass",
{},
"TEST-PASS | valgrind-test | valgrind found no errors",
)
else:
status = 1 # turns the TBPL job orange
# We've already printed details of the errors.
prefpaths = [
os.path.join(profile_data_dir, profile, "user.js")
for profile in base_profiles
]
prefs = {}
for path in prefpaths:
prefs.update(Preferences.read_prefs(path))
if binary_not_found_exception:
status = 2 # turns the TBPL job red
command_context.log(
logging.ERROR,
"valgrind-fail-errors",
{"error": str(binary_not_found_exception)},
"TEST-UNEXPECTED-FAIL | valgrind-test | {error}",
)
command_context.log(
logging.INFO,
"valgrind-fail-errors",
{"help": binary_not_found_exception.help()},
"{help}",
)
elif exitcode is None:
status = 2 # turns the TBPL job red
command_context.log(
logging.ERROR,
"valgrind-fail-timeout",
{"timeout": timeout},
"TEST-UNEXPECTED-FAIL | valgrind-test | Valgrind timed out "
"(reached {timeout} second limit)",
)
elif exitcode != 0:
status = 2 # turns the TBPL job red
command_context.log(
logging.ERROR,
"valgrind-fail-errors",
{"exitcode": exitcode},
"TEST-UNEXPECTED-FAIL | valgrind-test | non-zero exit code "
"from Valgrind: {exitcode}",
)
interpolation = {
"server": "%s:%d" % httpd.httpd.server_address,
}
for k, v in prefs.items():
if isinstance(v, string_types):
v = v.format(**interpolation)
prefs[k] = Preferences.cast(v)
httpd.stop()
quitter = os.path.join(
command_context.topsrcdir, "tools", "quitter", "quitter@mozilla.org.xpi"
)
return status
locations = ServerLocations()
locations.add_host(
host="127.0.0.1", port=httpd.httpd.server_port, options="primary"
)
profile = FirefoxProfile(
profile=profilePath,
preferences=prefs,
addons=[quitter],
locations=locations,
)
firefox_args = [httpd.get_url()]
env = os.environ.copy()
env["G_SLICE"] = "always-malloc"
env["MOZ_CC_RUN_DURING_SHUTDOWN"] = "1"
env["MOZ_CRASHREPORTER_NO_REPORT"] = "1"
env["MOZ_DISABLE_NONLOCAL_CONNECTIONS"] = "1"
env["XPCOM_DEBUG_BREAK"] = "warn"
outputHandler = OutputHandler(command_context.log)
kp_kwargs = {
"processOutputLine": [outputHandler],
"universal_newlines": True,
}
valgrind = "valgrind"
if not os.path.exists(valgrind):
valgrind = findInPath(valgrind)
valgrind_args = [
valgrind,
"--sym-offsets=yes",
"--smc-check=all-non-file",
"--vex-iropt-register-updates=allregs-at-mem-access",
"--gen-suppressions=all",
"--num-callers=36",
"--leak-check=full",
"--show-possibly-lost=no",
"--track-origins=yes",
"--trace-children=yes",
"-v", # Enable verbosity to get the list of used suppressions
# Avoid excessive delays in the presence of spinlocks.
# See bug 1309851.
"--fair-sched=yes",
# Keep debuginfo after library unmap. See bug 1382280.
"--keep-debuginfo=yes",
# Reduce noise level on rustc and/or LLVM compiled code.
# See bug 1365915
"--expensive-definedness-checks=yes",
# Compensate for the compiler inlining `new` but not `delete`
# or vice versa.
"--show-mismatched-frees=no",
]
for s in suppressions:
valgrind_args.append("--suppressions=" + s)
supps_dir = os.path.join(build_dir, "valgrind")
supps_file1 = os.path.join(supps_dir, "cross-architecture.sup")
valgrind_args.append("--suppressions=" + supps_file1)
if mozinfo.os == "linux":
machtype = {
"x86_64": "x86_64-pc-linux-gnu",
"x86": "i386-pc-linux-gnu",
}.get(mozinfo.processor)
if machtype:
supps_file2 = os.path.join(supps_dir, machtype + ".sup")
if os.path.isfile(supps_file2):
valgrind_args.append("--suppressions=" + supps_file2)
exitcode = None
timeout = 1800
binary_not_found_exception = None
try:
runner = FirefoxRunner(
profile=profile,
binary=command_context.get_binary_path(),
cmdargs=firefox_args,
env=env,
process_args=kp_kwargs,
)
runner.start(debug_args=valgrind_args)
exitcode = runner.wait(timeout=timeout)
except BinaryNotFoundException as e:
binary_not_found_exception = e
finally:
errs = outputHandler.error_count
supps = outputHandler.suppression_count
if errs != supps:
status = 1 # turns the TBPL job orange
command_context.log(
logging.ERROR,
"valgrind-fail-parsing",
{"errs": errs, "supps": supps},
"TEST-UNEXPECTED-FAIL | valgrind-test | error parsing: {errs} errors "
"seen, but {supps} generated suppressions seen",
)
elif errs == 0:
status = 0
command_context.log(
logging.INFO,
"valgrind-pass",
{},
"TEST-PASS | valgrind-test | valgrind found no errors",
)
else:
status = 1 # turns the TBPL job orange
# We've already printed details of the errors.
if binary_not_found_exception:
status = 2 # turns the TBPL job red
command_context.log(
logging.ERROR,
"valgrind-fail-errors",
{"error": str(binary_not_found_exception)},
"TEST-UNEXPECTED-FAIL | valgrind-test | {error}",
)
command_context.log(
logging.INFO,
"valgrind-fail-errors",
{"help": binary_not_found_exception.help()},
"{help}",
)
elif exitcode is None:
status = 2 # turns the TBPL job red
command_context.log(
logging.ERROR,
"valgrind-fail-timeout",
{"timeout": timeout},
"TEST-UNEXPECTED-FAIL | valgrind-test | Valgrind timed out "
"(reached {timeout} second limit)",
)
elif exitcode != 0:
status = 2 # turns the TBPL job red
command_context.log(
logging.ERROR,
"valgrind-fail-errors",
{"exitcode": exitcode},
"TEST-UNEXPECTED-FAIL | valgrind-test | non-zero exit code "
"from Valgrind: {exitcode}",
)
httpd.stop()
return status

View File

@ -19,9 +19,11 @@ import subprocess
from mozbuild import shellutil
from mozbuild.base import (
MozbuildObject,
MachCommandBase,
BinaryNotFoundException,
)
from mach.decorators import (
CommandProvider,
Command,
)
@ -36,87 +38,87 @@ def stringify(obj):
return json.dumps(obj, sort_keys=True, indent=2, separators=(",", ": "))
@Command(
"devtools-css-db",
category="post-build",
description="Rebuild the devtool's static css properties database.",
)
def generate_css_db(command_context):
"""Generate the static css properties database for devtools and write it to file."""
print("Re-generating the css properties database...")
db = get_properties_db_from_xpcshell(command_context)
if not db:
return 1
output_template(
command_context,
{
"preferences": stringify(db["preferences"]),
"cssProperties": stringify(db["cssProperties"]),
"pseudoElements": stringify(db["pseudoElements"]),
},
@CommandProvider
class MachCommands(MachCommandBase):
@Command(
"devtools-css-db",
category="post-build",
description="Rebuild the devtool's static css properties database.",
)
def generate_css_db(self, command_context):
"""Generate the static css properties database for devtools and write it to file."""
print("Re-generating the css properties database...")
db = self.get_properties_db_from_xpcshell(command_context)
if not db:
return 1
def get_properties_db_from_xpcshell(command_context):
"""Generate the static css properties db for devtools from an xpcshell script."""
build = MozbuildObject.from_environment()
# Get the paths
script_path = resolve_path(
command_context.topsrcdir,
"devtools/shared/css/generated/generate-properties-db.js",
)
gre_path = resolve_path(command_context.topobjdir, "dist/bin")
browser_path = resolve_path(command_context.topobjdir, "dist/bin/browser")
try:
xpcshell_path = build.get_binary_path(what="xpcshell")
except BinaryNotFoundException as e:
command_context.log(
logging.ERROR, "devtools-css-db", {"error": str(e)}, "ERROR: {error}"
self.output_template(
command_context,
{
"preferences": stringify(db["preferences"]),
"cssProperties": stringify(db["cssProperties"]),
"pseudoElements": stringify(db["pseudoElements"]),
},
)
command_context.log(
logging.INFO, "devtools-css-db", {"help": e.help()}, "{help}"
def get_properties_db_from_xpcshell(self, command_context):
"""Generate the static css properties db for devtools from an xpcshell script."""
build = MozbuildObject.from_environment()
# Get the paths
script_path = resolve_path(
command_context.topsrcdir,
"devtools/shared/css/generated/generate-properties-db.js",
)
return None
gre_path = resolve_path(command_context.topobjdir, "dist/bin")
browser_path = resolve_path(command_context.topobjdir, "dist/bin/browser")
try:
xpcshell_path = build.get_binary_path(what="xpcshell")
except BinaryNotFoundException as e:
command_context.log(
logging.ERROR, "devtools-css-db", {"error": str(e)}, "ERROR: {error}"
)
command_context.log(
logging.INFO, "devtools-css-db", {"help": e.help()}, "{help}"
)
return None
print(browser_path)
print(browser_path)
sub_env = dict(os.environ)
if sys.platform.startswith("linux"):
sub_env["LD_LIBRARY_PATH"] = gre_path
sub_env = dict(os.environ)
if sys.platform.startswith("linux"):
sub_env["LD_LIBRARY_PATH"] = gre_path
# Run the xcpshell script, and set the appdir flag to the browser path so that
# we have the proper dependencies for requiring the loader.
contents = subprocess.check_output(
[xpcshell_path, "-g", gre_path, "-a", browser_path, script_path],
env=sub_env,
)
# Extract just the output between the delimiters as the xpcshell output can
# have extra output that we don't want.
contents = contents.decode().split("DEVTOOLS_CSS_DB_DELIMITER")[1]
# Run the xcpshell script, and set the appdir flag to the browser path so that
# we have the proper dependencies for requiring the loader.
contents = subprocess.check_output(
[xpcshell_path, "-g", gre_path, "-a", browser_path, script_path],
env=sub_env,
)
# Extract just the output between the delimiters as the xpcshell output can
# have extra output that we don't want.
contents = contents.decode().split("DEVTOOLS_CSS_DB_DELIMITER")[1]
return json.loads(contents)
return json.loads(contents)
def output_template(self, command_context, substitutions):
"""Output a the properties-db.js from a template."""
js_template_path = resolve_path(
command_context.topsrcdir,
"devtools/shared/css/generated/properties-db.js.in",
)
destination_path = resolve_path(
command_context.topsrcdir, "devtools/shared/css/generated/properties-db.js"
)
def output_template(command_context, substitutions):
"""Output a the properties-db.js from a template."""
js_template_path = resolve_path(
command_context.topsrcdir,
"devtools/shared/css/generated/properties-db.js.in",
)
destination_path = resolve_path(
command_context.topsrcdir, "devtools/shared/css/generated/properties-db.js"
)
with open(js_template_path, "rb") as handle:
js_template = handle.read().decode()
with open(js_template_path, "rb") as handle:
js_template = handle.read().decode()
preamble = "/* THIS IS AN AUTOGENERATED FILE. DO NOT EDIT */\n\n"
contents = string.Template(js_template).substitute(substitutions)
preamble = "/* THIS IS AN AUTOGENERATED FILE. DO NOT EDIT */\n\n"
contents = string.Template(js_template).substitute(substitutions)
with open(destination_path, "wb") as destination:
destination.write(preamble.encode() + contents.encode())
with open(destination_path, "wb") as destination:
destination.write(preamble.encode() + contents.encode())
print("The database was successfully generated at " + destination_path)
print("The database was successfully generated at " + destination_path)

View File

@ -9,9 +9,11 @@ import sys
from mach.decorators import (
CommandArgument,
CommandProvider,
Command,
)
from mozbuild.base import MachCommandBase
from mozbuild.util import mkdir
@ -21,48 +23,51 @@ def get_test_parser():
return runtests.get_parser
@Command(
"webidl-example",
category="misc",
description="Generate example files for a WebIDL interface.",
)
@CommandArgument(
"interface", nargs="+", help="Interface(s) whose examples to generate."
)
def webidl_example(command_context, interface):
from mozwebidlcodegen import BuildSystemWebIDL
@CommandProvider
class WebIDLProvider(MachCommandBase):
@Command(
"webidl-example",
category="misc",
description="Generate example files for a WebIDL interface.",
)
@CommandArgument(
"interface", nargs="+", help="Interface(s) whose examples to generate."
)
def webidl_example(self, command_context, interface):
from mozwebidlcodegen import BuildSystemWebIDL
manager = command_context._spawn(BuildSystemWebIDL).manager
for i in interface:
manager.generate_example_files(i)
manager = command_context._spawn(BuildSystemWebIDL).manager
for i in interface:
manager.generate_example_files(i)
@Command(
"webidl-parser-test",
category="testing",
parser=get_test_parser,
description="Run WebIDL tests (Interface Browser parser).",
)
def webidl_test(self, command_context, **kwargs):
sys.path.insert(
0, os.path.join(command_context.topsrcdir, "other-licenses", "ply")
)
@Command(
"webidl-parser-test",
category="testing",
parser=get_test_parser,
description="Run WebIDL tests (Interface Browser parser).",
)
def webidl_test(command_context, **kwargs):
sys.path.insert(0, os.path.join(command_context.topsrcdir, "other-licenses", "ply"))
# Ensure the topobjdir exists. On a Taskcluster test run there won't be
# an objdir yet.
mkdir(command_context.topobjdir)
# Ensure the topobjdir exists. On a Taskcluster test run there won't be
# an objdir yet.
mkdir(command_context.topobjdir)
# Make sure we drop our cached grammar bits in the objdir, not
# wherever we happen to be running from.
os.chdir(command_context.topobjdir)
# Make sure we drop our cached grammar bits in the objdir, not
# wherever we happen to be running from.
os.chdir(command_context.topobjdir)
if kwargs["verbose"] is None:
kwargs["verbose"] = False
if kwargs["verbose"] is None:
kwargs["verbose"] = False
# Now we're going to create the cached grammar file in the
# objdir. But we're going to try loading it as a python
# module, so we need to make sure the objdir is in our search
# path.
sys.path.insert(0, command_context.topobjdir)
# Now we're going to create the cached grammar file in the
# objdir. But we're going to try loading it as a python
# module, so we need to make sure the objdir is in our search
# path.
sys.path.insert(0, command_context.topobjdir)
import runtests
import runtests
return runtests.run_tests(kwargs["tests"], verbose=kwargs["verbose"])
return runtests.run_tests(kwargs["tests"], verbose=kwargs["verbose"])

View File

@ -16,12 +16,14 @@ import textwrap
from mach.base import FailedCommandError, MachError
from mach.decorators import (
CommandArgument,
CommandProvider,
Command,
SubCommand,
)
from mach.registrar import Registrar
from mozbuild.mozconfig import MozconfigLoader
from mozbuild.base import MachCommandBase
# Command files like this are listed in build/mach_initialize.py in alphabetical
# order, but we need to access commands earlier in the sorted order to grab
@ -55,365 +57,355 @@ def inherit_command_args(command, subcommand=None):
return inherited
def state_dir():
return os.environ.get("MOZBUILD_STATE_PATH", os.path.expanduser("~/.mozbuild"))
@CommandProvider
class MachCommands(MachCommandBase):
def state_dir(self):
return os.environ.get("MOZBUILD_STATE_PATH", os.path.expanduser("~/.mozbuild"))
def tools_dir(self):
if os.environ.get("MOZ_FETCHES_DIR"):
# In automation, tools are provided by toolchain dependencies.
return os.path.join(os.environ["HOME"], os.environ["MOZ_FETCHES_DIR"])
def tools_dir():
if os.environ.get("MOZ_FETCHES_DIR"):
# In automation, tools are provided by toolchain dependencies.
return os.path.join(os.environ["HOME"], os.environ["MOZ_FETCHES_DIR"])
# In development, `mach hazard bootstrap` installs the tools separately
# to avoid colliding with the "main" compiler versions, which can
# change separately (and the precompiled sixgill and compiler version
# must match exactly).
return os.path.join(self.state_dir(), "hazard-tools")
# In development, `mach hazard bootstrap` installs the tools separately
# to avoid colliding with the "main" compiler versions, which can
# change separately (and the precompiled sixgill and compiler version
# must match exactly).
return os.path.join(state_dir(), "hazard-tools")
def sixgill_dir(self):
return os.path.join(self.tools_dir(), "sixgill")
def gcc_dir(self):
return os.path.join(self.tools_dir(), "gcc")
def sixgill_dir():
return os.path.join(tools_dir(), "sixgill")
def script_dir(self, command_context):
return os.path.join(command_context.topsrcdir, "js/src/devtools/rootAnalysis")
def get_work_dir(self, command_context, application, given):
if given is not None:
return given
return os.path.join(command_context.topsrcdir, "haz-" + application)
def gcc_dir():
return os.path.join(tools_dir(), "gcc")
def ensure_dir_exists(self, dir):
os.makedirs(dir, exist_ok=True)
return dir
def script_dir(command_context):
return os.path.join(command_context.topsrcdir, "js/src/devtools/rootAnalysis")
def get_work_dir(command_context, application, given):
if given is not None:
return given
return os.path.join(command_context.topsrcdir, "haz-" + application)
def ensure_dir_exists(dir):
os.makedirs(dir, exist_ok=True)
return dir
# Force the use of hazard-compatible installs of tools.
def setup_env_for_tools(env):
gccbin = os.path.join(gcc_dir(), "bin")
env["CC"] = os.path.join(gccbin, "gcc")
env["CXX"] = os.path.join(gccbin, "g++")
env["PATH"] = "{sixgill_dir}/usr/bin:{gccbin}:{PATH}".format(
sixgill_dir=sixgill_dir(), gccbin=gccbin, PATH=env["PATH"]
)
def setup_env_for_shell(env, shell):
"""Add JS shell directory to dynamic lib search path"""
for var in ("LD_LIBRARY_PATH", "DYLD_LIBRARY_PATH"):
env[var] = ":".join(p for p in (env.get(var), os.path.dirname(shell)) if p)
@Command(
"hazards",
category="build",
order="declaration",
description="Commands for running the static analysis for GC rooting hazards",
)
def hazards(command_context):
"""Commands related to performing the GC rooting hazard analysis"""
print("See `mach hazards --help` for a list of subcommands")
@inherit_command_args("artifact", "toolchain")
@SubCommand(
"hazards",
"bootstrap",
description="Install prerequisites for the hazard analysis",
)
def bootstrap(command_context, **kwargs):
orig_dir = os.getcwd()
os.chdir(ensure_dir_exists(tools_dir()))
try:
kwargs["from_build"] = ("linux64-gcc-sixgill", "linux64-gcc-9")
command_context._mach_context.commands.dispatch(
"artifact", command_context._mach_context, subcommand="toolchain", **kwargs
)
finally:
os.chdir(orig_dir)
@inherit_command_args("build")
@SubCommand(
"hazards", "build-shell", description="Build a shell for the hazard analysis"
)
@CommandArgument(
"--mozconfig",
default=None,
metavar="FILENAME",
help="Build with the given mozconfig.",
)
def build_shell(command_context, **kwargs):
"""Build a JS shell to use to run the rooting hazard analysis."""
# The JS shell requires some specific configuration settings to execute
# the hazard analysis code, and configuration is done via mozconfig.
# Subprocesses find MOZCONFIG in the environment, so we can't just
# modify the settings in this process's loaded version. Pass it through
# the environment.
default_mozconfig = "js/src/devtools/rootAnalysis/mozconfig.haz_shell"
mozconfig_path = (
kwargs.pop("mozconfig", None)
or os.environ.get("MOZCONFIG")
or default_mozconfig
)
mozconfig_path = os.path.join(command_context.topsrcdir, mozconfig_path)
loader = MozconfigLoader(command_context.topsrcdir)
mozconfig = loader.read_mozconfig(mozconfig_path)
# Validate the mozconfig settings in case the user overrode the default.
configure_args = mozconfig["configure_args"]
if "--enable-ctypes" not in configure_args:
raise FailedCommandError(
"ctypes required in hazard JS shell, mozconfig=" + mozconfig_path
# Force the use of hazard-compatible installs of tools.
def setup_env_for_tools(self, env):
gccbin = os.path.join(self.gcc_dir(), "bin")
env["CC"] = os.path.join(gccbin, "gcc")
env["CXX"] = os.path.join(gccbin, "g++")
env["PATH"] = "{sixgill_dir}/usr/bin:{gccbin}:{PATH}".format(
sixgill_dir=self.sixgill_dir(), gccbin=gccbin, PATH=env["PATH"]
)
# Transmit the mozconfig location to build subprocesses.
os.environ["MOZCONFIG"] = mozconfig_path
def setup_env_for_shell(self, env, shell):
"""Add JS shell directory to dynamic lib search path"""
for var in ("LD_LIBRARY_PATH", "DYLD_LIBRARY_PATH"):
env[var] = ":".join(p for p in (env.get(var), os.path.dirname(shell)) if p)
setup_env_for_tools(os.environ)
# Set a default objdir for the shell, for developer builds.
os.environ.setdefault(
"MOZ_OBJDIR", os.path.join(command_context.topsrcdir, "obj-haz-shell")
@Command(
"hazards",
category="build",
order="declaration",
description="Commands for running the static analysis for GC rooting hazards",
)
def hazards(self, command_context):
"""Commands related to performing the GC rooting hazard analysis"""
print("See `mach hazards --help` for a list of subcommands")
return command_context._mach_context.commands.dispatch(
"build", command_context._mach_context, **kwargs
@inherit_command_args("artifact", "toolchain")
@SubCommand(
"hazards",
"bootstrap",
description="Install prerequisites for the hazard analysis",
)
def bootstrap(self, command_context, **kwargs):
orig_dir = os.getcwd()
os.chdir(self.ensure_dir_exists(self.tools_dir()))
try:
kwargs["from_build"] = ("linux64-gcc-sixgill", "linux64-gcc-9")
command_context._mach_context.commands.dispatch(
"artifact",
command_context._mach_context,
subcommand="toolchain",
**kwargs
)
finally:
os.chdir(orig_dir)
@inherit_command_args("build")
@SubCommand(
"hazards", "build-shell", description="Build a shell for the hazard analysis"
)
@CommandArgument(
"--mozconfig",
default=None,
metavar="FILENAME",
help="Build with the given mozconfig.",
)
def build_shell(self, command_context, **kwargs):
"""Build a JS shell to use to run the rooting hazard analysis."""
# The JS shell requires some specific configuration settings to execute
# the hazard analysis code, and configuration is done via mozconfig.
# Subprocesses find MOZCONFIG in the environment, so we can't just
# modify the settings in this process's loaded version. Pass it through
# the environment.
def read_json_file(filename):
with open(filename) as fh:
return json.load(fh)
default_mozconfig = "js/src/devtools/rootAnalysis/mozconfig.haz_shell"
mozconfig_path = (
kwargs.pop("mozconfig", None)
or os.environ.get("MOZCONFIG")
or default_mozconfig
)
mozconfig_path = os.path.join(command_context.topsrcdir, mozconfig_path)
loader = MozconfigLoader(command_context.topsrcdir)
mozconfig = loader.read_mozconfig(mozconfig_path)
# Validate the mozconfig settings in case the user overrode the default.
configure_args = mozconfig["configure_args"]
if "--enable-ctypes" not in configure_args:
raise FailedCommandError(
"ctypes required in hazard JS shell, mozconfig=" + mozconfig_path
)
def ensure_shell(command_context, objdir):
if objdir is None:
objdir = os.path.join(command_context.topsrcdir, "obj-haz-shell")
# Transmit the mozconfig location to build subprocesses.
os.environ["MOZCONFIG"] = mozconfig_path
try:
binaries = read_json_file(os.path.join(objdir, "binaries.json"))
info = [b for b in binaries["programs"] if b["program"] == "js"][0]
return os.path.join(objdir, info["install_target"], "js")
except (OSError, KeyError):
raise FailedCommandError(
"""\
self.setup_env_for_tools(os.environ)
# Set a default objdir for the shell, for developer builds.
os.environ.setdefault(
"MOZ_OBJDIR", os.path.join(command_context.topsrcdir, "obj-haz-shell")
)
return command_context._mach_context.commands.dispatch(
"build", command_context._mach_context, **kwargs
)
def read_json_file(self, filename):
with open(filename) as fh:
return json.load(fh)
def ensure_shell(self, command_context, objdir):
if objdir is None:
objdir = os.path.join(command_context.topsrcdir, "obj-haz-shell")
try:
binaries = self.read_json_file(os.path.join(objdir, "binaries.json"))
info = [b for b in binaries["programs"] if b["program"] == "js"][0]
return os.path.join(objdir, info["install_target"], "js")
except (OSError, KeyError):
raise FailedCommandError(
"""\
no shell found in %s -- must build the JS shell with `mach hazards build-shell` first"""
% objdir
)
% objdir
)
@inherit_command_args("build")
@SubCommand(
"hazards",
"gather",
description="Gather analysis data by compiling the given application",
)
@CommandArgument(
"--application", default="browser", help="Build the given application."
)
@CommandArgument(
"--haz-objdir", default=None, help="Write object files to this directory."
)
@CommandArgument(
"--work-dir", default=None, help="Directory for output and working files."
)
def gather_hazard_data(command_context, **kwargs):
"""Gather analysis information by compiling the tree"""
application = kwargs["application"]
objdir = kwargs["haz_objdir"]
if objdir is None:
objdir = os.environ.get("HAZ_OBJDIR")
if objdir is None:
objdir = os.path.join(command_context.topsrcdir, "obj-analyzed-" + application)
work_dir = get_work_dir(command_context, application, kwargs["work_dir"])
ensure_dir_exists(work_dir)
with open(os.path.join(work_dir, "defaults.py"), "wt") as fh:
data = textwrap.dedent(
"""\
analysis_scriptdir = "{script_dir}"
objdir = "{objdir}"
source = "{srcdir}"
sixgill = "{sixgill_dir}/usr/libexec/sixgill"
sixgill_bin = "{sixgill_dir}/usr/bin"
gcc_bin = "{gcc_dir}/bin"
"""
).format(
script_dir=script_dir(command_context),
objdir=objdir,
srcdir=command_context.topsrcdir,
sixgill_dir=sixgill_dir(),
gcc_dir=gcc_dir(),
)
fh.write(data)
buildscript = " ".join(
[
command_context.topsrcdir + "/mach hazards compile",
"--job-size=3.0", # Conservatively estimate 3GB/process
"--application=" + application,
"--haz-objdir=" + objdir,
]
@inherit_command_args("build")
@SubCommand(
"hazards",
"gather",
description="Gather analysis data by compiling the given application",
)
args = [
sys.executable,
os.path.join(script_dir(command_context), "analyze.py"),
"dbs",
"--upto",
"dbs",
"-v",
"--buildcommand=" + buildscript,
]
@CommandArgument(
"--application", default="browser", help="Build the given application."
)
@CommandArgument(
"--haz-objdir", default=None, help="Write object files to this directory."
)
@CommandArgument(
"--work-dir", default=None, help="Directory for output and working files."
)
def gather_hazard_data(self, command_context, **kwargs):
"""Gather analysis information by compiling the tree"""
application = kwargs["application"]
objdir = kwargs["haz_objdir"]
if objdir is None:
objdir = os.environ.get("HAZ_OBJDIR")
if objdir is None:
objdir = os.path.join(
command_context.topsrcdir, "obj-analyzed-" + application
)
return command_context.run_process(args=args, cwd=work_dir, pass_thru=True)
work_dir = self.get_work_dir(command_context, application, kwargs["work_dir"])
self.ensure_dir_exists(work_dir)
with open(os.path.join(work_dir, "defaults.py"), "wt") as fh:
data = textwrap.dedent(
"""\
analysis_scriptdir = "{script_dir}"
objdir = "{objdir}"
source = "{srcdir}"
sixgill = "{sixgill_dir}/usr/libexec/sixgill"
sixgill_bin = "{sixgill_dir}/usr/bin"
gcc_bin = "{gcc_dir}/bin"
"""
).format(
script_dir=self.script_dir(command_context),
objdir=objdir,
srcdir=command_context.topsrcdir,
sixgill_dir=self.sixgill_dir(),
gcc_dir=self.gcc_dir(),
)
fh.write(data)
@inherit_command_args("build")
@SubCommand("hazards", "compile", description=argparse.SUPPRESS)
@CommandArgument(
"--mozconfig",
default=None,
metavar="FILENAME",
help="Build with the given mozconfig.",
)
@CommandArgument(
"--application", default="browser", help="Build the given application."
)
@CommandArgument(
"--haz-objdir",
default=os.environ.get("HAZ_OBJDIR"),
help="Write object files to this directory.",
)
def inner_compile(command_context, **kwargs):
"""Build a source tree and gather analysis information while running
under the influence of the analysis collection server."""
env = os.environ
# Check whether we are running underneath the manager (and therefore
# have a server to talk to).
if "XGILL_CONFIG" not in env:
raise Exception(
"no sixgill manager detected. `mach hazards compile` "
+ "should only be run from `mach hazards gather`"
buildscript = " ".join(
[
command_context.topsrcdir + "/mach hazards compile",
"--job-size=3.0", # Conservatively estimate 3GB/process
"--application=" + application,
"--haz-objdir=" + objdir,
]
)
app = kwargs.pop("application")
default_mozconfig = "js/src/devtools/rootAnalysis/mozconfig.%s" % app
mozconfig_path = (
kwargs.pop("mozconfig", None) or env.get("MOZCONFIG") or default_mozconfig
)
mozconfig_path = os.path.join(command_context.topsrcdir, mozconfig_path)
# Validate the mozconfig.
# Require an explicit --enable-application=APP (even if you just
# want to build the default browser application.)
loader = MozconfigLoader(command_context.topsrcdir)
mozconfig = loader.read_mozconfig(mozconfig_path)
configure_args = mozconfig["configure_args"]
if "--enable-application=%s" % app not in configure_args:
raise Exception("mozconfig %s builds wrong project" % mozconfig_path)
if not any("--with-compiler-wrapper" in a for a in configure_args):
raise Exception("mozconfig must wrap compiles")
# Communicate mozconfig to build subprocesses.
env["MOZCONFIG"] = os.path.join(command_context.topsrcdir, mozconfig_path)
# hazard mozconfigs need to find binaries in .mozbuild
env["MOZBUILD_STATE_PATH"] = state_dir()
# Suppress the gathering of sources, to save disk space and memory.
env["XGILL_NO_SOURCE"] = "1"
setup_env_for_tools(env)
if "haz_objdir" in kwargs:
env["MOZ_OBJDIR"] = kwargs.pop("haz_objdir")
return command_context._mach_context.commands.dispatch(
"build", command_context._mach_context, **kwargs
)
@SubCommand(
"hazards", "analyze", description="Analyzed gathered data for rooting hazards"
)
@CommandArgument(
"--application",
default="browser",
help="Analyze the output for the given application.",
)
@CommandArgument(
"--shell-objdir",
default=None,
help="objdir containing the optimized JS shell for running the analysis.",
)
@CommandArgument(
"--work-dir", default=None, help="Directory for output and working files."
)
@CommandArgument(
"extra",
nargs=argparse.REMAINDER,
help="Remaining non-optional arguments to analyze.py script",
)
def analyze(command_context, application, shell_objdir, work_dir, extra):
"""Analyzed gathered data for rooting hazards"""
shell = ensure_shell(command_context, shell_objdir)
args = [
os.path.join(script_dir(command_context), "analyze.py"),
"--js",
shell,
]
if extra:
args += extra
else:
args += [
"gcTypes",
args = [
sys.executable,
os.path.join(self.script_dir(command_context), "analyze.py"),
"dbs",
"--upto",
"dbs",
"-v",
"--buildcommand=" + buildscript,
]
setup_env_for_tools(os.environ)
setup_env_for_shell(os.environ, shell)
return command_context.run_process(args=args, cwd=work_dir, pass_thru=True)
work_dir = get_work_dir(command_context, application, work_dir)
return command_context.run_process(args=args, cwd=work_dir, pass_thru=True)
@inherit_command_args("build")
@SubCommand("hazards", "compile", description=argparse.SUPPRESS)
@CommandArgument(
"--mozconfig",
default=None,
metavar="FILENAME",
help="Build with the given mozconfig.",
)
@CommandArgument(
"--application", default="browser", help="Build the given application."
)
@CommandArgument(
"--haz-objdir",
default=os.environ.get("HAZ_OBJDIR"),
help="Write object files to this directory.",
)
def inner_compile(self, command_context, **kwargs):
"""Build a source tree and gather analysis information while running
under the influence of the analysis collection server."""
env = os.environ
@SubCommand(
"hazards",
"self-test",
description="Run a self-test to verify hazards are detected",
)
@CommandArgument(
"--shell-objdir",
default=None,
help="objdir containing the optimized JS shell for running the analysis.",
)
def self_test(command_context, shell_objdir):
"""Analyzed gathered data for rooting hazards"""
shell = ensure_shell(command_context, shell_objdir)
args = [
os.path.join(script_dir(command_context), "run-test.py"),
"-v",
"--js",
shell,
"--sixgill",
os.path.join(tools_dir(), "sixgill"),
"--gccdir",
gcc_dir(),
]
# Check whether we are running underneath the manager (and therefore
# have a server to talk to).
if "XGILL_CONFIG" not in env:
raise Exception(
"no sixgill manager detected. `mach hazards compile` "
+ "should only be run from `mach hazards gather`"
)
setup_env_for_tools(os.environ)
setup_env_for_shell(os.environ, shell)
app = kwargs.pop("application")
default_mozconfig = "js/src/devtools/rootAnalysis/mozconfig.%s" % app
mozconfig_path = (
kwargs.pop("mozconfig", None) or env.get("MOZCONFIG") or default_mozconfig
)
mozconfig_path = os.path.join(command_context.topsrcdir, mozconfig_path)
return command_context.run_process(args=args, pass_thru=True)
# Validate the mozconfig.
# Require an explicit --enable-application=APP (even if you just
# want to build the default browser application.)
loader = MozconfigLoader(command_context.topsrcdir)
mozconfig = loader.read_mozconfig(mozconfig_path)
configure_args = mozconfig["configure_args"]
if "--enable-application=%s" % app not in configure_args:
raise Exception("mozconfig %s builds wrong project" % mozconfig_path)
if not any("--with-compiler-wrapper" in a for a in configure_args):
raise Exception("mozconfig must wrap compiles")
# Communicate mozconfig to build subprocesses.
env["MOZCONFIG"] = os.path.join(command_context.topsrcdir, mozconfig_path)
# hazard mozconfigs need to find binaries in .mozbuild
env["MOZBUILD_STATE_PATH"] = self.state_dir()
# Suppress the gathering of sources, to save disk space and memory.
env["XGILL_NO_SOURCE"] = "1"
self.setup_env_for_tools(env)
if "haz_objdir" in kwargs:
env["MOZ_OBJDIR"] = kwargs.pop("haz_objdir")
return command_context._mach_context.commands.dispatch(
"build", command_context._mach_context, **kwargs
)
@SubCommand(
"hazards", "analyze", description="Analyzed gathered data for rooting hazards"
)
@CommandArgument(
"--application",
default="browser",
help="Analyze the output for the given application.",
)
@CommandArgument(
"--shell-objdir",
default=None,
help="objdir containing the optimized JS shell for running the analysis.",
)
@CommandArgument(
"--work-dir", default=None, help="Directory for output and working files."
)
@CommandArgument(
"extra",
nargs=argparse.REMAINDER,
help="Remaining non-optional arguments to analyze.py script",
)
def analyze(self, command_context, application, shell_objdir, work_dir, extra):
"""Analyzed gathered data for rooting hazards"""
shell = self.ensure_shell(command_context, shell_objdir)
args = [
os.path.join(self.script_dir(command_context), "analyze.py"),
"--js",
shell,
]
if extra:
args += extra
else:
args += [
"gcTypes",
"-v",
]
self.setup_env_for_tools(os.environ)
self.setup_env_for_shell(os.environ, shell)
work_dir = self.get_work_dir(command_context, application, work_dir)
return command_context.run_process(args=args, cwd=work_dir, pass_thru=True)
@SubCommand(
"hazards",
"self-test",
description="Run a self-test to verify hazards are detected",
)
@CommandArgument(
"--shell-objdir",
default=None,
help="objdir containing the optimized JS shell for running the analysis.",
)
def self_test(self, command_context, shell_objdir):
"""Analyzed gathered data for rooting hazards"""
shell = self.ensure_shell(command_context, shell_objdir)
args = [
os.path.join(self.script_dir(command_context), "run-test.py"),
"-v",
"--js",
shell,
"--sixgill",
os.path.join(self.tools_dir(), "sixgill"),
"--gccdir",
self.gcc_dir(),
]
self.setup_env_for_tools(os.environ)
self.setup_env_for_shell(os.environ, shell)
return command_context.run_process(args=args, pass_thru=True)

View File

@ -10,11 +10,13 @@ import sys
from argparse import Namespace
from mozbuild.base import (
MachCommandBase,
MachCommandConditions as conditions,
MozbuildObject,
)
from mach.decorators import (
CommandProvider,
Command,
)
@ -226,76 +228,77 @@ def get_parser():
return parser
@Command(
"reftest",
category="testing",
description="Run reftests (layout and graphics correctness).",
parser=get_parser,
)
def run_reftest(command_context, **kwargs):
kwargs["suite"] = "reftest"
return _run_reftest(command_context, **kwargs)
@Command(
"jstestbrowser",
category="testing",
description="Run js/src/tests in the browser.",
parser=get_parser,
)
def run_jstestbrowser(command_context, **kwargs):
if "--enable-js-shell" not in command_context.mozconfig["configure_args"]:
raise Exception(
"jstestbrowser requires --enable-js-shell be specified in mozconfig."
)
command_context._mach_context.commands.dispatch(
"build", command_context._mach_context, what=["stage-jstests"]
@CommandProvider
class MachCommands(MachCommandBase):
@Command(
"reftest",
category="testing",
description="Run reftests (layout and graphics correctness).",
parser=get_parser,
)
kwargs["suite"] = "jstestbrowser"
return _run_reftest(command_context, **kwargs)
def run_reftest(self, command_context, **kwargs):
kwargs["suite"] = "reftest"
return self._run_reftest(command_context, **kwargs)
@Command(
"crashtest",
category="testing",
description="Run crashtests (Check if crashes on a page).",
parser=get_parser,
)
def run_crashtest(command_context, **kwargs):
kwargs["suite"] = "crashtest"
return _run_reftest(command_context, **kwargs)
def _run_reftest(command_context, **kwargs):
kwargs["topsrcdir"] = command_context.topsrcdir
process_test_objects(kwargs)
reftest = command_context._spawn(ReftestRunner)
# Unstructured logging must be enabled prior to calling
# adb which uses an unstructured logger in its constructor.
reftest.log_manager.enable_unstructured()
if conditions.is_android(command_context):
from mozrunner.devices.android_device import (
verify_android_device,
InstallIntent,
@Command(
"jstestbrowser",
category="testing",
description="Run js/src/tests in the browser.",
parser=get_parser,
)
def run_jstestbrowser(self, command_context, **kwargs):
if "--enable-js-shell" not in command_context.mozconfig["configure_args"]:
raise Exception(
"jstestbrowser requires --enable-js-shell be specified in mozconfig."
)
command_context._mach_context.commands.dispatch(
"build", command_context._mach_context, what=["stage-jstests"]
)
kwargs["suite"] = "jstestbrowser"
return self._run_reftest(command_context, **kwargs)
install = InstallIntent.NO if kwargs.get("no_install") else InstallIntent.YES
verbose = False
if (
kwargs.get("log_mach_verbose")
or kwargs.get("log_tbpl_level") == "debug"
or kwargs.get("log_mach_level") == "debug"
or kwargs.get("log_raw_level") == "debug"
):
verbose = True
verify_android_device(
command_context,
install=install,
xre=True,
network=True,
app=kwargs["app"],
device_serial=kwargs["deviceSerial"],
verbose=verbose,
)
return reftest.run_android_test(**kwargs)
return reftest.run_desktop_test(**kwargs)
@Command(
"crashtest",
category="testing",
description="Run crashtests (Check if crashes on a page).",
parser=get_parser,
)
def run_crashtest(self, command_context, **kwargs):
kwargs["suite"] = "crashtest"
return self._run_reftest(command_context, **kwargs)
def _run_reftest(self, command_context, **kwargs):
kwargs["topsrcdir"] = command_context.topsrcdir
process_test_objects(kwargs)
reftest = command_context._spawn(ReftestRunner)
# Unstructured logging must be enabled prior to calling
# adb which uses an unstructured logger in its constructor.
reftest.log_manager.enable_unstructured()
if conditions.is_android(command_context):
from mozrunner.devices.android_device import (
verify_android_device,
InstallIntent,
)
install = (
InstallIntent.NO if kwargs.get("no_install") else InstallIntent.YES
)
verbose = False
if (
kwargs.get("log_mach_verbose")
or kwargs.get("log_tbpl_level") == "debug"
or kwargs.get("log_mach_level") == "debug"
or kwargs.get("log_raw_level") == "debug"
):
verbose = True
verify_android_device(
command_context,
install=install,
xre=True,
network=True,
app=kwargs["app"],
device_serial=kwargs["deviceSerial"],
verbose=verbose,
)
return reftest.run_android_test(**kwargs)
return reftest.run_desktop_test(**kwargs)

View File

@ -10,8 +10,10 @@ from argparse import Namespace
from functools import partial
from mach.decorators import (
CommandProvider,
Command,
)
from mozbuild.base import MachCommandBase
here = os.path.abspath(os.path.dirname(__file__))
logger = None
@ -105,13 +107,15 @@ def setup_argument_parser():
return parser
@Command(
"reftest",
category="testing",
description="Run the reftest harness.",
parser=setup_argument_parser,
)
def reftest(command_context, **kwargs):
command_context._mach_context.activate_mozharness_venv()
kwargs["suite"] = "reftest"
return run_reftest(command_context._mach_context, **kwargs)
@CommandProvider
class ReftestCommands(MachCommandBase):
@Command(
"reftest",
category="testing",
description="Run the reftest harness.",
parser=setup_argument_parser,
)
def reftest(self, command_context, **kwargs):
command_context._mach_context.activate_mozharness_venv()
kwargs["suite"] = "reftest"
return run_reftest(command_context._mach_context, **kwargs)

File diff suppressed because it is too large Load Diff

View File

@ -9,22 +9,26 @@ Mach commands are defined via Python decorators.
All the relevant decorators are defined in the *mach.decorators* module.
The important decorators are as follows:
:py:func:`CommandProvider <mach.decorators.CommandProvider>`
A class decorator that denotes that a class contains mach
commands. The decorator takes no arguments.
:py:func:`Command <mach.decorators.Command>`
A function decorator that denotes that the function should be called when
A method decorator that denotes that the method should be called when
the specified command is requested. The decorator takes a command name
as its first argument and a number of additional arguments to
configure the behavior of the command. The decorated function must take a
``command_context`` argument as its first.
configure the behavior of the command. The decorated method must take a
``command_context`` argument as its first (after ``self``).
``command_context`` is a properly configured instance of a ``MozbuildObject``
subclass, meaning it can be used for accessing things like the current config
and running processes.
:py:func:`CommandArgument <mach.decorators.CommandArgument>`
A function decorator that defines an argument to the command. Its
A method decorator that defines an argument to the command. Its
arguments are essentially proxied to ArgumentParser.add_argument()
:py:func:`SubCommand <mach.decorators.SubCommand>`
A function decorator that denotes that the function should be a
A method decorator that denotes that the method should be a
sub-command to an existing ``@Command``. The decorator takes the
parent command name as its first argument and the sub-command name
as its second argument.
@ -32,6 +36,8 @@ The important decorators are as follows:
``@CommandArgument`` can be used on ``@SubCommand`` instances just
like they can on ``@Command`` instances.
Classes with the ``@CommandProvider`` decorator **must** subclass
``MachCommandBase`` and have a compatible ``__init__`` method.
Here is a complete example:
@ -39,14 +45,18 @@ Here is a complete example:
from mach.decorators import (
CommandArgument,
CommandProvider,
Command,
)
from mozbuild.base import MachCommandBase
@Command('doit', help='Do ALL OF THE THINGS.')
@CommandArgument('--force', '-f', action='store_true',
help='Force doing it.')
def doit(command_context, force=False):
# Do stuff here.
@CommandProvider
class MyClass(MachCommandBase):
@Command('doit', help='Do ALL OF THE THINGS.')
@CommandArgument('--force', '-f', action='store_true',
help='Force doing it.')
def doit(self, command_context, force=False):
# Do stuff here.
When the module is loaded, the decorators tell mach about all handlers.
When mach runs, it takes the assembled metadata from these handlers and
@ -69,7 +79,7 @@ define a series of conditions on the
:py:func:`Command <mach.decorators.Command>` decorator.
A condition is simply a function that takes an instance of the
:py:func:`mozbuild.base.MachCommandBase` class as an argument, and
:py:func:`mach.decorators.CommandProvider` class as an argument, and
returns ``True`` or ``False``. If any of the conditions defined on a
command return ``False``, the command will not be runnable. The
docstring of a condition function is used in error messages, to explain
@ -80,6 +90,7 @@ Here is an example:
.. code-block:: python
from mach.decorators import (
CommandProvider,
Command,
)
@ -87,9 +98,18 @@ Here is an example:
"""The build needs to be available."""
return cls.build_path is not None
@Command('run_tests', conditions=[build_available])
def run_tests(command_context):
# Do stuff here.
@CommandProvider
class MyClass(MachCommandBase):
def __init__(self, *args, **kwargs):
super(MyClass, self).__init__(*args, **kwargs)
self.build_path = ...
@Command('run_tests', conditions=[build_available])
def run_tests(self, command_context):
# Do stuff here.
It is important to make sure that any state needed by the condition is
available to instances of the command provider.
By default all commands without any conditions applied will be runnable,
but it is possible to change this behaviour by setting

View File

@ -116,6 +116,7 @@ For example:
from mach.decorators import (
Command,
CommandProvider,
SettingsProvider,
)
from mozbuild.base import MachCommandBase
@ -128,10 +129,15 @@ For example:
('foo.baz', 'int', 'desc', 0, {'choices': set([0,1,2])}),
]
@Command('command', category='misc',
description='Prints a setting')
def command(command_context):
settings = command_context._mach_context.settings
print(settings.a.b)
for option in settings.foo:
print(settings.foo[option])
@CommandProvider
class Commands(MachCommandBase):
def __init__(self, *args, **kwargs):
super(Commands, self).__init__(*args, **kwargs)
self.settings = self._mach_context.settings
@Command('command', category='misc',
description='Prints a setting')
def command(self):
print(self.settings.a.b)
for option in self.settings.foo:
print(self.settings.foo[option])

View File

@ -19,15 +19,17 @@ Adding Metrics to a new Command
If you would like to submit telemetry metrics from your mach ``@Command``, you should take two steps:
#. Parameterize your ``@Command`` annotation with ``metrics_path``.
#. Use the ``command_context.metrics`` handle provided by ``MachCommandBase``
#. Use the ``self.metrics`` handle provided by ``MachCommandBase``
For example::
METRICS_PATH = os.path.abspath(os.path.join(__file__, '..', '..', 'metrics.yaml'))
@Command('custom-command', metrics_path=METRICS_PATH)
def custom_command(command_context):
command_context.metrics.custom.foo.set('bar')
@CommandProvider
class CustomCommand(MachCommandBase):
@Command('custom-command', metrics_path=METRICS_PATH)
def custom_command(self):
self.metrics.custom.foo.set('bar')
Updating Generated Metrics Docs
===============================

View File

@ -13,7 +13,8 @@ from itertools import chain
import attr
from mach.decorators import Command, CommandArgument, SubCommand
from mach.decorators import CommandProvider, Command, CommandArgument, SubCommand
from mozbuild.base import MachCommandBase
from mozbuild.util import memoize
here = os.path.abspath(os.path.dirname(__file__))
@ -36,239 +37,213 @@ def render_template(shell, context):
return template % context
@memoize
def command_handlers(command_context):
"""A dictionary of command handlers keyed by command name."""
return command_context._mach_context.commands.command_handlers
@CommandProvider
class BuiltinCommands(MachCommandBase):
@memoize
def command_handlers(self, command_context):
"""A dictionary of command handlers keyed by command name."""
return command_context._mach_context.commands.command_handlers
@memoize
def commands(self, command_context):
"""A sorted list of all command names."""
return sorted(self.command_handlers(command_context))
@memoize
def commands(command_context):
"""A sorted list of all command names."""
return sorted(command_handlers(command_context))
def _get_parser_options(parser):
options = {}
for action in parser._actions:
# ignore positional args
if not action.option_strings:
continue
# ignore suppressed args
if action.help == argparse.SUPPRESS:
continue
options[tuple(action.option_strings)] = action.help or ""
return options
@memoize
def global_options(command_context):
"""Return a dict of global options.
Of the form `{("-o", "--option"): "description"}`.
"""
for group in command_context._mach_context.global_parser._action_groups:
if group.title == "Global Arguments":
return _get_parser_options(group)
@memoize
def _get_handler_options(handler):
"""Return a dict of options for the given handler.
Of the form `{("-o", "--option"): "description"}`.
"""
options = {}
for option_strings, val in handler.arguments:
# ignore positional args
if option_strings[0][0] != "-":
continue
options[tuple(option_strings)] = val.get("help", "")
if handler._parser:
options.update(_get_parser_options(handler.parser))
return options
def _get_handler_info(handler):
try:
options = _get_handler_options(handler)
except (Exception, SystemExit):
# We don't want misbehaving commands to break tab completion,
# ignore any exceptions.
def _get_parser_options(self, parser):
options = {}
for action in parser._actions:
# ignore positional args
if not action.option_strings:
continue
subcommands = []
for sub in sorted(handler.subcommand_handlers):
subcommands.append(_get_handler_info(handler.subcommand_handlers[sub]))
# ignore suppressed args
if action.help == argparse.SUPPRESS:
continue
return CommandInfo(
name=handler.name,
description=handler.description or "",
options=options,
subcommands=subcommands,
subcommand=handler.subcommand,
)
options[tuple(action.option_strings)] = action.help or ""
return options
@memoize
def global_options(self, command_context):
"""Return a dict of global options.
@memoize
def commands_info(command_context):
"""Return a list of CommandInfo objects for each command."""
commands_info = []
# Loop over self.commands() rather than self.command_handlers().items() for
# alphabetical order.
for c in commands(command_context):
commands_info.append(_get_handler_info(command_handlers(command_context)[c]))
return commands_info
Of the form `{("-o", "--option"): "description"}`.
"""
for group in command_context._mach_context.global_parser._action_groups:
if group.title == "Global Arguments":
return self._get_parser_options(group)
@memoize
def _get_handler_options(self, handler):
"""Return a dict of options for the given handler.
@Command("mach-commands", category="misc", description="List all mach commands.")
def run_commands(command_context):
print("\n".join(commands(command_context)))
Of the form `{("-o", "--option"): "description"}`.
"""
options = {}
for option_strings, val in handler.arguments:
# ignore positional args
if option_strings[0][0] != "-":
continue
options[tuple(option_strings)] = val.get("help", "")
@Command(
"mach-debug-commands",
category="misc",
description="Show info about available mach commands.",
)
@CommandArgument(
"match",
metavar="MATCH",
default=None,
nargs="?",
help="Only display commands containing given substring.",
)
def run_debug_commands(command_context, match=None):
import inspect
if handler._parser:
options.update(self._get_parser_options(handler.parser))
for command, handler in command_handlers(command_context).items():
if match and match not in command:
continue
return options
cls = handler.cls
method = getattr(cls, getattr(handler, "method"))
def _get_handler_info(self, handler):
try:
options = self._get_handler_options(handler)
except (Exception, SystemExit):
# We don't want misbehaving commands to break tab completion,
# ignore any exceptions.
options = {}
print(command)
print("=" * len(command))
print("")
print("File: %s" % inspect.getsourcefile(method))
print("Class: %s" % cls.__name__)
print("Method: %s" % handler.method)
print("")
subcommands = []
for sub in sorted(handler.subcommand_handlers):
subcommands.append(self._get_handler_info(handler.subcommand_handlers[sub]))
return CommandInfo(
name=handler.name,
description=handler.description or "",
options=options,
subcommands=subcommands,
subcommand=handler.subcommand,
)
@Command(
"mach-completion",
category="misc",
description="Prints a list of completion strings for the specified command.",
)
@CommandArgument(
"args", default=None, nargs=argparse.REMAINDER, help="Command to complete."
)
def run_completion(command_context, args):
if not args:
print("\n".join(commands(command_context)))
return
is_help = "help" in args
command = None
for i, arg in enumerate(args):
if arg in commands(command_context):
command = arg
args = args[i + 1 :]
break
# If no command is typed yet, just offer the commands.
if not command:
print("\n".join(commands(command_context)))
return
handler = command_handlers(command_context)[command]
# If a subcommand was typed, update the handler.
for arg in args:
if arg in handler.subcommand_handlers:
handler = handler.subcommand_handlers[arg]
break
targets = sorted(handler.subcommand_handlers.keys())
if is_help:
print("\n".join(targets))
return
targets.append("help")
targets.extend(chain(*_get_handler_options(handler).keys()))
print("\n".join(targets))
def _zsh_describe(value, description=None):
value = '"' + value.replace(":", "\\:")
if description:
description = subprocess.list2cmdline(
[re.sub(r'(["\'#&;`|*?~<>^()\[\]{}$\\\x0A\xFF])', r"\\\1", description)]
).lstrip('"')
if description.endswith('"') and not description.endswith(r"\""):
description = description[:-1]
value += ":{}".format(description)
value += '"'
return value
@SubCommand(
"mach-completion",
"bash",
description="Print mach completion script for bash shell",
)
@CommandArgument(
"-f",
"--file",
dest="outfile",
default=None,
help="File path to save completion script.",
)
def completion_bash(command_context, outfile):
commands_subcommands = []
case_options = []
case_subcommands = []
for i, cmd in enumerate(commands_info(command_context)):
# Build case statement for options.
options = []
for opt_strs, description in cmd.options.items():
for opt in opt_strs:
options.append(_zsh_describe(opt, None).strip('"'))
if options:
case_options.append(
"\n".join(
[
" ({})".format(cmd.name),
' opts="${{opts}} {}"'.format(" ".join(options)),
" ;;",
"",
]
)
@memoize
def commands_info(self, command_context):
"""Return a list of CommandInfo objects for each command."""
commands_info = []
# Loop over self.commands() rather than self.command_handlers().items() for
# alphabetical order.
for c in self.commands(command_context):
commands_info.append(
self._get_handler_info(self.command_handlers(command_context)[c])
)
return commands_info
# Build case statement for subcommand options.
for sub in cmd.subcommands:
@Command("mach-commands", category="misc", description="List all mach commands.")
def run_commands(self, command_context):
print("\n".join(self.commands(command_context)))
@Command(
"mach-debug-commands",
category="misc",
description="Show info about available mach commands.",
)
@CommandArgument(
"match",
metavar="MATCH",
default=None,
nargs="?",
help="Only display commands containing given substring.",
)
def run_debug_commands(self, command_context, match=None):
import inspect
for command, handler in self.command_handlers(command_context).items():
if match and match not in command:
continue
cls = handler.cls
method = getattr(cls, getattr(handler, "method"))
print(command)
print("=" * len(command))
print("")
print("File: %s" % inspect.getsourcefile(method))
print("Class: %s" % cls.__name__)
print("Method: %s" % handler.method)
print("")
@Command(
"mach-completion",
category="misc",
description="Prints a list of completion strings for the specified command.",
)
@CommandArgument(
"args", default=None, nargs=argparse.REMAINDER, help="Command to complete."
)
def run_completion(self, command_context, args):
if not args:
print("\n".join(self.commands(command_context)))
return
is_help = "help" in args
command = None
for i, arg in enumerate(args):
if arg in self.commands(command_context):
command = arg
args = args[i + 1 :]
break
# If no command is typed yet, just offer the commands.
if not command:
print("\n".join(self.commands(command_context)))
return
handler = self.command_handlers(command_context)[command]
# If a subcommand was typed, update the handler.
for arg in args:
if arg in handler.subcommand_handlers:
handler = handler.subcommand_handlers[arg]
break
targets = sorted(handler.subcommand_handlers.keys())
if is_help:
print("\n".join(targets))
return
targets.append("help")
targets.extend(chain(*self._get_handler_options(handler).keys()))
print("\n".join(targets))
def _zsh_describe(self, value, description=None):
value = '"' + value.replace(":", "\\:")
if description:
description = subprocess.list2cmdline(
[re.sub(r'(["\'#&;`|*?~<>^()\[\]{}$\\\x0A\xFF])', r"\\\1", description)]
).lstrip('"')
if description.endswith('"') and not description.endswith(r"\""):
description = description[:-1]
value += ":{}".format(description)
value += '"'
return value
@SubCommand(
"mach-completion",
"bash",
description="Print mach completion script for bash shell",
)
@CommandArgument(
"-f",
"--file",
dest="outfile",
default=None,
help="File path to save completion script.",
)
def completion_bash(self, command_context, outfile):
commands_subcommands = []
case_options = []
case_subcommands = []
for i, cmd in enumerate(self.commands_info(command_context)):
# Build case statement for options.
options = []
for opt_strs, description in sub.options.items():
for opt_strs, description in cmd.options.items():
for opt in opt_strs:
options.append(_zsh_describe(opt, None))
options.append(self._zsh_describe(opt, None).strip('"'))
if options:
case_options.append(
"\n".join(
[
' ("{} {}")'.format(sub.name, sub.subcommand),
" ({})".format(cmd.name),
' opts="${{opts}} {}"'.format(" ".join(options)),
" ;;",
"",
@ -276,91 +251,98 @@ def completion_bash(command_context, outfile):
)
)
# Build case statement for subcommands.
subcommands = [_zsh_describe(s.subcommand, None) for s in cmd.subcommands]
if subcommands:
commands_subcommands.append(
'[{}]=" {} "'.format(
cmd.name, " ".join([h.subcommand for h in cmd.subcommands])
# Build case statement for subcommand options.
for sub in cmd.subcommands:
options = []
for opt_strs, description in sub.options.items():
for opt in opt_strs:
options.append(self._zsh_describe(opt, None))
if options:
case_options.append(
"\n".join(
[
' ("{} {}")'.format(
sub.name, sub.subcommand
),
' opts="${{opts}} {}"'.format(
" ".join(options)
),
" ;;",
"",
]
)
)
# Build case statement for subcommands.
subcommands = [
self._zsh_describe(s.subcommand, None) for s in cmd.subcommands
]
if subcommands:
commands_subcommands.append(
'[{}]=" {} "'.format(
cmd.name, " ".join([h.subcommand for h in cmd.subcommands])
)
)
)
case_subcommands.append(
"\n".join(
[
" ({})".format(cmd.name),
' subs="${{subs}} {}"'.format(" ".join(subcommands)),
" ;;",
"",
]
case_subcommands.append(
"\n".join(
[
" ({})".format(cmd.name),
' subs="${{subs}} {}"'.format(
" ".join(subcommands)
),
" ;;",
"",
]
)
)
)
globalopts = [
opt for opt_strs in global_options(command_context) for opt in opt_strs
]
context = {
"case_options": "\n".join(case_options),
"case_subcommands": "\n".join(case_subcommands),
"commands": " ".join(commands(command_context)),
"commands_subcommands": " ".join(sorted(commands_subcommands)),
"globalopts": " ".join(sorted(globalopts)),
}
globalopts = [
opt for opt_strs in self.global_options(command_context) for opt in opt_strs
]
context = {
"case_options": "\n".join(case_options),
"case_subcommands": "\n".join(case_subcommands),
"commands": " ".join(self.commands(command_context)),
"commands_subcommands": " ".join(sorted(commands_subcommands)),
"globalopts": " ".join(sorted(globalopts)),
}
outfile = open(outfile, "w") if outfile else sys.stdout
print(render_template("bash", context), file=outfile)
outfile = open(outfile, "w") if outfile else sys.stdout
print(render_template("bash", context), file=outfile)
@SubCommand(
"mach-completion",
"zsh",
description="Print mach completion script for zsh shell",
)
@CommandArgument(
"-f",
"--file",
dest="outfile",
default=None,
help="File path to save completion script.",
)
def completion_zsh(self, command_context, outfile):
commands_descriptions = []
commands_subcommands = []
case_options = []
case_subcommands = []
for i, cmd in enumerate(self.commands_info(command_context)):
commands_descriptions.append(self._zsh_describe(cmd.name, cmd.description))
@SubCommand(
"mach-completion",
"zsh",
description="Print mach completion script for zsh shell",
)
@CommandArgument(
"-f",
"--file",
dest="outfile",
default=None,
help="File path to save completion script.",
)
def completion_zsh(command_context, outfile):
commands_descriptions = []
commands_subcommands = []
case_options = []
case_subcommands = []
for i, cmd in enumerate(commands_info(command_context)):
commands_descriptions.append(_zsh_describe(cmd.name, cmd.description))
# Build case statement for options.
options = []
for opt_strs, description in cmd.options.items():
for opt in opt_strs:
options.append(_zsh_describe(opt, description))
if options:
case_options.append(
"\n".join(
[
" ({})".format(cmd.name),
" opts+=({})".format(" ".join(options)),
" ;;",
"",
]
)
)
# Build case statement for subcommand options.
for sub in cmd.subcommands:
# Build case statement for options.
options = []
for opt_strs, description in sub.options.items():
for opt_strs, description in cmd.options.items():
for opt in opt_strs:
options.append(_zsh_describe(opt, description))
options.append(self._zsh_describe(opt, description))
if options:
case_options.append(
"\n".join(
[
" ({} {})".format(sub.name, sub.subcommand),
" ({})".format(cmd.name),
" opts+=({})".format(" ".join(options)),
" ;;",
"",
@ -368,125 +350,145 @@ def completion_zsh(command_context, outfile):
)
)
# Build case statement for subcommands.
subcommands = [
_zsh_describe(s.subcommand, s.description) for s in cmd.subcommands
]
if subcommands:
commands_subcommands.append(
'[{}]=" {} "'.format(
cmd.name, " ".join([h.subcommand for h in cmd.subcommands])
# Build case statement for subcommand options.
for sub in cmd.subcommands:
options = []
for opt_strs, description in sub.options.items():
for opt in opt_strs:
options.append(self._zsh_describe(opt, description))
if options:
case_options.append(
"\n".join(
[
" ({} {})".format(sub.name, sub.subcommand),
" opts+=({})".format(" ".join(options)),
" ;;",
"",
]
)
)
# Build case statement for subcommands.
subcommands = [
self._zsh_describe(s.subcommand, s.description) for s in cmd.subcommands
]
if subcommands:
commands_subcommands.append(
'[{}]=" {} "'.format(
cmd.name, " ".join([h.subcommand for h in cmd.subcommands])
)
)
)
case_subcommands.append(
"\n".join(
[
" ({})".format(cmd.name),
" subs+=({})".format(" ".join(subcommands)),
" ;;",
"",
]
case_subcommands.append(
"\n".join(
[
" ({})".format(cmd.name),
" subs+=({})".format(" ".join(subcommands)),
" ;;",
"",
]
)
)
)
globalopts = []
for opt_strings, description in global_options(command_context).items():
for opt in opt_strings:
globalopts.append(_zsh_describe(opt, description))
globalopts = []
for opt_strings, description in self.global_options(command_context).items():
for opt in opt_strings:
globalopts.append(self._zsh_describe(opt, description))
context = {
"case_options": "\n".join(case_options),
"case_subcommands": "\n".join(case_subcommands),
"commands": " ".join(sorted(commands_descriptions)),
"commands_subcommands": " ".join(sorted(commands_subcommands)),
"globalopts": " ".join(sorted(globalopts)),
}
context = {
"case_options": "\n".join(case_options),
"case_subcommands": "\n".join(case_subcommands),
"commands": " ".join(sorted(commands_descriptions)),
"commands_subcommands": " ".join(sorted(commands_subcommands)),
"globalopts": " ".join(sorted(globalopts)),
}
outfile = open(outfile, "w") if outfile else sys.stdout
print(render_template("zsh", context), file=outfile)
outfile = open(outfile, "w") if outfile else sys.stdout
print(render_template("zsh", context), file=outfile)
@SubCommand(
"mach-completion",
"fish",
description="Print mach completion script for fish shell",
)
@CommandArgument(
"-f",
"--file",
dest="outfile",
default=None,
help="File path to save completion script.",
)
def completion_fish(self, command_context, outfile):
def _append_opt_strs(comp, opt_strs):
for opt in opt_strs:
if opt.startswith("--"):
comp += " -l {}".format(opt[2:])
elif opt.startswith("-"):
comp += " -s {}".format(opt[1:])
return comp
@SubCommand(
"mach-completion",
"fish",
description="Print mach completion script for fish shell",
)
@CommandArgument(
"-f",
"--file",
dest="outfile",
default=None,
help="File path to save completion script.",
)
def completion_fish(command_context, outfile):
def _append_opt_strs(comp, opt_strs):
for opt in opt_strs:
if opt.startswith("--"):
comp += " -l {}".format(opt[2:])
elif opt.startswith("-"):
comp += " -s {}".format(opt[1:])
return comp
globalopts = []
for opt_strs, description in global_options(command_context).items():
comp = (
"complete -c mach -n '__fish_mach_complete_no_command' "
"-d '{}'".format(description.replace("'", "\\'"))
)
comp = _append_opt_strs(comp, opt_strs)
globalopts.append(comp)
cmds = []
cmds_opts = []
for i, cmd in enumerate(commands_info(command_context)):
cmds.append(
"complete -c mach -f -n '__fish_mach_complete_no_command' "
"-a {} -d '{}'".format(cmd.name, cmd.description.replace("'", "\\'"))
)
cmds_opts += ["# {}".format(cmd.name)]
subcommands = " ".join([s.subcommand for s in cmd.subcommands])
for opt_strs, description in cmd.options.items():
globalopts = []
for opt_strs, description in self.global_options(command_context).items():
comp = (
"complete -c mach -A -n '__fish_mach_complete_command {} {}' "
"-d '{}'".format(cmd.name, subcommands, description.replace("'", "\\'"))
"complete -c mach -n '__fish_mach_complete_no_command' "
"-d '{}'".format(description.replace("'", "\\'"))
)
comp = _append_opt_strs(comp, opt_strs)
cmds_opts.append(comp)
globalopts.append(comp)
for sub in cmd.subcommands:
cmds = []
cmds_opts = []
for i, cmd in enumerate(self.commands_info(command_context)):
cmds.append(
"complete -c mach -f -n '__fish_mach_complete_no_command' "
"-a {} -d '{}'".format(cmd.name, cmd.description.replace("'", "\\'"))
)
for opt_strs, description in sub.options.items():
cmds_opts += ["# {}".format(cmd.name)]
subcommands = " ".join([s.subcommand for s in cmd.subcommands])
for opt_strs, description in cmd.options.items():
comp = (
"complete -c mach -A -n '__fish_mach_complete_subcommand {} {}' "
"complete -c mach -A -n '__fish_mach_complete_command {} {}' "
"-d '{}'".format(
sub.name, sub.subcommand, description.replace("'", "\\'")
cmd.name, subcommands, description.replace("'", "\\'")
)
)
comp = _append_opt_strs(comp, opt_strs)
cmds_opts.append(comp)
description = sub.description or ""
description = description.replace("'", "\\'")
comp = (
"complete -c mach -A -n '__fish_mach_complete_command {} {}' "
"-d '{}' -a {}".format(
cmd.name, subcommands, description, sub.subcommand
for sub in cmd.subcommands:
for opt_strs, description in sub.options.items():
comp = (
"complete -c mach -A -n '__fish_mach_complete_subcommand {} {}' "
"-d '{}'".format(
sub.name, sub.subcommand, description.replace("'", "\\'")
)
)
comp = _append_opt_strs(comp, opt_strs)
cmds_opts.append(comp)
description = sub.description or ""
description = description.replace("'", "\\'")
comp = (
"complete -c mach -A -n '__fish_mach_complete_command {} {}' "
"-d '{}' -a {}".format(
cmd.name, subcommands, description, sub.subcommand
)
)
)
cmds_opts.append(comp)
cmds_opts.append(comp)
if i < len(commands(command_context)) - 1:
cmds_opts.append("")
if i < len(self.commands(command_context)) - 1:
cmds_opts.append("")
context = {
"commands": " ".join(commands(command_context)),
"command_completions": "\n".join(cmds),
"command_option_completions": "\n".join(cmds_opts),
"global_option_completions": "\n".join(globalopts),
}
context = {
"commands": " ".join(self.commands(command_context)),
"command_completions": "\n".join(cmds),
"command_option_completions": "\n".join(cmds_opts),
"global_option_completions": "\n".join(globalopts),
}
outfile = open(outfile, "w") if outfile else sys.stdout
print(render_template("fish", context), file=outfile)
outfile = open(outfile, "w") if outfile else sys.stdout
print(render_template("fish", context), file=outfile)

View File

@ -7,47 +7,54 @@ from __future__ import absolute_import, print_function, unicode_literals
from textwrap import TextWrapper
from mach.config import TYPE_CLASSES
from mach.decorators import CommandArgument, Command
from mach.decorators import CommandArgument, CommandProvider, Command
from mozbuild.base import MachCommandBase
# Interact with settings for mach.
@CommandProvider
class Settings(MachCommandBase):
"""Interact with settings for mach.
# Currently, we only provide functionality to view what settings are
# available. In the future, this module will be used to modify settings, help
# people create configs via a wizard, etc.
Currently, we only provide functionality to view what settings are
available. In the future, this module will be used to modify settings, help
people create configs via a wizard, etc.
"""
@Command(
"settings", category="devenv", description="Show available config settings."
)
@CommandArgument(
"-l",
"--list",
dest="short",
action="store_true",
help="Show settings in a concise list",
)
def run_settings(self, command_context, short=None):
"""List available settings."""
types = {v: k for k, v in TYPE_CLASSES.items()}
wrapper = TextWrapper(initial_indent="# ", subsequent_indent="# ")
for i, section in enumerate(sorted(command_context._mach_context.settings)):
if not short:
print("%s[%s]" % ("" if i == 0 else "\n", section))
@Command("settings", category="devenv", description="Show available config settings.")
@CommandArgument(
"-l",
"--list",
dest="short",
action="store_true",
help="Show settings in a concise list",
)
def run_settings(command_context, short=None):
"""List available settings."""
types = {v: k for k, v in TYPE_CLASSES.items()}
wrapper = TextWrapper(initial_indent="# ", subsequent_indent="# ")
for i, section in enumerate(sorted(command_context._mach_context.settings)):
if not short:
print("%s[%s]" % ("" if i == 0 else "\n", section))
for option in sorted(
command_context._mach_context.settings[section]._settings
):
meta = command_context._mach_context.settings[section].get_meta(option)
desc = meta["description"]
for option in sorted(command_context._mach_context.settings[section]._settings):
meta = command_context._mach_context.settings[section].get_meta(option)
desc = meta["description"]
if short:
print("%s.%s -- %s" % (section, option, desc.splitlines()[0]))
continue
if short:
print("%s.%s -- %s" % (section, option, desc.splitlines()[0]))
continue
if option == "*":
option = "<option>"
if option == "*":
option = "<option>"
if "choices" in meta:
value = "{%s}" % ", ".join(meta["choices"])
else:
value = "<%s>" % types[meta["type_cls"]]
if "choices" in meta:
value = "{%s}" % ", ".join(meta["choices"])
else:
value = "<%s>" % types[meta["type_cls"]]
print(wrapper.fill(desc))
print(";%s=%s" % (option, value))
print(wrapper.fill(desc))
print(";%s=%s" % (option, value))

View File

@ -31,12 +31,18 @@ class _MachCommand(object):
# By default, subcommands will be sorted. If this is set to
# 'declaration', they will be left in declaration order.
"order",
# This is the function or callable that will be called when
# the command is invoked
"func",
# Describes how dispatch is performed.
# The Python class providing the command. This is the class type not
# an instance of the class. Mach will instantiate a new instance of
# the class if the command is executed.
"cls",
# The path to the `metrics.yaml` file that describes data that telemetry will
# gather for this command. This path is optional.
"metrics_path",
# The name of the method providing the command. In other words, this
# is the str name of the attribute on the class type corresponding to
# the name of the function.
"method",
# Dict of string to _MachCommand defining sub-commands for this
# command.
"subcommand_handlers",
@ -73,8 +79,9 @@ class _MachCommand(object):
)
self.ok_if_tests_disabled = ok_if_tests_disabled
self.func = None
self.cls = None
self.metrics_path = None
self.method = None
self.subcommand_handlers = {}
self.decl_order = None
@ -82,11 +89,7 @@ class _MachCommand(object):
metrics = None
if self.metrics_path:
metrics = context.telemetry.metrics(self.metrics_path)
# This ensures the resulting class is defined inside `mach` so that logging
# works as expected, and has a meaningful name
subclass = type(self.name, (MachCommandBase,), {})
return subclass(context, virtualenv_name=virtualenv_name, metrics=metrics)
return self.cls(context, virtualenv_name=virtualenv_name, metrics=metrics)
@property
def parser(self):
@ -99,7 +102,7 @@ class _MachCommand(object):
@property
def docstring(self):
return self.func.__doc__
return self.cls.__dict__[self.method].__doc__
def __ior__(self, other):
if not isinstance(other, _MachCommand):
@ -111,44 +114,84 @@ class _MachCommand(object):
return self
def register(self, func):
"""Register the command in the Registrar with the function to be called on invocation."""
if not self.subcommand:
if not self.conditions and Registrar.require_conditions:
return
msg = (
"Mach command '%s' implemented incorrectly. "
+ "Conditions argument must take a list "
+ "of functions. Found %s instead."
)
def CommandProvider(cls):
if not issubclass(cls, MachCommandBase):
raise MachError(
"Mach command provider class %s must be a subclass of "
"mozbuild.base.MachComandBase" % cls.__name__
)
if not isinstance(self.conditions, collections.abc.Iterable):
msg = msg % (self.name, type(self.conditions))
seen_commands = set()
# We scan __dict__ because we only care about the classes' own attributes,
# not inherited ones. If we did inherited attributes, we could potentially
# define commands multiple times. We also sort keys so commands defined in
# the same class are grouped in a sane order.
command_methods = sorted(
[
(name, value._mach_command)
for name, value in cls.__dict__.items()
if hasattr(value, "_mach_command")
]
)
for method, command in command_methods:
# Ignore subcommands for now: we handle them later.
if command.subcommand:
continue
seen_commands.add(command.name)
if not command.conditions and Registrar.require_conditions:
continue
msg = (
"Mach command '%s' implemented incorrectly. "
+ "Conditions argument must take a list "
+ "of functions. Found %s instead."
)
if not isinstance(command.conditions, collections.abc.Iterable):
msg = msg % (command.name, type(command.conditions))
raise MachError(msg)
for c in command.conditions:
if not hasattr(c, "__call__"):
msg = msg % (command.name, type(c))
raise MachError(msg)
for c in self.conditions:
if not hasattr(c, "__call__"):
msg = msg % (self.name, type(c))
raise MachError(msg)
command.cls = cls
command.method = method
self.func = func
Registrar.register_command_handler(command)
Registrar.register_command_handler(self)
# Now do another pass to get sub-commands. We do this in two passes so
# we can check the parent command existence without having to hold
# state and reconcile after traversal.
for method, command in command_methods:
# It is a regular command.
if not command.subcommand:
continue
else:
if self.name not in Registrar.command_handlers:
raise MachError(
"Command referenced by sub-command does not exist: %s" % self.name
)
if command.name not in seen_commands:
raise MachError(
"Command referenced by sub-command does not exist: %s" % command.name
)
self.func = func
parent = Registrar.command_handlers[self.name]
if command.name not in Registrar.command_handlers:
continue
if self.subcommand in parent.subcommand_handlers:
raise MachError("sub-command already defined: %s" % self.subcommand)
command.cls = cls
command.method = method
parent = Registrar.command_handlers[command.name]
parent.subcommand_handlers[self.subcommand] = self
if command.subcommand in parent.subcommand_handlers:
raise MachError("sub-command already defined: %s" % command.subcommand)
parent.subcommand_handlers[command.subcommand] = command
return cls
class Command(object):
@ -182,7 +225,6 @@ class Command(object):
func._mach_command = _MachCommand()
func._mach_command |= self._mach_command
func._mach_command.register(func)
return func
@ -223,7 +265,6 @@ class SubCommand(object):
func._mach_command = _MachCommand()
func._mach_command |= self._mach_command
func._mach_command.register(func)
return func

View File

@ -95,7 +95,7 @@ class MachRegistrar(object):
return 1
self.command_depth += 1
fn = handler.func
fn = getattr(instance, handler.method)
start_time = time.time()

View File

@ -6,16 +6,19 @@ from __future__ import unicode_literals
from mach.decorators import (
CommandArgument,
CommandProvider,
Command,
)
from mozbuild.base import MachCommandBase
@Command("cmd_foo", category="testing")
def run_foo(command_context):
pass
@CommandProvider
class ConditionsProvider(MachCommandBase):
@Command("cmd_foo", category="testing")
def run_foo(self, command_context):
pass
@Command("cmd_bar", category="testing")
@CommandArgument("--baz", action="store_true", help="Run with baz")
def run_bar(command_context, baz=None):
pass
@Command("cmd_bar", category="testing")
@CommandArgument("--baz", action="store_true", help="Run with baz")
def run_bar(self, command_context, baz=None):
pass

View File

@ -8,8 +8,10 @@ from functools import partial
from mach.decorators import (
CommandArgument,
CommandProvider,
Command,
)
from mozbuild.base import MachCommandBase
def is_foo(cls):
@ -22,17 +24,22 @@ def is_bar(val, cls):
return cls.bar == val
@Command("cmd_foo", category="testing")
@CommandArgument("--arg", default=None, help="Argument help.")
def run_foo(command_context):
pass
@CommandProvider
class MachCommands(MachCommandBase):
foo = True
bar = False
@Command("cmd_foo", category="testing")
@CommandArgument("--arg", default=None, help="Argument help.")
def run_foo(self, command_context):
pass
@Command("cmd_bar", category="testing", conditions=[partial(is_bar, False)])
def run_bar(command_context):
pass
@Command("cmd_bar", category="testing", conditions=[partial(is_bar, False)])
def run_bar(self, command_context):
pass
@Command("cmd_foobar", category="testing", conditions=[is_foo, partial(is_bar, True)])
def run_foobar(command_context):
pass
@Command(
"cmd_foobar", category="testing", conditions=[is_foo, partial(is_bar, True)]
)
def run_foobar(self, command_context):
pass

View File

@ -6,55 +6,50 @@ from __future__ import absolute_import
from __future__ import unicode_literals
from mach.decorators import (
CommandProvider,
Command,
)
from mozbuild.base import MachCommandBase
def is_true(cls):
return True
def is_false(cls):
return False
@Command("cmd_condition_true", category="testing", conditions=[is_true])
def run_condition_true(self, command_context):
pass
@Command("cmd_condition_false", category="testing", conditions=[is_false])
def run_condition_false(self, command_context):
pass
@Command(
"cmd_condition_true_and_false", category="testing", conditions=[is_true, is_false]
)
def run_condition_true_and_false(self, command_context):
pass
def is_ctx_foo(cls):
def is_foo(cls):
"""Foo must be true"""
return cls._mach_context.foo
return cls.foo
def is_ctx_bar(cls):
def is_bar(cls):
"""Bar must be true"""
return cls._mach_context.bar
return cls.bar
@Command("cmd_foo_ctx", category="testing", conditions=[is_ctx_foo])
def run_foo_ctx(self, command_context):
pass
@CommandProvider
class ConditionsProvider(MachCommandBase):
foo = True
bar = False
@Command("cmd_foo", category="testing", conditions=[is_foo])
def run_foo(self, command_context):
pass
@Command("cmd_bar", category="testing", conditions=[is_bar])
def run_bar(self, command_context):
pass
@Command("cmd_foobar", category="testing", conditions=[is_foo, is_bar])
def run_foobar(self, command_context):
pass
@Command("cmd_bar_ctx", category="testing", conditions=[is_ctx_bar])
def run_bar_ctx(self, command_context):
pass
@CommandProvider
class ConditionsContextProvider(MachCommandBase):
@Command("cmd_foo_ctx", category="testing", conditions=[is_foo])
def run_foo(self, command_context):
pass
@Command("cmd_bar_ctx", category="testing", conditions=[is_bar])
def run_bar(self, command_context):
pass
@Command("cmd_foobar_ctx", category="testing", conditions=[is_ctx_foo, is_ctx_bar])
def run_foobar_ctx(self, command_context):
pass
@Command("cmd_foobar_ctx", category="testing", conditions=[is_foo, is_bar])
def run_foobar(self, command_context):
pass

View File

@ -6,10 +6,14 @@ from __future__ import absolute_import
from __future__ import unicode_literals
from mach.decorators import (
CommandProvider,
Command,
)
from mozbuild.base import MachCommandBase
@Command("cmd_foo", category="testing", conditions=["invalid"])
def run_foo(command_context):
pass
@CommandProvider
class ConditionsProvider(MachCommandBase):
@Command("cmd_foo", category="testing", conditions=["invalid"])
def run_foo(self, command_context):
pass

View File

@ -7,18 +7,21 @@ from __future__ import unicode_literals
from mach.decorators import (
CommandArgument,
CommandProvider,
Command,
)
from mach.test.providers import throw2
from mozbuild.base import MachCommandBase
@Command("throw", category="testing")
@CommandArgument("--message", "-m", default="General Error")
def throw(command_context, message):
raise Exception(message)
@CommandProvider
class TestCommandProvider(MachCommandBase):
@Command("throw", category="testing")
@CommandArgument("--message", "-m", default="General Error")
def throw(self, command_context, message):
raise Exception(message)
@Command("throw_deep", category="testing")
@CommandArgument("--message", "-m", default="General Error")
def throw_deep(command_context, message):
throw2.throw_deep(message)
@Command("throw_deep", category="testing")
@CommandArgument("--message", "-m", default="General Error")
def throw_deep(self, command_context, message):
throw2.throw_deep(message)

View File

@ -49,7 +49,7 @@ class TestConditions(TestBase):
def test_conditions_pass(self):
"""Test that a command which passes its conditions is runnable."""
self.assertEquals((0, "", ""), self._run(["cmd_condition_true"]))
self.assertEquals((0, "", ""), self._run(["cmd_foo"]))
self.assertEquals((0, "", ""), self._run(["cmd_foo_ctx"], _populate_context))
def test_invalid_context_message(self):
@ -61,7 +61,7 @@ class TestConditions(TestBase):
fail_conditions = [is_bar]
for name in ("cmd_condition_false", "cmd_condition_true_and_false"):
for name in ("cmd_bar", "cmd_foobar"):
result, stdout, stderr = self._run([name])
self.assertEquals(1, result)
@ -90,9 +90,9 @@ class TestConditions(TestBase):
"""Test that commands that are not runnable do not show up in help."""
result, stdout, stderr = self._run(["help"], _populate_context)
self.assertIn("cmd_condition_true", stdout)
self.assertNotIn("cmd_condition_false", stdout)
self.assertNotIn("cmd_condition_true_and_false", stdout)
self.assertIn("cmd_foo", stdout)
self.assertNotIn("cmd_bar", stdout)
self.assertNotIn("cmd_foobar", stdout)
self.assertIn("cmd_foo_ctx", stdout)
self.assertNotIn("cmd_bar_ctx", stdout)
self.assertNotIn("cmd_foobar_ctx", stdout)

View File

@ -9,12 +9,13 @@ import os
import pytest
from unittest.mock import Mock
from mozbuild.base import MachCommandBase
from mozunit import main
import mach.registrar
import mach.decorators
from mach.base import MachError
from mach.decorators import CommandArgument, Command, SubCommand
from mach.decorators import CommandArgument, CommandProvider, Command, SubCommand
@pytest.fixture
@ -32,10 +33,12 @@ def test_register_command_with_argument(registrar):
context = Mock()
context.cwd = "."
@Command("cmd_foo", category="testing")
@CommandArgument("--arg", default=None, help="Argument help.")
def run_foo(command_context, arg):
inner_function(arg)
@CommandProvider
class CommandFoo(MachCommandBase):
@Command("cmd_foo", category="testing")
@CommandArgument("--arg", default=None, help="Argument help.")
def run_foo(self, command_context, arg):
inner_function(arg)
registrar.dispatch("cmd_foo", context, arg="argument")
@ -50,13 +53,15 @@ def test_register_command_with_metrics_path(registrar):
metrics_mock = Mock()
context.telemetry.metrics.return_value = metrics_mock
@Command("cmd_foo", category="testing", metrics_path=metrics_path)
def run_foo(command_context):
assert command_context.metrics == metrics_mock
@CommandProvider
class CommandFoo(MachCommandBase):
@Command("cmd_foo", category="testing", metrics_path=metrics_path)
def run_foo(self, command_context):
assert command_context.metrics == metrics_mock
@SubCommand("cmd_foo", "sub_foo", metrics_path=metrics_path + "2")
def run_subfoo(command_context):
assert command_context.metrics == metrics_mock
@SubCommand("cmd_foo", "sub_foo", metrics_path=metrics_path + "2")
def run_subfoo(self, command_context):
assert command_context.metrics == metrics_mock
registrar.dispatch("cmd_foo", context)
@ -73,23 +78,25 @@ def test_register_command_sets_up_class_at_runtime(registrar):
context = Mock()
context.cwd = "."
# We test that the virtualenv is set up properly dynamically on
# the instance that actually runs the command.
@Command("cmd_foo", category="testing", virtualenv_name="env_foo")
def run_foo(command_context):
assert (
os.path.basename(command_context.virtualenv_manager.virtualenv_root)
== "env_foo"
)
inner_function("foo")
# Inside the following class, we test that the virtualenv is set up properly
# dynamically on the instance that actually runs the command.
@CommandProvider
class CommandFoo(MachCommandBase):
@Command("cmd_foo", category="testing", virtualenv_name="env_foo")
def run_foo(self, command_context):
assert (
os.path.basename(command_context.virtualenv_manager.virtualenv_root)
== "env_foo"
)
inner_function("foo")
@Command("cmd_bar", category="testing", virtualenv_name="env_bar")
def run_bar(command_context):
assert (
os.path.basename(command_context.virtualenv_manager.virtualenv_root)
== "env_bar"
)
inner_function("bar")
@Command("cmd_bar", category="testing", virtualenv_name="env_bar")
def run_bar(self, command_context):
assert (
os.path.basename(command_context.virtualenv_manager.virtualenv_root)
== "env_bar"
)
inner_function("bar")
registrar.dispatch("cmd_foo", context)
inner_function.assert_called_with("foo")
@ -100,17 +107,21 @@ def test_register_command_sets_up_class_at_runtime(registrar):
def test_cannot_create_command_nonexisting_category(registrar):
with pytest.raises(MachError):
@Command("cmd_foo", category="bar")
def run_foo(command_context):
pass
@CommandProvider
class CommandFoo(MachCommandBase):
@Command("cmd_foo", category="bar")
def run_foo(self, command_context):
pass
def test_subcommand_requires_parent_to_exist(registrar):
with pytest.raises(MachError):
@SubCommand("sub_foo", "foo")
def run_foo(command_context):
pass
@CommandProvider
class CommandFoo(MachCommandBase):
@SubCommand("sub_foo", "foo")
def run_foo(self, command_context):
pass
if __name__ == "__main__":

View File

@ -20,347 +20,353 @@ from mozfile import which
from manifestparser import TestManifest
from manifestparser import filters as mpf
from mozbuild.base import MachCommandBase
from mach.decorators import CommandArgument, Command
from mach.decorators import CommandArgument, CommandProvider, Command
from mach.util import UserError
here = os.path.abspath(os.path.dirname(__file__))
@Command("python", category="devenv", description="Run Python.")
@CommandArgument(
"--no-virtualenv", action="store_true", help="Do not set up a virtualenv"
)
@CommandArgument(
"--no-activate", action="store_true", help="Do not activate the virtualenv"
)
@CommandArgument(
"--exec-file", default=None, help="Execute this Python file using `exec`"
)
@CommandArgument(
"--ipython",
action="store_true",
default=False,
help="Use ipython instead of the default Python REPL.",
)
@CommandArgument(
"--requirements",
default=None,
help="Install this requirements file before running Python",
)
@CommandArgument("args", nargs=argparse.REMAINDER)
def python(
command_context,
no_virtualenv,
no_activate,
exec_file,
ipython,
requirements,
args,
):
# Avoid logging the command
command_context.log_manager.terminal_handler.setLevel(logging.CRITICAL)
@CommandProvider
class MachCommands(MachCommandBase):
@Command("python", category="devenv", description="Run Python.")
@CommandArgument(
"--no-virtualenv", action="store_true", help="Do not set up a virtualenv"
)
@CommandArgument(
"--no-activate", action="store_true", help="Do not activate the virtualenv"
)
@CommandArgument(
"--exec-file", default=None, help="Execute this Python file using `exec`"
)
@CommandArgument(
"--ipython",
action="store_true",
default=False,
help="Use ipython instead of the default Python REPL.",
)
@CommandArgument(
"--requirements",
default=None,
help="Install this requirements file before running Python",
)
@CommandArgument("args", nargs=argparse.REMAINDER)
def python(
self,
command_context,
no_virtualenv,
no_activate,
exec_file,
ipython,
requirements,
args,
):
# Avoid logging the command
command_context.log_manager.terminal_handler.setLevel(logging.CRITICAL)
# Note: subprocess requires native strings in os.environ on Windows.
append_env = {"PYTHONDONTWRITEBYTECODE": str("1")}
# Note: subprocess requires native strings in os.environ on Windows.
append_env = {"PYTHONDONTWRITEBYTECODE": str("1")}
if requirements and no_virtualenv:
raise UserError("Cannot pass both --requirements and --no-virtualenv.")
if requirements and no_virtualenv:
raise UserError("Cannot pass both --requirements and --no-virtualenv.")
if no_virtualenv:
from mach_bootstrap import mach_sys_path
if no_virtualenv:
from mach_initialize import mach_sys_path
python_path = sys.executable
append_env["PYTHONPATH"] = os.pathsep.join(
mach_sys_path(command_context.topsrcdir)
)
else:
command_context.virtualenv_manager.ensure()
if not no_activate:
command_context.virtualenv_manager.activate()
python_path = command_context.virtualenv_manager.python_path
if requirements:
command_context.virtualenv_manager.install_pip_requirements(
requirements, require_hashes=False
python_path = sys.executable
append_env["PYTHONPATH"] = os.pathsep.join(
mach_sys_path(command_context.topsrcdir)
)
else:
command_context.virtualenv_manager.ensure()
if not no_activate:
command_context.virtualenv_manager.activate()
python_path = command_context.virtualenv_manager.python_path
if requirements:
command_context.virtualenv_manager.install_pip_requirements(
requirements, require_hashes=False
)
if exec_file:
exec(open(exec_file).read())
return 0
if ipython:
bindir = os.path.dirname(python_path)
python_path = which("ipython", path=bindir)
if not python_path:
if not no_virtualenv:
# Use `_run_pip` directly rather than `install_pip_package` to bypass
# `req.check_if_exists()` which may detect a system installed ipython.
command_context.virtualenv_manager._run_pip(["install", "ipython"])
python_path = which("ipython", path=bindir)
if exec_file:
exec(open(exec_file).read())
return 0
if ipython:
bindir = os.path.dirname(python_path)
python_path = which("ipython", path=bindir)
if not python_path:
print("error: could not detect or install ipython")
return 1
if not no_virtualenv:
# Use `_run_pip` directly rather than `install_pip_package` to bypass
# `req.check_if_exists()` which may detect a system installed ipython.
command_context.virtualenv_manager._run_pip(["install", "ipython"])
python_path = which("ipython", path=bindir)
return command_context.run_process(
[python_path] + args,
pass_thru=True, # Allow user to run Python interactively.
ensure_exit_code=False, # Don't throw on non-zero exit code.
python_unbuffered=False, # Leave input buffered.
append_env=append_env,
)
if not python_path:
print("error: could not detect or install ipython")
return 1
@Command(
"python-test",
category="testing",
virtualenv_name="python-test",
description="Run Python unit tests with pytest.",
)
@CommandArgument(
"-v", "--verbose", default=False, action="store_true", help="Verbose output."
)
@CommandArgument(
"-j",
"--jobs",
default=None,
type=int,
help="Number of concurrent jobs to run. Default is the number of CPUs "
"in the system.",
)
@CommandArgument(
"-x",
"--exitfirst",
default=False,
action="store_true",
help="Runs all tests sequentially and breaks at the first failure.",
)
@CommandArgument(
"--subsuite",
default=None,
help=(
"Python subsuite to run. If not specified, all subsuites are run. "
"Use the string `default` to only run tests without a subsuite."
),
)
@CommandArgument(
"tests",
nargs="*",
metavar="TEST",
help=(
"Tests to run. Each test can be a single file or a directory. "
"Default test resolution relies on PYTHON_UNITTEST_MANIFESTS."
),
)
@CommandArgument(
"extra",
nargs=argparse.REMAINDER,
metavar="PYTEST ARGS",
help=(
"Arguments that aren't recognized by mach. These will be "
"passed as it is to pytest"
),
)
def python_test(command_context, *args, **kwargs):
try:
tempdir = str(tempfile.mkdtemp(suffix="-python-test"))
if six.PY2:
os.environ[b"PYTHON_TEST_TMP"] = tempdir
else:
os.environ["PYTHON_TEST_TMP"] = tempdir
return run_python_tests(command_context, *args, **kwargs)
finally:
import mozfile
mozfile.remove(tempdir)
def run_python_tests(
command_context,
tests=None,
test_objects=None,
subsuite=None,
verbose=False,
jobs=None,
exitfirst=False,
extra=None,
**kwargs
):
command_context.activate_virtualenv()
if test_objects is None:
from moztest.resolve import TestResolver
resolver = command_context._spawn(TestResolver)
# If we were given test paths, try to find tests matching them.
test_objects = resolver.resolve_tests(paths=tests, flavor="python")
else:
# We've received test_objects from |mach test|. We need to ignore
# the subsuite because python-tests don't use this key like other
# harnesses do and |mach test| doesn't realize this.
subsuite = None
mp = TestManifest()
mp.tests.extend(test_objects)
filters = []
if subsuite == "default":
filters.append(mpf.subsuite(None))
elif subsuite:
filters.append(mpf.subsuite(subsuite))
tests = mp.active_tests(
filters=filters,
disabled=False,
python=command_context.virtualenv_manager.version_info()[0],
**mozinfo.info
)
if not tests:
submsg = "for subsuite '{}' ".format(subsuite) if subsuite else ""
message = (
"TEST-UNEXPECTED-FAIL | No tests collected "
+ "{}(Not in PYTHON_UNITTEST_MANIFESTS?)".format(submsg)
return command_context.run_process(
[python_path] + args,
pass_thru=True, # Allow user to run Python interactively.
ensure_exit_code=False, # Don't throw on non-zero exit code.
python_unbuffered=False, # Leave input buffered.
append_env=append_env,
)
command_context.log(logging.WARN, "python-test", {}, message)
return 1
parallel = []
sequential = []
os.environ.setdefault("PYTEST_ADDOPTS", "")
if extra:
os.environ["PYTEST_ADDOPTS"] += " " + " ".join(extra)
installed_requirements = set()
for test in tests:
if (
test.get("requirements")
and test["requirements"] not in installed_requirements
):
command_context.virtualenv_manager.install_pip_requirements(
test["requirements"], quiet=True
)
installed_requirements.add(test["requirements"])
if exitfirst:
sequential = tests
os.environ["PYTEST_ADDOPTS"] += " -x"
else:
for test in tests:
if test.get("sequential"):
sequential.append(test)
else:
parallel.append(test)
jobs = jobs or cpu_count()
return_code = 0
def on_test_finished(result):
output, ret, test_path = result
for line in output:
command_context.log(
logging.INFO, "python-test", {"line": line.rstrip()}, "{line}"
)
if ret and not return_code:
command_context.log(
logging.ERROR,
"python-test",
{"test_path": test_path, "ret": ret},
"Setting retcode to {ret} from {test_path}",
)
return return_code or ret
with ThreadPoolExecutor(max_workers=jobs) as executor:
futures = [
executor.submit(_run_python_test, command_context, test, jobs, verbose)
for test in parallel
]
try:
for future in as_completed(futures):
return_code = on_test_finished(future.result())
except KeyboardInterrupt:
# Hack to force stop currently running threads.
# https://gist.github.com/clchiou/f2608cbe54403edb0b13
executor._threads.clear()
thread._threads_queues.clear()
raise
for test in sequential:
return_code = on_test_finished(
_run_python_test(command_context, test, jobs, verbose)
)
if return_code and exitfirst:
break
command_context.log(
logging.INFO,
@Command(
"python-test",
{"return_code": return_code},
"Return code from mach python-test: {return_code}",
category="testing",
virtualenv_name="python-test",
description="Run Python unit tests with pytest.",
)
return return_code
@CommandArgument(
"-v", "--verbose", default=False, action="store_true", help="Verbose output."
)
@CommandArgument(
"-j",
"--jobs",
default=None,
type=int,
help="Number of concurrent jobs to run. Default is the number of CPUs "
"in the system.",
)
@CommandArgument(
"-x",
"--exitfirst",
default=False,
action="store_true",
help="Runs all tests sequentially and breaks at the first failure.",
)
@CommandArgument(
"--subsuite",
default=None,
help=(
"Python subsuite to run. If not specified, all subsuites are run. "
"Use the string `default` to only run tests without a subsuite."
),
)
@CommandArgument(
"tests",
nargs="*",
metavar="TEST",
help=(
"Tests to run. Each test can be a single file or a directory. "
"Default test resolution relies on PYTHON_UNITTEST_MANIFESTS."
),
)
@CommandArgument(
"extra",
nargs=argparse.REMAINDER,
metavar="PYTEST ARGS",
help=(
"Arguments that aren't recognized by mach. These will be "
"passed as it is to pytest"
),
)
def python_test(self, command_context, *args, **kwargs):
try:
tempdir = str(tempfile.mkdtemp(suffix="-python-test"))
if six.PY2:
os.environ[b"PYTHON_TEST_TMP"] = tempdir
else:
os.environ["PYTHON_TEST_TMP"] = tempdir
return self.run_python_tests(command_context, *args, **kwargs)
finally:
import mozfile
mozfile.remove(tempdir)
def _run_python_test(command_context, test, jobs, verbose):
from mozprocess import ProcessHandler
def run_python_tests(
self,
command_context,
tests=None,
test_objects=None,
subsuite=None,
verbose=False,
jobs=None,
exitfirst=False,
extra=None,
**kwargs
):
output = []
command_context.activate_virtualenv()
if test_objects is None:
from moztest.resolve import TestResolver
def _log(line):
# Buffer messages if more than one worker to avoid interleaving
if jobs > 1:
output.append(line)
resolver = command_context._spawn(TestResolver)
# If we were given test paths, try to find tests matching them.
test_objects = resolver.resolve_tests(paths=tests, flavor="python")
else:
command_context.log(
logging.INFO, "python-test", {"line": line.rstrip()}, "{line}"
)
# We've received test_objects from |mach test|. We need to ignore
# the subsuite because python-tests don't use this key like other
# harnesses do and |mach test| doesn't realize this.
subsuite = None
file_displayed_test = [] # used as boolean
mp = TestManifest()
mp.tests.extend(test_objects)
def _line_handler(line):
line = six.ensure_str(line)
if not file_displayed_test:
output = "Ran" in line or "collected" in line or line.startswith("TEST-")
if output:
file_displayed_test.append(True)
filters = []
if subsuite == "default":
filters.append(mpf.subsuite(None))
elif subsuite:
filters.append(mpf.subsuite(subsuite))
# Hack to make sure treeherder highlights pytest failures
if "FAILED" in line.rsplit(" ", 1)[-1]:
line = line.replace("FAILED", "TEST-UNEXPECTED-FAIL")
_log(line)
_log(test["path"])
python = command_context.virtualenv_manager.python_path
cmd = [python, test["path"]]
env = os.environ.copy()
if six.PY2:
env[b"PYTHONDONTWRITEBYTECODE"] = b"1"
else:
env["PYTHONDONTWRITEBYTECODE"] = "1"
proc = ProcessHandler(
cmd, env=env, processOutputLine=_line_handler, storeOutput=False
)
proc.run()
return_code = proc.wait()
if not file_displayed_test:
_log(
"TEST-UNEXPECTED-FAIL | No test output (missing mozunit.main() "
"call?): {}".format(test["path"])
tests = mp.active_tests(
filters=filters,
disabled=False,
python=command_context.virtualenv_manager.version_info()[0],
**mozinfo.info
)
if verbose:
if return_code != 0:
_log("Test failed: {}".format(test["path"]))
else:
_log("Test passed: {}".format(test["path"]))
if not tests:
submsg = "for subsuite '{}' ".format(subsuite) if subsuite else ""
message = (
"TEST-UNEXPECTED-FAIL | No tests collected "
+ "{}(Not in PYTHON_UNITTEST_MANIFESTS?)".format(submsg)
)
command_context.log(logging.WARN, "python-test", {}, message)
return 1
return output, return_code, test["path"]
parallel = []
sequential = []
os.environ.setdefault("PYTEST_ADDOPTS", "")
if extra:
os.environ["PYTEST_ADDOPTS"] += " " + " ".join(extra)
installed_requirements = set()
for test in tests:
if (
test.get("requirements")
and test["requirements"] not in installed_requirements
):
command_context.virtualenv_manager.install_pip_requirements(
test["requirements"], quiet=True
)
installed_requirements.add(test["requirements"])
if exitfirst:
sequential = tests
os.environ["PYTEST_ADDOPTS"] += " -x"
else:
for test in tests:
if test.get("sequential"):
sequential.append(test)
else:
parallel.append(test)
jobs = jobs or cpu_count()
return_code = 0
def on_test_finished(result):
output, ret, test_path = result
for line in output:
command_context.log(
logging.INFO, "python-test", {"line": line.rstrip()}, "{line}"
)
if ret and not return_code:
command_context.log(
logging.ERROR,
"python-test",
{"test_path": test_path, "ret": ret},
"Setting retcode to {ret} from {test_path}",
)
return return_code or ret
with ThreadPoolExecutor(max_workers=jobs) as executor:
futures = [
executor.submit(
self._run_python_test, command_context, test, jobs, verbose
)
for test in parallel
]
try:
for future in as_completed(futures):
return_code = on_test_finished(future.result())
except KeyboardInterrupt:
# Hack to force stop currently running threads.
# https://gist.github.com/clchiou/f2608cbe54403edb0b13
executor._threads.clear()
thread._threads_queues.clear()
raise
for test in sequential:
return_code = on_test_finished(
self._run_python_test(command_context, test, jobs, verbose)
)
if return_code and exitfirst:
break
command_context.log(
logging.INFO,
"python-test",
{"return_code": return_code},
"Return code from mach python-test: {return_code}",
)
return return_code
def _run_python_test(self, command_context, test, jobs, verbose):
from mozprocess import ProcessHandler
output = []
def _log(line):
# Buffer messages if more than one worker to avoid interleaving
if jobs > 1:
output.append(line)
else:
command_context.log(
logging.INFO, "python-test", {"line": line.rstrip()}, "{line}"
)
file_displayed_test = [] # used as boolean
def _line_handler(line):
line = six.ensure_str(line)
if not file_displayed_test:
output = (
"Ran" in line or "collected" in line or line.startswith("TEST-")
)
if output:
file_displayed_test.append(True)
# Hack to make sure treeherder highlights pytest failures
if "FAILED" in line.rsplit(" ", 1)[-1]:
line = line.replace("FAILED", "TEST-UNEXPECTED-FAIL")
_log(line)
_log(test["path"])
python = command_context.virtualenv_manager.python_path
cmd = [python, test["path"]]
env = os.environ.copy()
if six.PY2:
env[b"PYTHONDONTWRITEBYTECODE"] = b"1"
else:
env["PYTHONDONTWRITEBYTECODE"] = "1"
proc = ProcessHandler(
cmd, env=env, processOutputLine=_line_handler, storeOutput=False
)
proc.run()
return_code = proc.wait()
if not file_displayed_test:
_log(
"TEST-UNEXPECTED-FAIL | No test output (missing mozunit.main() "
"call?): {}".format(test["path"])
)
if verbose:
if return_code != 0:
_log("Test failed: {}".format(test["path"]))
else:
_log("Test passed: {}".format(test["path"]))
return output, return_code, test["path"]

View File

@ -7,102 +7,113 @@ from __future__ import absolute_import, print_function, unicode_literals
import errno
import sys
from mach.decorators import CommandArgument, Command
from mach.decorators import CommandArgument, CommandProvider, Command
from mozbuild.base import MachCommandBase
from mozboot.bootstrap import APPLICATIONS
@Command(
"bootstrap",
category="devenv",
description="Install required system packages for building.",
)
@CommandArgument(
"--application-choice",
choices=list(APPLICATIONS.keys()) + list(APPLICATIONS.values()),
default=None,
help="Pass in an application choice instead of using the default "
"interactive prompt.",
)
@CommandArgument(
"--no-system-changes",
dest="no_system_changes",
action="store_true",
help="Only execute actions that leave the system configuration alone.",
)
def bootstrap(command_context, application_choice=None, no_system_changes=False):
"""Bootstrap system and mach for optimal development experience."""
from mozboot.bootstrap import Bootstrapper
bootstrapper = Bootstrapper(
choice=application_choice,
no_interactive=not command_context._mach_context.is_interactive,
no_system_changes=no_system_changes,
mach_context=command_context._mach_context,
@CommandProvider
class Bootstrap(MachCommandBase):
@Command(
"bootstrap",
category="devenv",
description="Install required system packages for building.",
)
bootstrapper.bootstrap(command_context.settings)
@CommandArgument(
"--application-choice",
choices=list(APPLICATIONS.keys()) + list(APPLICATIONS.values()),
default=None,
help="Pass in an application choice instead of using the default "
"interactive prompt.",
)
@CommandArgument(
"--no-system-changes",
dest="no_system_changes",
action="store_true",
help="Only execute actions that leave the system " "configuration alone.",
)
def bootstrap(
self, command_context, application_choice=None, no_system_changes=False
):
"""Bootstrap system and mach for optimal development experience."""
from mozboot.bootstrap import Bootstrapper
bootstrapper = Bootstrapper(
choice=application_choice,
no_interactive=not command_context._mach_context.is_interactive,
no_system_changes=no_system_changes,
mach_context=command_context._mach_context,
)
bootstrapper.bootstrap(command_context.settings)
@Command(
"vcs-setup",
category="devenv",
description="Help configure a VCS for optimal development.",
)
@CommandArgument(
"-u",
"--update-only",
action="store_true",
help="Only update recommended extensions, don't run the wizard.",
)
def vcs_setup(command_context, update_only=False):
"""Ensure a Version Control System (Mercurial or Git) is optimally
configured.
@CommandProvider
class VersionControlCommands(MachCommandBase):
@Command(
"vcs-setup",
category="devenv",
description="Help configure a VCS for optimal development.",
)
@CommandArgument(
"-u",
"--update-only",
action="store_true",
help="Only update recommended extensions, don't run the wizard.",
)
def vcs_setup(self, command_context, update_only=False):
"""Ensure a Version Control System (Mercurial or Git) is optimally
configured.
This command will inspect your VCS configuration and
guide you through an interactive wizard helping you configure the
VCS for optimal use on Mozilla projects.
This command will inspect your VCS configuration and
guide you through an interactive wizard helping you configure the
VCS for optimal use on Mozilla projects.
User choice is respected: no changes are made without explicit
confirmation from you.
User choice is respected: no changes are made without explicit
confirmation from you.
If "--update-only" is used, the interactive wizard is disabled
and this command only ensures that remote repositories providing
VCS extensions are up to date.
"""
import mozboot.bootstrap as bootstrap
import mozversioncontrol
from mozfile import which
If "--update-only" is used, the interactive wizard is disabled
and this command only ensures that remote repositories providing
VCS extensions are up to date.
"""
import mozboot.bootstrap as bootstrap
import mozversioncontrol
from mozfile import which
repo = mozversioncontrol.get_repository_object(command_context._mach_context.topdir)
tool = "hg"
if repo.name == "git":
tool = "git"
# "hg" is an executable script with a shebang, which will be found by
# which. We need to pass a win32 executable to the function because we
# spawn a process from it.
if sys.platform in ("win32", "msys"):
tool += ".exe"
vcs = which(tool)
if not vcs:
raise OSError(errno.ENOENT, "Could not find {} on $PATH".format(tool))
if update_only:
repo = mozversioncontrol.get_repository_object(
command_context._mach_context.topdir
)
tool = "hg"
if repo.name == "git":
bootstrap.update_git_tools(
vcs,
command_context._mach_context.state_dir,
command_context._mach_context.topdir,
)
tool = "git"
# "hg" is an executable script with a shebang, which will be found by
# which. We need to pass a win32 executable to the function because we
# spawn a process from it.
if sys.platform in ("win32", "msys"):
tool += ".exe"
vcs = which(tool)
if not vcs:
raise OSError(errno.ENOENT, "Could not find {} on $PATH".format(tool))
if update_only:
if repo.name == "git":
bootstrap.update_git_tools(
vcs,
command_context._mach_context.state_dir,
command_context._mach_context.topdir,
)
else:
bootstrap.update_vct(vcs, command_context._mach_context.state_dir)
else:
bootstrap.update_vct(vcs, command_context._mach_context.state_dir)
else:
if repo.name == "git":
bootstrap.configure_git(
vcs,
which("git-cinnabar"),
command_context._mach_context.state_dir,
command_context._mach_context.topdir,
)
else:
bootstrap.configure_mercurial(vcs, command_context._mach_context.state_dir)
if repo.name == "git":
bootstrap.configure_git(
vcs,
which("git-cinnabar"),
command_context._mach_context.state_dir,
command_context._mach_context.topdir,
)
else:
bootstrap.configure_mercurial(
vcs, command_context._mach_context.state_dir
)

File diff suppressed because it is too large Load Diff

View File

@ -9,373 +9,375 @@ import logging
import os
import subprocess
from mozbuild import build_commands
from mozbuild.base import MachCommandBase
from mozbuild.build_commands import Build
from mozfile import which
from mach.decorators import CommandArgument, Command
from mach.decorators import CommandArgument, CommandProvider, Command
import mozpack.path as mozpath
@Command(
"ide",
category="devenv",
description="Generate a project and launch an IDE.",
virtualenv_name="build",
)
@CommandArgument("ide", choices=["eclipse", "visualstudio", "vscode"])
@CommandArgument("args", nargs=argparse.REMAINDER)
def run(command_context, ide, args):
if ide == "eclipse":
backend = "CppEclipse"
elif ide == "visualstudio":
backend = "VisualStudio"
elif ide == "vscode":
backend = "Clangd"
@CommandProvider
class MachCommands(MachCommandBase):
@Command(
"ide",
category="devenv",
description="Generate a project and launch an IDE.",
virtualenv_name="build",
)
@CommandArgument("ide", choices=["eclipse", "visualstudio", "vscode"])
@CommandArgument("args", nargs=argparse.REMAINDER)
def run(self, command_context, ide, args):
if ide == "eclipse":
backend = "CppEclipse"
elif ide == "visualstudio":
backend = "VisualStudio"
elif ide == "vscode":
backend = "Clangd"
if ide == "eclipse" and not which("eclipse"):
command_context.log(
logging.ERROR,
"ide",
{},
"Eclipse CDT 8.4 or later must be installed in your PATH.",
)
command_context.log(
logging.ERROR,
"ide",
{},
"Download: http://www.eclipse.org/cdt/downloads.php",
)
return 1
if ide == "vscode":
# Check if platform has VSCode installed
vscode_cmd = find_vscode_cmd(command_context)
if vscode_cmd is None:
choice = prompt_bool(
"VSCode cannot be found, and may not be installed. Proceed?"
if ide == "eclipse" and not which("eclipse"):
command_context.log(
logging.ERROR,
"ide",
{},
"Eclipse CDT 8.4 or later must be installed in your PATH.",
)
if not choice:
return 1
command_context.log(
logging.ERROR,
"ide",
{},
"Download: http://www.eclipse.org/cdt/downloads.php",
)
return 1
rc = build_commands.configure(command_context)
if ide == "vscode":
# Check if platform has VSCode installed
vscode_cmd = self.find_vscode_cmd(command_context)
if vscode_cmd is None:
choice = prompt_bool(
"VSCode cannot be found, and may not be installed. Proceed?"
)
if not choice:
return 1
if rc != 0:
return rc
# Create the Build environment to configure the tree
builder = Build(command_context._mach_context, None)
# First install what we can through install manifests.
rc = build_commands._run_make(
directory=command_context.topobjdir,
target="pre-export",
line_handler=None,
)
if rc != 0:
return rc
rc = builder.configure(command_context)
if rc != 0:
return rc
# Then build the rest of the build dependencies by running the full
# export target, because we can't do anything better.
for target in ("export", "pre-compile"):
rc = build_commands._run_make(
# First install what we can through install manifests.
rc = builder._run_make(
directory=command_context.topobjdir,
target=target,
target="pre-export",
line_handler=None,
)
if rc != 0:
return rc
else:
# Here we refresh the whole build. 'build export' is sufficient here and is
# probably more correct but it's also nice having a single target to get a fully
# built and indexed project (gives a easy target to use before go out to lunch).
res = command_context._mach_context.commands.dispatch(
"build", command_context._mach_context
# Then build the rest of the build dependencies by running the full
# export target, because we can't do anything better.
for target in ("export", "pre-compile"):
rc = builder._run_make(
directory=command_context.topobjdir,
target=target,
line_handler=None,
)
if rc != 0:
return rc
else:
# Here we refresh the whole build. 'build export' is sufficient here and is
# probably more correct but it's also nice having a single target to get a fully
# built and indexed project (gives a easy target to use before go out to lunch).
res = command_context._mach_context.commands.dispatch(
"build", command_context._mach_context
)
if res != 0:
return 1
# Generate or refresh the IDE backend.
python = command_context.virtualenv_manager.python_path
config_status = os.path.join(command_context.topobjdir, "config.status")
args = [python, config_status, "--backend=%s" % backend]
res = command_context._run_command_in_objdir(
args=args, pass_thru=True, ensure_exit_code=False
)
if res != 0:
return 1
# Generate or refresh the IDE backend.
python = command_context.virtualenv_manager.python_path
config_status = os.path.join(command_context.topobjdir, "config.status")
args = [python, config_status, "--backend=%s" % backend]
res = command_context._run_command_in_objdir(
args=args, pass_thru=True, ensure_exit_code=False
)
if res != 0:
return 1
if ide == "eclipse":
eclipse_workspace_dir = self.get_eclipse_workspace_path(command_context)
subprocess.check_call(["eclipse", "-data", eclipse_workspace_dir])
elif ide == "visualstudio":
visual_studio_workspace_dir = self.get_visualstudio_workspace_path(
command_context
)
subprocess.call(["explorer.exe", visual_studio_workspace_dir])
elif ide == "vscode":
return self.setup_vscode(command_context, vscode_cmd)
if ide == "eclipse":
eclipse_workspace_dir = get_eclipse_workspace_path(command_context)
subprocess.check_call(["eclipse", "-data", eclipse_workspace_dir])
elif ide == "visualstudio":
visual_studio_workspace_dir = get_visualstudio_workspace_path(command_context)
subprocess.call(["explorer.exe", visual_studio_workspace_dir])
elif ide == "vscode":
return setup_vscode(command_context, vscode_cmd)
def get_eclipse_workspace_path(self, command_context):
from mozbuild.backend.cpp_eclipse import CppEclipseBackend
def get_eclipse_workspace_path(command_context):
from mozbuild.backend.cpp_eclipse import CppEclipseBackend
return CppEclipseBackend.get_workspace_path(
command_context.topsrcdir, command_context.topobjdir
)
def get_visualstudio_workspace_path(command_context):
return os.path.normpath(
os.path.join(command_context.topobjdir, "msvc", "mozilla.sln")
)
def find_vscode_cmd(command_context):
import shutil
# Try to look up the `code` binary on $PATH, and use it if present. This
# should catch cases like being run from within a vscode-remote shell,
# even if vscode itself is also installed on the remote host.
path = shutil.which("code")
if path is not None:
return [path]
# If the binary wasn't on $PATH, try to find it in a variety of other
# well-known install locations based on the current platform.
if "linux" in command_context.platform[0]:
cmd_and_path = [
{"path": "/usr/local/bin/code", "cmd": ["/usr/local/bin/code"]},
{"path": "/snap/bin/code", "cmd": ["/snap/bin/code"]},
{"path": "/usr/bin/code", "cmd": ["/usr/bin/code"]},
{"path": "/usr/bin/code-insiders", "cmd": ["/usr/bin/code-insiders"]},
]
elif "macos" in command_context.platform[0]:
cmd_and_path = [
{"path": "/usr/local/bin/code", "cmd": ["/usr/local/bin/code"]},
{
"path": "/Applications/Visual Studio Code.app",
"cmd": ["open", "/Applications/Visual Studio Code.app", "--args"],
},
{
"path": "/Applications/Visual Studio Code - Insiders.app",
"cmd": [
"open",
"/Applications/Visual Studio Code - Insiders.app",
"--args",
],
},
]
elif "win64" in command_context.platform[0]:
from pathlib import Path
vscode_path = mozpath.join(
str(Path.home()),
"AppData",
"Local",
"Programs",
"Microsoft VS Code",
"Code.exe",
return CppEclipseBackend.get_workspace_path(
command_context.topsrcdir, command_context.topobjdir
)
vscode_insiders_path = mozpath.join(
str(Path.home()),
"AppData",
"Local",
"Programs",
"Microsoft VS Code Insiders",
"Code - Insiders.exe",
def get_visualstudio_workspace_path(self, command_context):
return os.path.normpath(
os.path.join(command_context.topobjdir, "msvc", "mozilla.sln")
)
cmd_and_path = [
{"path": vscode_path, "cmd": [vscode_path]},
{"path": vscode_insiders_path, "cmd": [vscode_insiders_path]},
]
# Did we guess the path?
for element in cmd_and_path:
if os.path.exists(element["path"]):
return element["cmd"]
def find_vscode_cmd(self, command_context):
import shutil
# Path cannot be found
return None
# Try to look up the `code` binary on $PATH, and use it if present. This
# should catch cases like being run from within a vscode-remote shell,
# even if vscode itself is also installed on the remote host.
path = shutil.which("code")
if path is not None:
return [path]
# If the binary wasn't on $PATH, try to find it in a variety of other
# well-known install locations based on the current platform.
if "linux" in command_context.platform[0]:
cmd_and_path = [
{"path": "/usr/local/bin/code", "cmd": ["/usr/local/bin/code"]},
{"path": "/snap/bin/code", "cmd": ["/snap/bin/code"]},
{"path": "/usr/bin/code", "cmd": ["/usr/bin/code"]},
{"path": "/usr/bin/code-insiders", "cmd": ["/usr/bin/code-insiders"]},
]
elif "macos" in command_context.platform[0]:
cmd_and_path = [
{"path": "/usr/local/bin/code", "cmd": ["/usr/local/bin/code"]},
{
"path": "/Applications/Visual Studio Code.app",
"cmd": ["open", "/Applications/Visual Studio Code.app", "--args"],
},
{
"path": "/Applications/Visual Studio Code - Insiders.app",
"cmd": [
"open",
"/Applications/Visual Studio Code - Insiders.app",
"--args",
],
},
]
elif "win64" in command_context.platform[0]:
from pathlib import Path
def setup_vscode(command_context, vscode_cmd):
vscode_settings = mozpath.join(
command_context.topsrcdir, ".vscode", "settings.json"
)
vscode_path = mozpath.join(
str(Path.home()),
"AppData",
"Local",
"Programs",
"Microsoft VS Code",
"Code.exe",
)
vscode_insiders_path = mozpath.join(
str(Path.home()),
"AppData",
"Local",
"Programs",
"Microsoft VS Code Insiders",
"Code - Insiders.exe",
)
cmd_and_path = [
{"path": vscode_path, "cmd": [vscode_path]},
{"path": vscode_insiders_path, "cmd": [vscode_insiders_path]},
]
clangd_cc_path = mozpath.join(command_context.topobjdir, "clangd")
# Did we guess the path?
for element in cmd_and_path:
if os.path.exists(element["path"]):
return element["cmd"]
# Verify if the required files are present
clang_tools_path = mozpath.join(
command_context._mach_context.state_dir, "clang-tools"
)
clang_tidy_bin = mozpath.join(clang_tools_path, "clang-tidy", "bin")
# Path cannot be found
return None
clangd_path = mozpath.join(
clang_tidy_bin,
"clangd" + command_context.config_environment.substs.get("BIN_SUFFIX", ""),
)
if not os.path.exists(clangd_path):
command_context.log(
logging.ERROR,
"ide",
{},
"Unable to locate clangd in {}.".format(clang_tidy_bin),
def setup_vscode(self, command_context, vscode_cmd):
    """Write a clangd configuration into .vscode/settings.json and open VS Code.

    Ensures a clangd binary is available (downloading the clang-tidy
    toolchain into the mach state dir if it is missing), merges the
    generated clangd settings into any existing VS Code workspace
    settings — prompting the user with a diff before overwriting — and
    finally launches VS Code on the source tree.

    Returns 0 on success, 1 if the user declined the settings update, or
    a non-zero toolchain / VS Code exit code on failure.
    """
    vscode_settings = mozpath.join(
        command_context.topsrcdir, ".vscode", "settings.json"
    )

    clangd_cc_path = mozpath.join(command_context.topobjdir, "clangd")

    # Verify if the required files are present
    clang_tools_path = mozpath.join(
        command_context._mach_context.state_dir, "clang-tools"
    )
    clang_tidy_bin = mozpath.join(clang_tools_path, "clang-tidy", "bin")

    clangd_path = mozpath.join(
        clang_tidy_bin,
        "clangd" + command_context.config_environment.substs.get("BIN_SUFFIX", ""),
    )

    if not os.path.exists(clangd_path):
        command_context.log(
            logging.ERROR,
            "ide",
            {},
            "Unable to locate clangd in {}.".format(clang_tidy_bin),
        )
        # Fetch the clang-tidy toolchain artifact, which bundles clangd.
        rc = self._get_clang_tools(command_context, clang_tools_path)

        if rc != 0:
            return rc

    import multiprocessing
    import json
    import difflib
    from mozbuild.code_analysis.utils import ClangTidyConfig

    clang_tidy_cfg = ClangTidyConfig(command_context.topsrcdir)

    clangd_json = {
        "clangd.path": clangd_path,
        "clangd.arguments": [
            "--compile-commands-dir",
            clangd_cc_path,
            "-j",
            str(multiprocessing.cpu_count() // 2),
            "--limit-results",
            "0",
            "--completion-style",
            "detailed",
            "--background-index",
            "--all-scopes-completion",
            "--log",
            "info",
            "--pch-storage",
            "memory",
            "--clang-tidy",
            "--clang-tidy-checks",
            ",".join(clang_tidy_cfg.checks),
        ],
    }

    # Load the existing .vscode/settings.json file, to check if it needs to
    # be created or updated.
    try:
        with open(vscode_settings) as fh:
            old_settings_str = fh.read()
    except FileNotFoundError:
        print("Configuration for {} will be created.".format(vscode_settings))
        old_settings_str = None

    if old_settings_str is None:
        # No old settings exist
        with open(vscode_settings, "w") as fh:
            json.dump(clangd_json, fh, indent=4)
    else:
        # Merge our new settings with the existing settings, and check if we
        # need to make changes. Only prompt & write out the updated config
        # file if settings actually changed.
        try:
            old_settings = json.loads(old_settings_str)
            prompt_prefix = ""
        except ValueError:
            old_settings = {}
            prompt_prefix = (
                "\n**WARNING**: Parsing of existing settings file failed. "
                "Existing settings will be lost!"
            )

        # New keys win over existing ones; unrelated user settings survive.
        settings = {**old_settings, **clangd_json}

        if old_settings != settings:
            # Prompt the user with a diff of the changes we're going to make
            new_settings_str = json.dumps(settings, indent=4)
            print(
                "\nThe following modifications to {settings} will occur:\n{diff}".format(
                    settings=vscode_settings,
                    diff="".join(
                        difflib.unified_diff(
                            old_settings_str.splitlines(keepends=True),
                            new_settings_str.splitlines(keepends=True),
                            "a/.vscode/settings.json",
                            "b/.vscode/settings.json",
                            n=30,
                        )
                    ),
                )
            )
            choice = prompt_bool(
                "{}\nProceed with modifications to {}?".format(
                    prompt_prefix, vscode_settings
                )
            )
            if not choice:
                return 1

            with open(vscode_settings, "w") as fh:
                fh.write(new_settings_str)

    # Open vscode with new configuration, or ask the user to do so if the
    # binary was not found.
    if vscode_cmd is None:
        print(
            "Please open VS Code manually and load directory: {}".format(
                command_context.topsrcdir
            )
        )
        return 0

    rc = subprocess.call(vscode_cmd + [command_context.topsrcdir])

    if rc != 0:
        command_context.log(
            logging.ERROR,
            "ide",
            {},
            "Unable to open VS Code. Please open VS Code manually and load "
            "directory: {}".format(command_context.topsrcdir),
        )

    return rc
import multiprocessing
import json
import difflib
from mozbuild.code_analysis.utils import ClangTidyConfig
clang_tidy_cfg = ClangTidyConfig(command_context.topsrcdir)
clangd_json = {
"clangd.path": clangd_path,
"clangd.arguments": [
"--compile-commands-dir",
clangd_cc_path,
"-j",
str(multiprocessing.cpu_count() // 2),
"--limit-results",
"0",
"--completion-style",
"detailed",
"--background-index",
"--all-scopes-completion",
"--log",
"info",
"--pch-storage",
"memory",
"--clang-tidy",
"--clang-tidy-checks",
",".join(clang_tidy_cfg.checks),
],
}
# Load the existing .vscode/settings.json file, to check if if needs to
# be created or updated.
try:
with open(vscode_settings) as fh:
old_settings_str = fh.read()
except FileNotFoundError:
print("Configuration for {} will be created.".format(vscode_settings))
old_settings_str = None
if old_settings_str is None:
# No old settings exist
with open(vscode_settings, "w") as fh:
json.dump(clangd_json, fh, indent=4)
else:
# Merge our new settings with the existing settings, and check if we
# need to make changes. Only prompt & write out the updated config
# file if settings actually changed.
try:
old_settings = json.loads(old_settings_str)
prompt_prefix = ""
except ValueError:
old_settings = {}
prompt_prefix = (
"\n**WARNING**: Parsing of existing settings file failed. "
"Existing settings will be lost!"
)
settings = {**old_settings, **clangd_json}
if old_settings != settings:
# Prompt the user with a diff of the changes we're going to make
new_settings_str = json.dumps(settings, indent=4)
print(
"\nThe following modifications to {settings} will occur:\n{diff}".format(
settings=vscode_settings,
diff="".join(
difflib.unified_diff(
old_settings_str.splitlines(keepends=True),
new_settings_str.splitlines(keepends=True),
"a/.vscode/settings.json",
"b/.vscode/settings.json",
n=30,
)
),
)
)
choice = prompt_bool(
"{}\nProceed with modifications to {}?".format(
prompt_prefix, vscode_settings
)
)
if not choice:
return 1
with open(vscode_settings, "w") as fh:
fh.write(new_settings_str)
# Open vscode with new configuration, or ask the user to do so if the
# binary was not found.
if vscode_cmd is None:
print(
"Please open VS Code manually and load directory: {}".format(
command_context.topsrcdir
)
)
return 0
rc = subprocess.call(vscode_cmd + [command_context.topsrcdir])
def _get_clang_tools(self, command_context, clang_tools_path):
if rc != 0:
command_context.log(
logging.ERROR,
"ide",
{},
"Unable to open VS Code. Please open VS Code manually and load "
"directory: {}".format(command_context.topsrcdir),
import shutil
if os.path.isdir(clang_tools_path):
shutil.rmtree(clang_tools_path)
# Create base directory where we store clang binary
os.mkdir(clang_tools_path)
from mozbuild.artifact_commands import PackageFrontend
_artifact_manager = PackageFrontend(command_context._mach_context)
job, _ = command_context.platform
if job is None:
command_context.log(
logging.ERROR,
"ide",
{},
"The current platform isn't supported. "
"Currently only the following platforms are "
"supported: win32/win64, linux64 and macosx64.",
)
return 1
job += "-clang-tidy"
# We want to unpack data in the clang-tidy mozbuild folder
currentWorkingDir = os.getcwd()
os.chdir(clang_tools_path)
rc = _artifact_manager.artifact_toolchain(
command_context, verbose=False, from_build=[job], no_unpack=False, retry=0
)
# Change back the cwd
os.chdir(currentWorkingDir)
return rc
return 0
def _get_clang_tools(command_context, clang_tools_path):
    """Download and unpack the clang-tidy toolchain into *clang_tools_path*.

    Any pre-existing contents of *clang_tools_path* are removed first.
    Returns 0 on success, 1 if the current platform has no clang-tidy
    toolchain artifact, or the artifact manager's non-zero exit code on
    download failure.
    """
    import shutil

    if os.path.isdir(clang_tools_path):
        shutil.rmtree(clang_tools_path)

    # Create base directory where we store clang binary
    os.mkdir(clang_tools_path)

    from mozbuild.artifact_commands import PackageFrontend

    _artifact_manager = PackageFrontend(command_context._mach_context)

    job, _ = command_context.platform

    if job is None:
        command_context.log(
            logging.ERROR,
            "ide",
            {},
            "The current platform isn't supported. "
            "Currently only the following platforms are "
            "supported: win32/win64, linux64 and macosx64.",
        )
        return 1

    job += "-clang-tidy"

    # We want to unpack data in the clang-tidy mozbuild folder
    currentWorkingDir = os.getcwd()
    os.chdir(clang_tools_path)
    try:
        rc = _artifact_manager.artifact_toolchain(
            command_context, verbose=False, from_build=[job], no_unpack=False, retry=0
        )
    finally:
        # Change back the cwd even if the download raises, so the calling
        # mach process is not left inside the toolchain directory.
        os.chdir(currentWorkingDir)

    return rc
def prompt_bool(prompt, limit=5):
""" Prompts the user with prompt and requires a boolean value. """

View File

@ -8,8 +8,9 @@ import argparse
import os
import subprocess
from mach.decorators import CommandArgument, Command
from mach.decorators import CommandArgument, CommandProvider, Command
from mozbuild.base import MachCommandBase
from mozbuild.util import MOZBUILD_METRICS_PATH
from mozbuild.mozconfig import MozconfigLoader
import mozpack.path as mozpath
@ -68,114 +69,160 @@ def _set_priority(priority, verbose):
return True
# Interface to build the tree.
@CommandProvider
class Build(MachCommandBase):
"""Interface to build the tree."""
@Command(
"build",
category="build",
description="Build the tree.",
metrics_path=MOZBUILD_METRICS_PATH,
virtualenv_name="build",
)
@CommandArgument(
"--jobs",
"-j",
default="0",
metavar="jobs",
type=int,
help="Number of concurrent jobs to run. Default is based on the number of "
"CPUs and the estimated size of the jobs (see --job-size).",
)
@CommandArgument(
"--job-size",
default="0",
metavar="size",
type=float,
help="Estimated RAM required, in GiB, for each parallel job. Used to "
"compute a default number of concurrent jobs.",
)
@CommandArgument(
"-C",
"--directory",
default=None,
help="Change to a subdirectory of the build directory first.",
)
@CommandArgument("what", default=None, nargs="*", help=BUILD_WHAT_HELP)
@CommandArgument(
"-v",
"--verbose",
action="store_true",
help="Verbose output for what commands the build is running.",
)
@CommandArgument(
"--keep-going",
action="store_true",
help="Keep building after an error has occurred",
)
@CommandArgument(
"--priority",
default="less",
metavar="priority",
type=str,
help="idle/less/normal/more/high. (Default less)",
)
def build(
self,
command_context,
what=None,
jobs=0,
job_size=0,
directory=None,
verbose=False,
keep_going=False,
priority="less",
):
"""Build the source tree.
@Command(
"build",
category="build",
description="Build the tree.",
metrics_path=MOZBUILD_METRICS_PATH,
virtualenv_name="build",
)
@CommandArgument(
"--jobs",
"-j",
default="0",
metavar="jobs",
type=int,
help="Number of concurrent jobs to run. Default is based on the number of "
"CPUs and the estimated size of the jobs (see --job-size).",
)
@CommandArgument(
"--job-size",
default="0",
metavar="size",
type=float,
help="Estimated RAM required, in GiB, for each parallel job. Used to "
"compute a default number of concurrent jobs.",
)
@CommandArgument(
"-C",
"--directory",
default=None,
help="Change to a subdirectory of the build directory first.",
)
@CommandArgument("what", default=None, nargs="*", help=BUILD_WHAT_HELP)
@CommandArgument(
"-v",
"--verbose",
action="store_true",
help="Verbose output for what commands the build is running.",
)
@CommandArgument(
"--keep-going",
action="store_true",
help="Keep building after an error has occurred",
)
@CommandArgument(
"--priority",
default="less",
metavar="priority",
type=str,
help="idle/less/normal/more/high. (Default less)",
)
def build(
command_context,
what=None,
jobs=0,
job_size=0,
directory=None,
verbose=False,
keep_going=False,
priority="less",
):
"""Build the source tree.
With no arguments, this will perform a full build.
With no arguments, this will perform a full build.
Positional arguments define targets to build. These can be make targets
or patterns like "<dir>/<target>" to indicate a make target within a
directory.
Positional arguments define targets to build. These can be make targets
or patterns like "<dir>/<target>" to indicate a make target within a
directory.
There are a few special targets that can be used to perform a partial
build faster than what `mach build` would perform:
There are a few special targets that can be used to perform a partial
build faster than what `mach build` would perform:
* binaries - compiles and links all C/C++ sources and produces shared
libraries and executables (binaries).
* binaries - compiles and links all C/C++ sources and produces shared
libraries and executables (binaries).
* faster - builds JavaScript, XUL, CSS, etc files.
* faster - builds JavaScript, XUL, CSS, etc files.
"binaries" and "faster" almost fully complement each other. However,
there are build actions not captured by either. If things don't appear to
be rebuilding, perform a vanilla `mach build` to rebuild the world.
"""
from mozbuild.controller.building import BuildDriver
"binaries" and "faster" almost fully complement each other. However,
there are build actions not captured by either. If things don't appear to
be rebuilding, perform a vanilla `mach build` to rebuild the world.
"""
from mozbuild.controller.building import BuildDriver
command_context.log_manager.enable_all_structured_loggers()
command_context.log_manager.enable_all_structured_loggers()
loader = MozconfigLoader(command_context.topsrcdir)
mozconfig = loader.read_mozconfig(loader.AUTODETECT)
configure_args = mozconfig["configure_args"]
doing_pgo = configure_args and "MOZ_PGO=1" in configure_args
# Force verbosity on automation.
verbose = verbose or bool(os.environ.get("MOZ_AUTOMATION", False))
append_env = None
loader = MozconfigLoader(command_context.topsrcdir)
mozconfig = loader.read_mozconfig(loader.AUTODETECT)
configure_args = mozconfig["configure_args"]
doing_pgo = configure_args and "MOZ_PGO=1" in configure_args
# Force verbosity on automation.
verbose = verbose or bool(os.environ.get("MOZ_AUTOMATION", False))
append_env = None
# By setting the current process's priority, by default our child processes
# will also inherit this same priority.
if not _set_priority(priority, verbose):
print("--priority not supported on this platform.")
# By setting the current process's priority, by default our child processes
# will also inherit this same priority.
if not _set_priority(priority, verbose):
print("--priority not supported on this platform.")
if doing_pgo:
if what:
raise Exception(
"Cannot specify targets (%s) in MOZ_PGO=1 builds" % what
)
instr = command_context._spawn(BuildDriver)
orig_topobjdir = instr._topobjdir
instr._topobjdir = mozpath.join(instr._topobjdir, "instrumented")
if doing_pgo:
if what:
raise Exception("Cannot specify targets (%s) in MOZ_PGO=1 builds" % what)
instr = command_context._spawn(BuildDriver)
orig_topobjdir = instr._topobjdir
instr._topobjdir = mozpath.join(instr._topobjdir, "instrumented")
append_env = {"MOZ_PROFILE_GENERATE": "1"}
status = instr.build(
command_context.metrics,
what=what,
jobs=jobs,
job_size=job_size,
directory=directory,
verbose=verbose,
keep_going=keep_going,
mach_context=command_context._mach_context,
append_env=append_env,
)
if status != 0:
return status
append_env = {"MOZ_PROFILE_GENERATE": "1"}
status = instr.build(
# Packaging the instrumented build is required to get the jarlog
# data.
status = instr._run_make(
directory=".",
target="package",
silent=not verbose,
ensure_exit_code=False,
append_env=append_env,
)
if status != 0:
return status
pgo_env = os.environ.copy()
if instr.config_environment.substs.get("CC_TYPE") in ("clang", "clang-cl"):
pgo_env["LLVM_PROFDATA"] = instr.config_environment.substs.get(
"LLVM_PROFDATA"
)
pgo_env["JARLOG_FILE"] = mozpath.join(orig_topobjdir, "jarlog/en-US.log")
pgo_cmd = [
instr.virtualenv_manager.python_path,
mozpath.join(command_context.topsrcdir, "build/pgo/profileserver.py"),
]
subprocess.check_call(pgo_cmd, cwd=instr.topobjdir, env=pgo_env)
# Set the default build to MOZ_PROFILE_USE
append_env = {"MOZ_PROFILE_USE": "1"}
driver = command_context._spawn(BuildDriver)
return driver.build(
command_context.metrics,
what=what,
jobs=jobs,
@ -186,179 +233,141 @@ def build(
mach_context=command_context._mach_context,
append_env=append_env,
)
if status != 0:
return status
# Packaging the instrumented build is required to get the jarlog
# data.
status = instr._run_make(
directory=".",
target="package",
silent=not verbose,
ensure_exit_code=False,
append_env=append_env,
@Command(
"configure",
category="build",
description="Configure the tree (run configure and config.status).",
metrics_path=MOZBUILD_METRICS_PATH,
virtualenv_name="build",
)
@CommandArgument(
"options", default=None, nargs=argparse.REMAINDER, help="Configure options"
)
def configure(
self,
command_context,
options=None,
buildstatus_messages=False,
line_handler=None,
):
from mozbuild.controller.building import BuildDriver
command_context.log_manager.enable_all_structured_loggers()
driver = command_context._spawn(BuildDriver)
return driver.configure(
command_context.metrics,
options=options,
buildstatus_messages=buildstatus_messages,
line_handler=line_handler,
)
if status != 0:
return status
pgo_env = os.environ.copy()
if instr.config_environment.substs.get("CC_TYPE") in ("clang", "clang-cl"):
pgo_env["LLVM_PROFDATA"] = instr.config_environment.substs.get(
"LLVM_PROFDATA"
)
pgo_env["JARLOG_FILE"] = mozpath.join(orig_topobjdir, "jarlog/en-US.log")
pgo_cmd = [
instr.virtualenv_manager.python_path,
mozpath.join(command_context.topsrcdir, "build/pgo/profileserver.py"),
]
subprocess.check_call(pgo_cmd, cwd=instr.topobjdir, env=pgo_env)
# Set the default build to MOZ_PROFILE_USE
append_env = {"MOZ_PROFILE_USE": "1"}
driver = command_context._spawn(BuildDriver)
return driver.build(
command_context.metrics,
what=what,
jobs=jobs,
job_size=job_size,
directory=directory,
verbose=verbose,
keep_going=keep_going,
mach_context=command_context._mach_context,
append_env=append_env,
@Command(
"resource-usage",
category="post-build",
description="Show information about system resource usage for a build.",
virtualenv_name="build",
)
@Command(
"configure",
category="build",
description="Configure the tree (run configure and config.status).",
metrics_path=MOZBUILD_METRICS_PATH,
virtualenv_name="build",
)
@CommandArgument(
"options", default=None, nargs=argparse.REMAINDER, help="Configure options"
)
def configure(
command_context,
options=None,
buildstatus_messages=False,
line_handler=None,
):
from mozbuild.controller.building import BuildDriver
command_context.log_manager.enable_all_structured_loggers()
driver = command_context._spawn(BuildDriver)
return driver.configure(
command_context.metrics,
options=options,
buildstatus_messages=buildstatus_messages,
line_handler=line_handler,
@CommandArgument(
"--address",
default="localhost",
help="Address the HTTP server should listen on.",
)
@CommandArgument(
"--port",
type=int,
default=0,
help="Port number the HTTP server should listen on.",
)
@CommandArgument(
"--browser",
default="firefox",
help="Web browser to automatically open. See webbrowser Python module.",
)
@CommandArgument("--url", help="URL of JSON document to display")
def resource_usage(
self, command_context, address=None, port=None, browser=None, url=None
):
import webbrowser
from mozbuild.html_build_viewer import BuildViewerServer
server = BuildViewerServer(address, port)
@Command(
"resource-usage",
category="post-build",
description="Show information about system resource usage for a build.",
virtualenv_name="build",
)
@CommandArgument(
"--address",
default="localhost",
help="Address the HTTP server should listen on.",
)
@CommandArgument(
"--port",
type=int,
default=0,
help="Port number the HTTP server should listen on.",
)
@CommandArgument(
"--browser",
default="firefox",
help="Web browser to automatically open. See webbrowser Python module.",
)
@CommandArgument("--url", help="URL of JSON document to display")
def resource_usage(command_context, address=None, port=None, browser=None, url=None):
import webbrowser
from mozbuild.html_build_viewer import BuildViewerServer
if url:
server.add_resource_json_url("url", url)
else:
last = command_context._get_state_filename("build_resources.json")
if not os.path.exists(last):
print(
"Build resources not available. If you have performed a "
"build and receive this message, the psutil Python package "
"likely failed to initialize properly."
)
return 1
server = BuildViewerServer(address, port)
server.add_resource_json_file("last", last)
try:
webbrowser.get(browser).open_new_tab(server.url)
except Exception:
print("Cannot get browser specified, trying the default instead.")
try:
browser = webbrowser.get().open_new_tab(server.url)
except Exception:
print("Please open %s in a browser." % server.url)
if url:
server.add_resource_json_url("url", url)
else:
last = command_context._get_state_filename("build_resources.json")
if not os.path.exists(last):
print("Hit CTRL+c to stop server.")
server.run()
@Command(
"build-backend",
category="build",
description="Generate a backend used to build the tree.",
virtualenv_name="build",
)
@CommandArgument(
"-d", "--diff", action="store_true", help="Show a diff of changes."
)
# It would be nice to filter the choices below based on
# conditions, but that is for another day.
@CommandArgument(
"-b",
"--backend",
nargs="+",
choices=sorted(backends),
help="Which backend to build.",
)
@CommandArgument("-v", "--verbose", action="store_true", help="Verbose output.")
@CommandArgument(
"-n",
"--dry-run",
action="store_true",
help="Do everything except writing files out.",
)
def build_backend(
self, command_context, backend, diff=False, verbose=False, dry_run=False
):
python = command_context.virtualenv_manager.python_path
config_status = os.path.join(command_context.topobjdir, "config.status")
if not os.path.exists(config_status):
print(
"Build resources not available. If you have performed a "
"build and receive this message, the psutil Python package "
"likely failed to initialize properly."
"config.status not found. Please run |mach configure| "
"or |mach build| prior to building the %s build backend." % backend
)
return 1
server.add_resource_json_file("last", last)
try:
webbrowser.get(browser).open_new_tab(server.url)
except Exception:
print("Cannot get browser specified, trying the default instead.")
try:
browser = webbrowser.get().open_new_tab(server.url)
except Exception:
print("Please open %s in a browser." % server.url)
args = [python, config_status]
if backend:
args.append("--backend")
args.extend(backend)
if diff:
args.append("--diff")
if verbose:
args.append("--verbose")
if dry_run:
args.append("--dry-run")
print("Hit CTRL+c to stop server.")
server.run()
@Command(
"build-backend",
category="build",
description="Generate a backend used to build the tree.",
virtualenv_name="build",
)
@CommandArgument("-d", "--diff", action="store_true", help="Show a diff of changes.")
# It would be nice to filter the choices below based on
# conditions, but that is for another day.
@CommandArgument(
"-b",
"--backend",
nargs="+",
choices=sorted(backends),
help="Which backend to build.",
)
@CommandArgument("-v", "--verbose", action="store_true", help="Verbose output.")
@CommandArgument(
"-n",
"--dry-run",
action="store_true",
help="Do everything except writing files out.",
)
def build_backend(command_context, backend, diff=False, verbose=False, dry_run=False):
python = command_context.virtualenv_manager.python_path
config_status = os.path.join(command_context.topobjdir, "config.status")
if not os.path.exists(config_status):
print(
"config.status not found. Please run |mach configure| "
"or |mach build| prior to building the %s build backend." % backend
return command_context._run_command_in_objdir(
args=args, pass_thru=True, ensure_exit_code=False
)
return 1
args = [python, config_status]
if backend:
args.append("--backend")
args.extend(backend)
if diff:
args.append("--diff")
if verbose:
args.append("--verbose")
if dry_run:
args.append("--dry-run")
return command_context._run_command_in_objdir(
args=args, pass_thru=True, ensure_exit_code=False
)

File diff suppressed because it is too large Load Diff

View File

@ -6,51 +6,53 @@
from __future__ import absolute_import, print_function
from mach.decorators import CommandArgument, Command
from mach.decorators import CommandArgument, CommandProvider, Command
from mozbuild.base import MachCommandBase
from mozbuild.shellutil import split as shell_split, quote as shell_quote
# Instropection commands.
@CommandProvider
class Introspection(MachCommandBase):
"""Instropection commands."""
@Command(
"compileflags",
category="devenv",
description="Display the compilation flags for a given source file",
)
@CommandArgument(
"what", default=None, help="Source file to display compilation flags for"
)
def compileflags(command_context, what):
from mozbuild.util import resolve_target_to_make
from mozbuild.compilation import util
if not util.check_top_objdir(command_context.topobjdir):
return 1
path_arg = command_context._wrap_path_argument(what)
make_dir, make_target = resolve_target_to_make(
command_context.topobjdir, path_arg.relpath()
@Command(
"compileflags",
category="devenv",
description="Display the compilation flags for a given source file",
)
@CommandArgument(
"what", default=None, help="Source file to display compilation flags for"
)
def compileflags(self, command_context, what):
from mozbuild.util import resolve_target_to_make
from mozbuild.compilation import util
if make_dir is None and make_target is None:
return 1
if not util.check_top_objdir(command_context.topobjdir):
return 1
build_vars = util.get_build_vars(make_dir, command_context)
path_arg = command_context._wrap_path_argument(what)
if what.endswith(".c"):
cc = "CC"
name = "COMPILE_CFLAGS"
else:
cc = "CXX"
name = "COMPILE_CXXFLAGS"
make_dir, make_target = resolve_target_to_make(
command_context.topobjdir, path_arg.relpath()
)
if name not in build_vars:
return
if make_dir is None and make_target is None:
return 1
# Drop the first flag since that is the pathname of the compiler.
flags = (shell_split(build_vars[cc]) + shell_split(build_vars[name]))[1:]
build_vars = util.get_build_vars(make_dir, self)
print(" ".join(shell_quote(arg) for arg in util.sanitize_cflags(flags)))
if what.endswith(".c"):
cc = "CC"
name = "COMPILE_CFLAGS"
else:
cc = "CXX"
name = "COMPILE_CXXFLAGS"
if name not in build_vars:
return
# Drop the first flag since that is the pathname of the compiler.
flags = (shell_split(build_vars[cc]) + shell_split(build_vars[name]))[1:]
print(" ".join(shell_quote(arg) for arg in util.sanitize_cflags(flags)))

View File

@ -9,8 +9,9 @@ import json
import os
import sys
from mach.decorators import CommandArgument, Command, SubCommand
from mach.decorators import CommandArgument, CommandProvider, Command, SubCommand
from mozbuild.base import MachCommandBase
import mozpack.path as mozpath
TOPSRCDIR = os.path.abspath(os.path.join(__file__, "../../../../../"))
@ -20,327 +21,329 @@ class InvalidPathException(Exception):
"""Represents an error due to an invalid path."""
@Command(
"mozbuild-reference",
category="build-dev",
description="View reference documentation on mozbuild files.",
)
@CommandArgument(
"symbol",
default=None,
nargs="*",
help="Symbol to view help on. If not specified, all will be shown.",
)
@CommandArgument(
"--name-only",
"-n",
default=False,
action="store_true",
help="Print symbol names only.",
)
def reference(command_context, symbol, name_only=False):
# mozbuild.sphinx imports some Sphinx modules, so we need to be sure
# the optional Sphinx package is installed.
command_context.activate_virtualenv()
command_context.virtualenv_manager.install_pip_package("Sphinx==1.1.3")
from mozbuild.sphinx import (
format_module,
function_reference,
special_reference,
variable_reference,
@CommandProvider
class MozbuildFileCommands(MachCommandBase):
@Command(
"mozbuild-reference",
category="build-dev",
description="View reference documentation on mozbuild files.",
)
@CommandArgument(
"symbol",
default=None,
nargs="*",
help="Symbol to view help on. If not specified, all will be shown.",
)
@CommandArgument(
"--name-only",
"-n",
default=False,
action="store_true",
help="Print symbol names only.",
)
def reference(self, command_context, symbol, name_only=False):
# mozbuild.sphinx imports some Sphinx modules, so we need to be sure
# the optional Sphinx package is installed.
command_context.activate_virtualenv()
command_context.virtualenv_manager.install_pip_package("Sphinx==1.1.3")
import mozbuild.frontend.context as m
if name_only:
for s in sorted(m.VARIABLES.keys()):
print(s)
for s in sorted(m.FUNCTIONS.keys()):
print(s)
for s in sorted(m.SPECIAL_VARIABLES.keys()):
print(s)
return 0
if len(symbol):
for s in symbol:
if s in m.VARIABLES:
for line in variable_reference(s, *m.VARIABLES[s]):
print(line)
continue
elif s in m.FUNCTIONS:
for line in function_reference(s, *m.FUNCTIONS[s]):
print(line)
continue
elif s in m.SPECIAL_VARIABLES:
for line in special_reference(s, *m.SPECIAL_VARIABLES[s]):
print(line)
continue
print("Could not find symbol: %s" % s)
return 1
return 0
for line in format_module(m):
print(line)
return 0
@Command(
    "file-info", category="build-dev", description="Query for metadata about files."
)
def file_info(command_context):
    """Show files metadata derived from moz.build files.

    moz.build files contain "Files" sub-contexts for declaring metadata
    against file patterns. This command suite is used to query that data.
    """
@SubCommand(
"file-info",
"bugzilla-component",
"Show Bugzilla component info for files listed.",
)
@CommandArgument("-r", "--rev", help="Version control revision to look up info from")
@CommandArgument(
"--format",
choices={"json", "plain"},
default="plain",
help="Output format",
dest="fmt",
)
@CommandArgument("paths", nargs="+", help="Paths whose data to query")
def file_info_bugzilla(command_context, paths, rev=None, fmt=None):
"""Show Bugzilla component for a set of files.
Given a requested set of files (which can be specified using
wildcards), print the Bugzilla component for each file.
"""
components = defaultdict(set)
try:
for p, m in _get_files_info(command_context, paths, rev=rev).items():
components[m.get("BUG_COMPONENT")].add(p)
except InvalidPathException as e:
print(e)
return 1
if fmt == "json":
data = {}
for component, files in components.items():
if not component:
continue
for f in files:
data[f] = [component.product, component.component]
json.dump(data, sys.stdout, sort_keys=True, indent=2)
return
elif fmt == "plain":
comp_to_file = sorted(
(
"UNKNOWN"
if component is None
else "%s :: %s" % (component.product, component.component),
sorted(files),
)
for component, files in components.items()
from mozbuild.sphinx import (
format_module,
function_reference,
special_reference,
variable_reference,
)
for component, files in comp_to_file:
print(component)
for f in files:
print(" %s" % f)
else:
print("unhandled output format: %s" % fmt)
return 1
import mozbuild.frontend.context as m
@SubCommand(
    "file-info", "missing-bugzilla", "Show files missing Bugzilla component info"
)
@CommandArgument("-r", "--rev", help="Version control revision to look up info from")
@CommandArgument(
    "--format",
    choices={"json", "plain"},
    dest="fmt",
    default="plain",
    help="Output format",
)
@CommandArgument("paths", nargs="+", help="Paths whose data to query")
def file_info_missing_bugzilla(command_context, paths, rev=None, fmt=None):
    """List files under *paths* with no BUG_COMPONENT moz.build annotation.

    Output is either one path per line ("plain") or a JSON document of
    the form {"missing": [...]} written to stdout, depending on *fmt*.
    Returns 1 on an invalid path or an unknown output format.
    """
    missing = set()

    try:
        for p, m in _get_files_info(command_context, paths, rev=rev).items():
            if "BUG_COMPONENT" not in m:
                missing.add(p)
    except InvalidPathException as e:
        print(e)
        return 1

    if fmt == "json":
        json.dump({"missing": sorted(missing)}, sys.stdout, indent=2)
        return
    elif fmt == "plain":
        for f in sorted(missing):
            print(f)
    else:
        print("unhandled output format: %s" % fmt)
        return 1
@SubCommand(
    "file-info",
    "bugzilla-automation",
    "Perform Bugzilla metadata analysis as required for automation",
)
@CommandArgument("out_dir", help="Where to write files")
def bugzilla_automation(command_context, out_dir):
    """Analyze and validate Bugzilla metadata as required by automation.

    This will write out JSON and gzipped JSON files for Bugzilla metadata.

    The exit code will be non-0 if Bugzilla metadata fails validation.
    """
    import gzip

    # Paths whose moz.build metadata has no BUG_COMPONENT entry.
    missing_component = set()
    # Distinct BUG_COMPONENT values encountered across the tree.
    seen_components = set()
    # path -> [product, component]
    component_by_path = {}

    # TODO operate in VCS space. This requires teaching the VCS reader
    # to understand wildcards and/or for the relative path issue in the
    # VCS finder to be worked out.
    for p, m in sorted(_get_files_info(command_context, ["**"]).items()):
        if "BUG_COMPONENT" not in m:
            missing_component.add(p)
            print(
                "FileToBugzillaMappingError: Missing Bugzilla component: "
                "%s - Set the BUG_COMPONENT in the moz.build file to fix "
                "the issue." % p
            )
            continue

        c = m["BUG_COMPONENT"]
        seen_components.add(c)
        component_by_path[p] = [c.product, c.component]

    print("Examined %d files" % len(component_by_path))

    # We also have normalized versions of the file to components mapping
    # that requires far less storage space by eliminating redundant strings.
    indexed_components = {
        i: [c.product, c.component] for i, c in enumerate(sorted(seen_components))
    }
    # Reverse map: (product, component) tuple -> index, for leaf lookups.
    components_index = {tuple(v): k for k, v in indexed_components.items()}
    normalized_component = {"components": indexed_components, "paths": {}}

    # Nest each path into a directory tree, storing only the component
    # index at the leaf.
    for p, c in component_by_path.items():
        d = normalized_component["paths"]
        while "/" in p:
            base, p = p.split("/", 1)
            d = d.setdefault(base, {})

        d[p] = components_index[tuple(c)]

    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    components_json = os.path.join(out_dir, "components.json")
    print("Writing %s" % components_json)
    with open(components_json, "w") as fh:
        json.dump(component_by_path, fh, sort_keys=True, indent=2)

    missing_json = os.path.join(out_dir, "missing.json")
    print("Writing %s" % missing_json)
    with open(missing_json, "w") as fh:
        json.dump({"missing": sorted(missing_component)}, fh, indent=2)

    indexed_components_json = os.path.join(out_dir, "components-normalized.json")
    print("Writing %s" % indexed_components_json)
    with open(indexed_components_json, "w") as fh:
        # Don't indent so file is as small as possible.
        json.dump(normalized_component, fh, sort_keys=True)

    # Write compressed versions of JSON files.
    for p in (components_json, indexed_components_json, missing_json):
        gzip_path = "%s.gz" % p
        print("Writing %s" % gzip_path)
        with open(p, "rb") as ifh, gzip.open(gzip_path, "wb") as ofh:
            # Copy in 32 KiB chunks to bound memory use on large files.
            while True:
                data = ifh.read(32768)
                if not data:
                    break

                ofh.write(data)

    # Causes CI task to fail if files are missing Bugzilla annotation.
    if missing_component:
        return 1
def _get_files_info(command_context, paths, rev=None):
reader = command_context.mozbuild_reader(config_mode="empty", vcs_revision=rev)
# Normalize to relative from topsrcdir.
relpaths = []
for p in paths:
a = mozpath.abspath(p)
if not mozpath.basedir(a, [command_context.topsrcdir]):
raise InvalidPathException("path is outside topsrcdir: %s" % p)
relpaths.append(mozpath.relpath(a, command_context.topsrcdir))
# Expand wildcards.
# One variable is for ordering. The other for membership tests.
# (Membership testing on a list can be slow.)
allpaths = []
all_paths_set = set()
for p in relpaths:
if "*" not in p:
if p not in all_paths_set:
if not os.path.exists(mozpath.join(command_context.topsrcdir, p)):
print("(%s does not exist; ignoring)" % p, file=sys.stderr)
if len(symbol):
for s in symbol:
if s in m.VARIABLES:
for line in variable_reference(s, *m.VARIABLES[s]):
print(line)
continue
elif s in m.FUNCTIONS:
for line in function_reference(s, *m.FUNCTIONS[s]):
print(line)
continue
elif s in m.SPECIAL_VARIABLES:
for line in special_reference(s, *m.SPECIAL_VARIABLES[s]):
print(line)
continue
all_paths_set.add(p)
allpaths.append(p)
continue
print("Could not find symbol: %s" % s)
return 1
if rev:
raise InvalidPathException("cannot use wildcard in version control mode")
return 0
# finder is rooted at / for now.
# TODO bug 1171069 tracks changing to relative.
search = mozpath.join(command_context.topsrcdir, p)[1:]
for path, f in reader.finder.find(search):
path = path[len(command_context.topsrcdir) :]
if path not in all_paths_set:
all_paths_set.add(path)
allpaths.append(path)
for line in format_module(m):
print(line)
return reader.files_info(allpaths)
return 0
@Command(
"file-info", category="build-dev", description="Query for metadata about files."
)
def file_info(self, command_context):
"""Show files metadata derived from moz.build files.
@SubCommand(
"file-info", "schedules", "Show the combined SCHEDULES for the files listed."
)
@CommandArgument("paths", nargs="+", help="Paths whose data to query")
def file_info_schedules(command_context, paths):
"""Show what is scheduled by the given files.
moz.build files contain "Files" sub-contexts for declaring metadata
against file patterns. This command suite is used to query that data.
"""
Given a requested set of files (which can be specified using
wildcards), print the total set of scheduled components.
"""
from mozbuild.frontend.reader import EmptyConfig, BuildReader
@SubCommand(
"file-info",
"bugzilla-component",
"Show Bugzilla component info for files listed.",
)
@CommandArgument(
"-r", "--rev", help="Version control revision to look up info from"
)
@CommandArgument(
"--format",
choices={"json", "plain"},
default="plain",
help="Output format",
dest="fmt",
)
@CommandArgument("paths", nargs="+", help="Paths whose data to query")
def file_info_bugzilla(self, command_context, paths, rev=None, fmt=None):
"""Show Bugzilla component for a set of files.
config = EmptyConfig(TOPSRCDIR)
reader = BuildReader(config)
schedules = set()
for p, m in reader.files_info(paths).items():
schedules |= set(m["SCHEDULES"].components)
Given a requested set of files (which can be specified using
wildcards), print the Bugzilla component for each file.
"""
components = defaultdict(set)
try:
for p, m in self._get_files_info(command_context, paths, rev=rev).items():
components[m.get("BUG_COMPONENT")].add(p)
except InvalidPathException as e:
print(e)
return 1
print(", ".join(schedules))
if fmt == "json":
data = {}
for component, files in components.items():
if not component:
continue
for f in files:
data[f] = [component.product, component.component]
json.dump(data, sys.stdout, sort_keys=True, indent=2)
return
elif fmt == "plain":
comp_to_file = sorted(
(
"UNKNOWN"
if component is None
else "%s :: %s" % (component.product, component.component),
sorted(files),
)
for component, files in components.items()
)
for component, files in comp_to_file:
print(component)
for f in files:
print(" %s" % f)
else:
print("unhandled output format: %s" % fmt)
return 1
@SubCommand(
"file-info", "missing-bugzilla", "Show files missing Bugzilla component info"
)
@CommandArgument(
"-r", "--rev", help="Version control revision to look up info from"
)
@CommandArgument(
"--format",
choices={"json", "plain"},
dest="fmt",
default="plain",
help="Output format",
)
@CommandArgument("paths", nargs="+", help="Paths whose data to query")
def file_info_missing_bugzilla(self, command_context, paths, rev=None, fmt=None):
missing = set()
try:
for p, m in self._get_files_info(command_context, paths, rev=rev).items():
if "BUG_COMPONENT" not in m:
missing.add(p)
except InvalidPathException as e:
print(e)
return 1
if fmt == "json":
json.dump({"missing": sorted(missing)}, sys.stdout, indent=2)
return
elif fmt == "plain":
for f in sorted(missing):
print(f)
else:
print("unhandled output format: %s" % fmt)
return 1
@SubCommand(
    "file-info",
    "bugzilla-automation",
    "Perform Bugzilla metadata analysis as required for automation",
)
@CommandArgument("out_dir", help="Where to write files")
def bugzilla_automation(self, command_context, out_dir):
    """Analyze and validate Bugzilla metadata as required by automation.

    This will write out JSON and gzipped JSON files for Bugzilla metadata:
    ``components.json`` (path -> [product, component]), ``missing.json``
    (paths lacking a BUG_COMPONENT), and ``components-normalized.json``
    (a trie-like, deduplicated encoding of the same mapping).

    The exit code will be non-0 if Bugzilla metadata fails validation.
    """
    import gzip
    import shutil

    missing_component = set()
    seen_components = set()
    component_by_path = {}

    # TODO operate in VCS space. This requires teaching the VCS reader
    # to understand wildcards and/or for the relative path issue in the
    # VCS finder to be worked out.
    for p, m in sorted(self._get_files_info(command_context, ["**"]).items()):
        if "BUG_COMPONENT" not in m:
            missing_component.add(p)
            print(
                "FileToBugzillaMappingError: Missing Bugzilla component: "
                "%s - Set the BUG_COMPONENT in the moz.build file to fix "
                "the issue." % p
            )
            continue

        c = m["BUG_COMPONENT"]
        seen_components.add(c)
        component_by_path[p] = [c.product, c.component]

    print("Examined %d files" % len(component_by_path))

    # We also have a normalized versions of the file to components mapping
    # that requires far less storage space by eliminating redundant strings.
    indexed_components = {
        i: [c.product, c.component] for i, c in enumerate(sorted(seen_components))
    }
    components_index = {tuple(v): k for k, v in indexed_components.items()}
    normalized_component = {"components": indexed_components, "paths": {}}

    # Build a nested dict keyed on path segments; leaves hold the index of
    # the component in `indexed_components`.
    for p, c in component_by_path.items():
        d = normalized_component["paths"]
        while "/" in p:
            base, p = p.split("/", 1)
            d = d.setdefault(base, {})
        d[p] = components_index[tuple(c)]

    # exist_ok avoids the check-then-create race of os.path.exists().
    os.makedirs(out_dir, exist_ok=True)

    components_json = os.path.join(out_dir, "components.json")
    print("Writing %s" % components_json)
    with open(components_json, "w") as fh:
        json.dump(component_by_path, fh, sort_keys=True, indent=2)

    missing_json = os.path.join(out_dir, "missing.json")
    print("Writing %s" % missing_json)
    with open(missing_json, "w") as fh:
        json.dump({"missing": sorted(missing_component)}, fh, indent=2)

    indexed_components_json = os.path.join(out_dir, "components-normalized.json")
    print("Writing %s" % indexed_components_json)
    with open(indexed_components_json, "w") as fh:
        # Don't indent so file is as small as possible.
        json.dump(normalized_component, fh, sort_keys=True)

    # Write compressed versions of JSON files.
    for p in (components_json, indexed_components_json, missing_json):
        gzip_path = "%s.gz" % p
        print("Writing %s" % gzip_path)
        with open(p, "rb") as ifh, gzip.open(gzip_path, "wb") as ofh:
            # Chunked, constant-memory copy via the stdlib helper instead
            # of a hand-rolled read/write loop.
            shutil.copyfileobj(ifh, ofh)

    # Causes CI task to fail if files are missing Bugzilla annotation.
    if missing_component:
        return 1
def _get_files_info(self, command_context, paths, rev=None):
    """Return moz.build Files metadata for the requested *paths*.

    Paths are normalized to be relative to topsrcdir; a path outside the
    tree raises InvalidPathException. Wildcard paths are expanded via the
    reader's finder (not supported when *rev* pins a VCS revision).
    Non-existent literal paths are reported on stderr and skipped.
    """
    reader = command_context.mozbuild_reader(config_mode="empty", vcs_revision=rev)
    topsrcdir = command_context.topsrcdir

    # Normalize to relative from topsrcdir.
    relpaths = []
    for requested in paths:
        absolute = mozpath.abspath(requested)
        if not mozpath.basedir(absolute, [topsrcdir]):
            raise InvalidPathException("path is outside topsrcdir: %s" % requested)
        relpaths.append(mozpath.relpath(absolute, topsrcdir))

    # `ordered` preserves first-seen order; `seen` provides fast membership
    # tests (membership testing on a list can be slow).
    ordered = []
    seen = set()

    def record(candidate):
        # Append once, keeping order of first appearance.
        if candidate not in seen:
            seen.add(candidate)
            ordered.append(candidate)

    for relpath in relpaths:
        if "*" in relpath:
            if rev:
                raise InvalidPathException(
                    "cannot use wildcard in version control mode"
                )
            # finder is rooted at / for now.
            # TODO bug 1171069 tracks changing to relative.
            search = mozpath.join(topsrcdir, relpath)[1:]
            for found, _ in reader.finder.find(search):
                record(found[len(topsrcdir):])
        elif relpath not in seen:
            if not os.path.exists(mozpath.join(topsrcdir, relpath)):
                print("(%s does not exist; ignoring)" % relpath, file=sys.stderr)
            else:
                record(relpath)

    return reader.files_info(ordered)
@SubCommand(
    "file-info", "schedules", "Show the combined SCHEDULES for the files listed."
)
@CommandArgument("paths", nargs="+", help="Paths whose data to query")
def file_info_schedules(self, command_context, paths):
    """Show what is scheduled by the given files.

    Given a requested set of files (which can be specified using
    wildcards), print the total set of scheduled components.
    """
    from mozbuild.frontend.reader import EmptyConfig, BuildReader

    config = EmptyConfig(TOPSRCDIR)
    reader = BuildReader(config)
    schedules = set()
    for p, m in reader.files_info(paths).items():
        schedules |= set(m["SCHEDULES"].components)

    # Sort for deterministic output; iteration order of a set is arbitrary,
    # which previously made the printed list unstable between runs.
    print(", ".join(sorted(schedules)))

File diff suppressed because it is too large Load Diff

View File

@ -32,19 +32,20 @@ class TestStaticAnalysis(unittest.TestCase):
# world we should test the clang_analysis mach command
# since that small function is an internal detail.
# But there is zero test infra for that mach command
from mozbuild.code_analysis.mach_commands import _is_ignored_path
from mozbuild.code_analysis.mach_commands import StaticAnalysis
config = MozbuildObject.from_environment()
context = mock.MagicMock()
context.cwd = config.topsrcdir
cmd = StaticAnalysis(context)
command_context = mock.MagicMock()
command_context.topsrcdir = os.path.join("/root", "dir")
path = os.path.join("/root", "dir", "path1")
ignored_dirs_re = r"path1|path2/here|path3\there"
self.assertTrue(
_is_ignored_path(command_context, ignored_dirs_re, path) is not None
cmd._is_ignored_path(command_context, ignored_dirs_re, path) is not None
)
# simulating a win32 env
@ -54,26 +55,27 @@ class TestStaticAnalysis(unittest.TestCase):
os.sep = "\\"
try:
self.assertTrue(
_is_ignored_path(command_context, ignored_dirs_re, win32_path)
cmd._is_ignored_path(command_context, ignored_dirs_re, win32_path)
is not None
)
finally:
os.sep = old_sep
self.assertTrue(
_is_ignored_path(command_context, ignored_dirs_re, "path2") is None
cmd._is_ignored_path(command_context, ignored_dirs_re, "path2") is None
)
def test_get_files(self):
from mozbuild.code_analysis.mach_commands import get_abspath_files
from mozbuild.code_analysis.mach_commands import StaticAnalysis
config = MozbuildObject.from_environment()
context = mock.MagicMock()
context.cwd = config.topsrcdir
cmd = StaticAnalysis(context)
command_context = mock.MagicMock()
command_context.topsrcdir = mozpath.join("/root", "dir")
source = get_abspath_files(
source = cmd.get_abspath_files(
command_context, ["file1", mozpath.join("directory", "file2")]
)

View File

@ -7,170 +7,173 @@ from __future__ import absolute_import, print_function, unicode_literals
import sys
import logging
from mach.decorators import CommandArgument, Command, SubCommand
from mach.decorators import CommandArgument, CommandProvider, Command, SubCommand
from mozbuild.base import MachCommandBase
from mozbuild.vendor.moz_yaml import load_moz_yaml, MozYamlVerifyError
# Fun quirk of ./mach - you can specify a default argument as well as subcommands.
# If the default argument matches a subcommand, the subcommand gets called. If it
# doesn't, we wind up in the default command.
@Command(
"vendor",
category="misc",
description="Vendor third-party dependencies into the source repository.",
)
@CommandArgument(
"--check-for-update",
action="store_true",
help="For scripted use, prints the new commit to update to, or nothing if up to date.",
default=False,
)
@CommandArgument(
"--add-to-exports",
action="store_true",
help="Will attempt to add new header files into any relevant EXPORTS block",
default=False,
)
@CommandArgument(
"--ignore-modified",
action="store_true",
help="Ignore modified files in current checkout",
default=False,
)
@CommandArgument("-r", "--revision", help="Repository tag or commit to update to.")
@CommandArgument(
"--verify", "-v", action="store_true", help="(Only) verify the manifest"
)
@CommandArgument("library", nargs=1, help="The moz.yaml file of the library to vendor.")
def vendor(
command_context,
library,
revision,
ignore_modified=False,
check_for_update=False,
add_to_exports=False,
verify=False,
):
"""
Vendor third-party dependencies into the source repository.
@CommandProvider
class Vendor(MachCommandBase):
# Fun quirk of ./mach - you can specify a default argument as well as subcommands.
# If the default argument matches a subcommand, the subcommand gets called. If it
# doesn't, we wind up in the default command.
@Command(
"vendor",
category="misc",
description="Vendor third-party dependencies into the source repository.",
)
@CommandArgument(
"--check-for-update",
action="store_true",
help="For scripted use, prints the new commit to update to, or nothing if up to date.",
default=False,
)
@CommandArgument(
"--add-to-exports",
action="store_true",
help="Will attempt to add new header files into any relevant EXPORTS block",
default=False,
)
@CommandArgument(
"--ignore-modified",
action="store_true",
help="Ignore modified files in current checkout",
default=False,
)
@CommandArgument("-r", "--revision", help="Repository tag or commit to update to.")
@CommandArgument(
"--verify", "-v", action="store_true", help="(Only) verify the manifest"
)
@CommandArgument(
"library", nargs=1, help="The moz.yaml file of the library to vendor."
)
def vendor(
self,
command_context,
library,
revision,
ignore_modified=False,
check_for_update=False,
add_to_exports=False,
verify=False,
):
"""
Vendor third-party dependencies into the source repository.
Vendoring rust and python can be done with ./mach vendor [rust/python].
Vendoring other libraries can be done with ./mach vendor [arguments] path/to/file.yaml
"""
library = library[0]
assert library not in ["rust", "python"]
Vendoring rust and python can be done with ./mach vendor [rust/python].
Vendoring other libraries can be done with ./mach vendor [arguments] path/to/file.yaml
"""
library = library[0]
assert library not in ["rust", "python"]
command_context.populate_logger()
command_context.log_manager.enable_unstructured()
if check_for_update:
logging.disable(level=logging.CRITICAL)
command_context.populate_logger()
command_context.log_manager.enable_unstructured()
if check_for_update:
logging.disable(level=logging.CRITICAL)
try:
manifest = load_moz_yaml(library)
if verify:
print("%s: OK" % library)
sys.exit(0)
except MozYamlVerifyError as e:
print(e)
sys.exit(1)
try:
manifest = load_moz_yaml(library)
if verify:
print("%s: OK" % library)
sys.exit(0)
except MozYamlVerifyError as e:
print(e)
sys.exit(1)
if not ignore_modified and not check_for_update:
check_modified_files(command_context)
if not revision:
revision = "HEAD"
if not ignore_modified and not check_for_update:
self.check_modified_files(command_context)
if not revision:
revision = "HEAD"
from mozbuild.vendor.vendor_manifest import VendorManifest
from mozbuild.vendor.vendor_manifest import VendorManifest
vendor_command = command_context._spawn(VendorManifest)
vendor_command.vendor(library, manifest, revision, check_for_update, add_to_exports)
vendor_command = command_context._spawn(VendorManifest)
vendor_command.vendor(
library, manifest, revision, check_for_update, add_to_exports
)
sys.exit(0)
sys.exit(0)
def check_modified_files(command_context):
"""
Ensure that there aren't any uncommitted changes to files
in the working copy, since we're going to change some state
on the user.
"""
modified = command_context.repository.get_changed_files("M")
if modified:
command_context.log(
logging.ERROR,
"modified_files",
{},
"""You have uncommitted changes to the following files:
def check_modified_files(self, command_context):
"""
Ensure that there aren't any uncommitted changes to files
in the working copy, since we're going to change some state
on the user.
"""
modified = command_context.repository.get_changed_files("M")
if modified:
command_context.log(
logging.ERROR,
"modified_files",
{},
"""You have uncommitted changes to the following files:
{files}
Please commit or stash these changes before vendoring, or re-run with `--ignore-modified`.
""".format(
files="\n".join(sorted(modified))
),
)
sys.exit(1)
files="\n".join(sorted(modified))
),
)
sys.exit(1)
# =====================================================================
# =====================================================================
@SubCommand(
"vendor",
"rust",
description="Vendor rust crates from crates.io into third_party/rust",
)
@CommandArgument(
"--ignore-modified",
action="store_true",
help="Ignore modified files in current checkout",
default=False,
)
@CommandArgument(
"--build-peers-said-large-imports-were-ok",
action="store_true",
help=(
"Permit overly-large files to be added to the repository. "
"To get permission to set this, raise a question in the #build "
"channel at https://chat.mozilla.org."
),
default=False,
)
def vendor_rust(self, command_context, **kwargs):
from mozbuild.vendor.vendor_rust import VendorRust
vendor_command = command_context._spawn(VendorRust)
vendor_command.vendor(**kwargs)
@SubCommand(
"vendor",
"rust",
description="Vendor rust crates from crates.io into third_party/rust",
)
@CommandArgument(
"--ignore-modified",
action="store_true",
help="Ignore modified files in current checkout",
default=False,
)
@CommandArgument(
"--build-peers-said-large-imports-were-ok",
action="store_true",
help=(
"Permit overly-large files to be added to the repository. "
"To get permission to set this, raise a question in the #build "
"channel at https://chat.mozilla.org."
),
default=False,
)
def vendor_rust(command_context, **kwargs):
from mozbuild.vendor.vendor_rust import VendorRust
# =====================================================================
vendor_command = command_context._spawn(VendorRust)
vendor_command.vendor(**kwargs)
@SubCommand(
"vendor",
"python",
description="Vendor Python packages from pypi.org into third_party/python. "
"Some extra files like docs and tests will automatically be excluded."
"Installs the packages listed in third_party/python/requirements.in and "
"their dependencies.",
)
@CommandArgument(
"--keep-extra-files",
action="store_true",
default=False,
help="Keep all files, including tests and documentation.",
)
def vendor_python(self, command_context, **kwargs):
from mozbuild.vendor.vendor_python import VendorPython
if sys.version_info[:2] != (3, 6):
print(
"You must use Python 3.6 to vendor Python packages. If you don't "
"have Python 3.6, you can request that your package be added by "
"creating a bug: \n"
"https://bugzilla.mozilla.org/enter_bug.cgi?product=Firefox%20Build%20System&component=Mach%20Core" # noqa F401
)
return 1
# =====================================================================
@SubCommand(
"vendor",
"python",
description="Vendor Python packages from pypi.org into third_party/python. "
"Some extra files like docs and tests will automatically be excluded."
"Installs the packages listed in third_party/python/requirements.in and "
"their dependencies.",
)
@CommandArgument(
"--keep-extra-files",
action="store_true",
default=False,
help="Keep all files, including tests and documentation.",
)
def vendor_python(command_context, **kwargs):
from mozbuild.vendor.vendor_python import VendorPython
if sys.version_info[:2] != (3, 6):
print(
"You must use Python 3.6 to vendor Python packages. If you don't "
"have Python 3.6, you can request that your package be added by "
"creating a bug: \n"
"https://bugzilla.mozilla.org/enter_bug.cgi?product=Firefox%20Build%20System&component=Mach%20Core" # noqa F401
)
return 1
vendor_command = command_context._spawn(VendorPython)
vendor_command.vendor(**kwargs)
vendor_command = command_context._spawn(VendorPython)
vendor_command.vendor(**kwargs)

View File

@ -6,8 +6,8 @@ import sys
from functools import partial
import json
from mach.decorators import Command, CommandArgument
from mozbuild.base import MachCommandConditions as conditions
from mach.decorators import CommandProvider, Command, CommandArgument
from mozbuild.base import MachCommandBase, MachCommandConditions as conditions
_TRY_PLATFORMS = {
@ -30,237 +30,245 @@ def get_perftest_parser():
return PerftestArgumentParser
def get_parser():
return run_perftest._mach_command._parser
@CommandProvider
class Perftest(MachCommandBase):
def get_parser(self):
return self.run_perftest._mach_command._parser
@Command(
"perftest",
category="testing",
conditions=[partial(conditions.is_buildapp_in, apps=["firefox", "android"])],
description="Run any flavor of perftest",
parser=get_perftest_parser,
)
def run_perftest(self, command_context, **kwargs):
# original parser that brought us there
original_parser = self.get_parser()
@Command(
"perftest",
category="testing",
conditions=[partial(conditions.is_buildapp_in, apps=["firefox", "android"])],
description="Run any flavor of perftest",
parser=get_perftest_parser,
)
def run_perftest(command_context, **kwargs):
# original parser that brought us there
original_parser = get_parser()
from pathlib import Path
from pathlib import Path
# user selection with fuzzy UI
from mozperftest.utils import ON_TRY
from mozperftest.script import ScriptInfo, ScriptType, ParseError
# user selection with fuzzy UI
from mozperftest.utils import ON_TRY
from mozperftest.script import ScriptInfo, ScriptType, ParseError
if not ON_TRY and kwargs.get("tests", []) == []:
from moztest.resolve import TestResolver
from mozperftest.fzf.fzf import select
if not ON_TRY and kwargs.get("tests", []) == []:
from moztest.resolve import TestResolver
from mozperftest.fzf.fzf import select
resolver = command_context._spawn(TestResolver)
test_objects = list(resolver.resolve_tests(paths=None, flavor="perftest"))
selected = select(test_objects)
resolver = command_context._spawn(TestResolver)
test_objects = list(resolver.resolve_tests(paths=None, flavor="perftest"))
selected = select(test_objects)
def full_path(selection):
__, script_name, __, location = selection.split(" ")
return str(
Path(
command_context.topsrcdir.rstrip(os.sep),
location.strip(os.sep),
script_name,
def full_path(selection):
__, script_name, __, location = selection.split(" ")
return str(
Path(
command_context.topsrcdir.rstrip(os.sep),
location.strip(os.sep),
script_name,
)
)
)
kwargs["tests"] = [full_path(s) for s in selected]
kwargs["tests"] = [full_path(s) for s in selected]
if kwargs["tests"] == []:
print("\nNo selection. Bye!")
if kwargs["tests"] == []:
print("\nNo selection. Bye!")
return
if len(kwargs["tests"]) > 1:
print("\nSorry no support yet for multiple local perftest")
return
if len(kwargs["tests"]) > 1:
print("\nSorry no support yet for multiple local perftest")
return
sel = "\n".join(kwargs["tests"])
print("\nGood job! Best selection.\n%s" % sel)
# if the script is xpcshell, we can force the flavor here
# XXX on multi-selection, what happens if we have seeveral flavors?
try:
script_info = ScriptInfo(kwargs["tests"][0])
except ParseError as e:
if e.exception is IsADirectoryError:
script_info = None
sel = "\n".join(kwargs["tests"])
print("\nGood job! Best selection.\n%s" % sel)
# if the script is xpcshell, we can force the flavor here
# XXX on multi-selection, what happens if we have seeveral flavors?
try:
script_info = ScriptInfo(kwargs["tests"][0])
except ParseError as e:
if e.exception is IsADirectoryError:
script_info = None
else:
raise
else:
raise
else:
if script_info.script_type == ScriptType.xpcshell:
kwargs["flavor"] = script_info.script_type.name
else:
# we set the value only if not provided (so "mobile-browser"
# can be picked)
if "flavor" not in kwargs:
kwargs["flavor"] = "desktop-browser"
if script_info.script_type == ScriptType.xpcshell:
kwargs["flavor"] = script_info.script_type.name
else:
# we set the value only if not provided (so "mobile-browser"
# can be picked)
if "flavor" not in kwargs:
kwargs["flavor"] = "desktop-browser"
push_to_try = kwargs.pop("push_to_try", False)
if push_to_try:
sys.path.append(str(Path(command_context.topsrcdir, "tools", "tryselect")))
push_to_try = kwargs.pop("push_to_try", False)
if push_to_try:
sys.path.append(str(Path(command_context.topsrcdir, "tools", "tryselect")))
from tryselect.push import push_to_try
from tryselect.push import push_to_try
perftest_parameters = {}
args = script_info.update_args(**original_parser.get_user_args(kwargs))
platform = args.pop("try_platform", "linux")
if isinstance(platform, str):
platform = [platform]
perftest_parameters = {}
args = script_info.update_args(**original_parser.get_user_args(kwargs))
platform = args.pop("try_platform", "linux")
if isinstance(platform, str):
platform = [platform]
platform = ["%s-%s" % (plat, script_info.script_type.name) for plat in platform]
platform = [
"%s-%s" % (plat, script_info.script_type.name) for plat in platform
]
for plat in platform:
if plat not in _TRY_PLATFORMS:
# we can extend platform support here: linux, win, macOs, pixel2
# by adding more jobs in taskcluster/ci/perftest/kind.yml
# then picking up the right one here
raise NotImplementedError(
"%r doesn't exist or is not yet supported" % plat
for plat in platform:
if plat not in _TRY_PLATFORMS:
# we can extend platform support here: linux, win, macOs, pixel2
# by adding more jobs in taskcluster/ci/perftest/kind.yml
# then picking up the right one here
raise NotImplementedError(
"%r doesn't exist or is not yet supported" % plat
)
def relative(path):
if path.startswith(command_context.topsrcdir):
return path[len(command_context.topsrcdir) :].lstrip(os.sep)
return path
for name, value in args.items():
# ignore values that are set to default
if original_parser.get_default(name) == value:
continue
if name == "tests":
value = [relative(path) for path in value]
perftest_parameters[name] = value
parameters = {
"try_task_config": {
"tasks": [_TRY_PLATFORMS[plat] for plat in platform],
"perftest-options": perftest_parameters,
},
"try_mode": "try_task_config",
}
task_config = {"parameters": parameters, "version": 2}
if args.get("verbose"):
print("Pushing run to try...")
print(json.dumps(task_config, indent=4, sort_keys=True))
push_to_try("perftest", "perftest", try_task_config=task_config)
return
from mozperftest.runner import run_tests
run_tests(command_context, kwargs, original_parser.get_user_args(kwargs))
print("\nFirefox. Fast For Good.\n")
@CommandProvider
class PerftestTests(MachCommandBase):
@Command("perftest-test", category="testing", description="Run perftest tests")
@CommandArgument(
"tests", default=None, nargs="*", help="Tests to run. By default will run all"
)
@CommandArgument(
"-s",
"--skip-linters",
action="store_true",
default=False,
help="Skip flake8 and black",
)
@CommandArgument(
"-v", "--verbose", action="store_true", default=False, help="Verbose mode"
)
def run_tests(self, command_context, **kwargs):
command_context.activate_virtualenv()
from pathlib import Path
from mozperftest.utils import temporary_env
with temporary_env(
COVERAGE_RCFILE=str(Path(HERE, ".coveragerc")), RUNNING_TESTS="YES"
):
self._run_tests(command_context, **kwargs)
def _run_tests(self, command_context, **kwargs):
from pathlib import Path
from mozperftest.runner import _setup_path
from mozperftest.utils import (
install_package,
ON_TRY,
checkout_script,
checkout_python_script,
)
venv = command_context.virtualenv_manager
skip_linters = kwargs.get("skip_linters", False)
verbose = kwargs.get("verbose", False)
# include in sys.path all deps
_setup_path()
try:
import coverage # noqa
except ImportError:
pydeps = Path(command_context.topsrcdir, "third_party", "python")
vendors = ["coverage"]
if not ON_TRY:
vendors.append("attrs")
# pip-installing dependencies that require compilation or special setup
for dep in vendors:
install_package(
command_context.virtualenv_manager, str(Path(pydeps, dep))
)
def relative(path):
if path.startswith(command_context.topsrcdir):
return path[len(command_context.topsrcdir) :].lstrip(os.sep)
return path
if not ON_TRY and not skip_linters:
cmd = "./mach lint "
if verbose:
cmd += " -v"
cmd += " " + str(HERE)
if not checkout_script(
cmd, label="linters", display=verbose, verbose=verbose
):
raise AssertionError("Please fix your code.")
for name, value in args.items():
# ignore values that are set to default
if original_parser.get_default(name) == value:
continue
if name == "tests":
value = [relative(path) for path in value]
perftest_parameters[name] = value
# running pytest with coverage
# coverage is done in three steps:
# 1/ coverage erase => erase any previous coverage data
# 2/ coverage run pytest ... => run the tests and collect info
# 3/ coverage report => generate the report
tests_dir = Path(HERE, "tests").resolve()
tests = kwargs.get("tests", [])
if tests == []:
tests = str(tests_dir)
run_coverage_check = not skip_linters
else:
run_coverage_check = False
parameters = {
"try_task_config": {
"tasks": [_TRY_PLATFORMS[plat] for plat in platform],
"perftest-options": perftest_parameters,
},
"try_mode": "try_task_config",
}
def _get_test(test):
if Path(test).exists():
return str(test)
return str(tests_dir / test)
task_config = {"parameters": parameters, "version": 2}
if args.get("verbose"):
print("Pushing run to try...")
print(json.dumps(task_config, indent=4, sort_keys=True))
tests = " ".join([_get_test(test) for test in tests])
push_to_try("perftest", "perftest", try_task_config=task_config)
return
# on macOS + try we skip the coverage
# because macOS workers prevent us from installing
# packages from PyPI
if sys.platform == "darwin" and ON_TRY:
run_coverage_check = False
from mozperftest.runner import run_tests
import pytest
run_tests(command_context, kwargs, original_parser.get_user_args(kwargs))
options = "-xs"
if kwargs.get("verbose"):
options += "v"
print("\nFirefox. Fast For Good.\n")
@Command("perftest-test", category="testing", description="Run perftest tests")
@CommandArgument(
"tests", default=None, nargs="*", help="Tests to run. By default will run all"
)
@CommandArgument(
"-s",
"--skip-linters",
action="store_true",
default=False,
help="Skip flake8 and black",
)
@CommandArgument(
"-v", "--verbose", action="store_true", default=False, help="Verbose mode"
)
def run_tests(command_context, **kwargs):
command_context.activate_virtualenv()
from pathlib import Path
from mozperftest.utils import temporary_env
with temporary_env(
COVERAGE_RCFILE=str(Path(HERE, ".coveragerc")), RUNNING_TESTS="YES"
):
_run_tests(command_context, **kwargs)
def _run_tests(command_context, **kwargs):
    """Run the mozperftest test suite under pytest, optionally with coverage.

    Installs missing test dependencies (coverage, plus attrs when running
    locally), runs the in-tree linters unless ``--skip-linters`` was given,
    then invokes pytest through coverage and raises if coverage is too low.
    """
    from pathlib import Path

    from mozperftest.runner import _setup_path
    from mozperftest.utils import (
        ON_TRY,
        checkout_python_script,
        checkout_script,
        install_package,
    )

    venv = command_context.virtualenv_manager
    skip_linters = kwargs.get("skip_linters", False)
    verbose = kwargs.get("verbose", False)

    # include in sys.path all deps
    _setup_path()

    try:
        import coverage  # noqa
    except ImportError:
        pydeps = Path(command_context.topsrcdir, "third_party", "python")
        vendors = ["coverage"]
        if not ON_TRY:
            vendors.append("attrs")

        # pip-installing dependencies that require compilation or special setup
        for dep in vendors:
            install_package(command_context.virtualenv_manager, str(Path(pydeps, dep)))

    if not ON_TRY and not skip_linters:
        cmd = "./mach lint "
        if verbose:
            cmd += " -v"
        cmd += " " + str(HERE)
        if not checkout_script(cmd, label="linters", display=verbose, verbose=verbose):
            raise AssertionError("Please fix your code.")

    # running pytest with coverage
    # coverage is done in three steps:
    # 1/ coverage erase => erase any previous coverage data
    # 2/ coverage run pytest ... => run the tests and collect info
    # 3/ coverage report => generate the report
    tests_dir = Path(HERE, "tests").resolve()
    tests = kwargs.get("tests", [])
    if tests == []:
        tests = str(tests_dir)
        run_coverage_check = not skip_linters
    else:
        run_coverage_check = False

        def _get_test(test):
            # accept existing paths as-is, otherwise resolve them
            # relative to the tests directory
            if Path(test).exists():
                return str(test)
            return str(tests_dir / test)

        # NOTE(review): the join is nested under the explicit-tests branch so
        # the default (a single directory string) passes through unchanged —
        # confirm against upstream mozperftest/mach_commands.py; the diff
        # rendering lost the original indentation here.
        tests = " ".join([_get_test(test) for test in tests])

    # on macOS + try we skip the coverage
    # because macOS workers prevent us from installing
    # packages from PyPI
    if sys.platform == "darwin" and ON_TRY:
        run_coverage_check = False

    import pytest

    options = "-xs"
    if kwargs.get("verbose"):
        options += "v"

    if run_coverage_check:
        assert checkout_python_script(
            venv, "coverage", ["erase"], label="remove old coverage data"
        )

    args = ["run", pytest.__file__, options, "--duration", "10", tests]
    assert checkout_python_script(
        venv, "coverage", args, label="running tests", verbose=verbose
    )
    if run_coverage_check and not checkout_python_script(
        venv, "coverage", ["report"], display=True
    ):
        raise ValueError("Coverage is too low!")

View File

@ -17,10 +17,9 @@ from mach.registrar import Registrar
Registrar.categories = {"testing": []}
Registrar.commands_by_category = {"testing": set()}
from mozbuild.base import MachCommandBase # noqa
import mozperftest.mach_commands # noqa
from mozperftest.environment import MachEnvironment # noqa
from mozperftest.mach_commands import Perftest, PerftestTests # noqa
from mozperftest.tests.support import EXAMPLE_TEST, ROOT, running_on_try # noqa
from mozperftest.utils import temporary_env, silence # noqa
@ -46,7 +45,7 @@ class _TestMachEnvironment(MachEnvironment):
@contextmanager
def _get_command(command=mozperftest.mach_commands.run_perftest):
def _get_command(klass=Perftest):
from mozbuild.base import MozbuildObject
from mozperftest.argparser import PerftestArgumentParser
@ -69,59 +68,59 @@ def _get_command(command=mozperftest.mach_commands.run_perftest):
return _run
try:
command_context = MachCommandBase(context())
obj = klass(context())
parser = PerftestArgumentParser()
obj.get_parser = lambda: parser
if command == mozperftest.mach_commands.run_perftest:
command = _run_perftest(command)
if isinstance(obj, Perftest):
obj.run_perftest = _run_perftest(obj.run_perftest)
with mock.patch("mozperftest.mach_commands.get_parser", new=lambda: parser):
yield command, command_context
yield obj
finally:
shutil.rmtree(context.state_dir)
@mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment)
@mock.patch("mozperftest.mach_commands.MachCommandBase.activate_virtualenv")
def test_command(mocked_func):
    # Run the desktop-browser flavor end-to-end against the example test,
    # with the Mach environment and virtualenv activation stubbed out.
    with _get_command() as test, silence(test):
        test.run_perftest(test, tests=[EXAMPLE_TEST], flavor="desktop-browser")
@mock.patch("mozperftest.MachEnvironment")
@mock.patch("mozperftest.mach_commands.MachCommandBase.activate_virtualenv")
def test_command_iterations(venv, env):
    # The iteration hook bumps the iteration count; verify the environment
    # is exercised once per iteration.
    kwargs = {
        "tests": [EXAMPLE_TEST],
        "hooks": ITERATION_HOOKS,
        "flavor": "desktop-browser",
    }
    with _get_command() as test, silence(test):
        test.run_perftest(test, **kwargs)
    # the hook changes the iteration value to 5.
    # each iteration generates 5 calls, so we want to see 25
    assert len(env.mock_calls) == 25
@mock.patch("mozperftest.MachEnvironment")
@mock.patch("mozperftest.mach_commands.MachCommandBase.activate_virtualenv")
def test_hooks_state(venv, env):
    # Smoke-test that state-carrying hooks run without raising.
    kwargs = {
        "tests": [EXAMPLE_TEST],
        "hooks": STATE_HOOKS,
        "flavor": "desktop-browser",
    }
    with _get_command() as test, silence(test):
        test.run_perftest(test, **kwargs)
@mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment)
@mock.patch("mozbuild.base.MachCommandBase.activate_virtualenv")
@mock.patch("mozperftest.mach_commands.MachCommandBase.activate_virtualenv")
@mock.patch("tryselect.push.push_to_try")
def test_push_command(push_to_try, venv):
with _get_command() as (cmd, command_context), silence(command_context):
cmd(
command_context,
with _get_command() as test, silence(test):
test.run_perftest(
test,
tests=[EXAMPLE_TEST],
flavor="desktop-browser",
push_to_try=True,
@ -132,13 +131,13 @@ def test_push_command(push_to_try, venv):
@mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment)
@mock.patch("mozbuild.base.MachCommandBase.activate_virtualenv")
@mock.patch("mozperftest.mach_commands.MachCommandBase.activate_virtualenv")
@mock.patch("tryselect.push.push_to_try")
def test_push_command_unknown_platforms(push_to_try, venv):
# full stop when a platform is unknown
with _get_command() as (cmd, command_context), pytest.raises(NotImplementedError):
cmd(
command_context,
with _get_command() as test, pytest.raises(NotImplementedError):
test.run_perftest(
test,
tests=[EXAMPLE_TEST],
flavor="desktop-browser",
push_to_try=True,
@ -147,15 +146,12 @@ def test_push_command_unknown_platforms(push_to_try, venv):
@mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment)
@mock.patch("mozbuild.base.MachCommandBase.activate_virtualenv")
@mock.patch("mozperftest.mach_commands.MachCommandBase.activate_virtualenv")
@mock.patch("tryselect.push.push_to_try")
def test_push_command_several_platforms(push_to_try, venv):
with running_on_try(False), _get_command() as (
cmd,
command_context,
): # , silence(command_context):
cmd(
command_context,
with running_on_try(False), _get_command() as test: # , silence(test):
test.run_perftest(
test,
tests=[EXAMPLE_TEST],
flavor="desktop-browser",
push_to_try=True,
@ -169,45 +165,39 @@ def test_push_command_several_platforms(push_to_try, venv):
@mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment)
@mock.patch("mozperftest.mach_commands.MachCommandBase.activate_virtualenv")
def test_doc_flavor(mocked_func):
    # The "doc" flavor should run cleanly against the example test.
    with _get_command() as test, silence(test):
        test.run_perftest(test, tests=[EXAMPLE_TEST], flavor="doc")
@mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment)
@mock.patch("mozperftest.mach_commands.MachCommandBase.activate_virtualenv")
@mock.patch("mozperftest.utils.run_script")
def test_test_runner(*mocked):
    # simulate a local (non-try) run of the perftest-test command
    with running_on_try(False), _get_command(PerftestTests) as test:
        test.run_tests(test, tests=[EXAMPLE_TEST], verbose=True)
@mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment)
@mock.patch("mozperftest.mach_commands.MachCommandBase.activate_virtualenv")
@mock.patch("mozperftest.utils.run_python_script")
def test_test_runner_on_try(*mocked):
    # simulating on try to run the paths parser
    with running_on_try(), _get_command(PerftestTests) as test:
        test.run_tests(test, tests=[EXAMPLE_TEST])
@mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment)
@mock.patch("mozperftest.mach_commands.MachCommandBase.activate_virtualenv")
@mock.patch("mozperftest.utils.run_script")
def test_test_runner_coverage(*mocked):
    # simulating with coverage not installed
    with running_on_try(False), _get_command(PerftestTests) as test:
        # emptying sys.meta_path makes every import fail, so the runner
        # takes its "coverage missing" path; restore it afterwards
        old = list(sys.meta_path)
        sys.meta_path = []
        try:
            test.run_tests(test, tests=[EXAMPLE_TEST])
        finally:
            sys.meta_path = old
@ -233,24 +223,21 @@ def resolve_tests(tests=None):
@mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment)
@mock.patch("mozperftest.mach_commands.MachCommandBase.activate_virtualenv")
@mock.patch("mozperftest.fzf.fzf.select", new=fzf_selection)
@mock.patch("moztest.resolve.TestResolver.resolve_tests", new=resolve_tests())
def test_fzf_flavor(*mocked):
    # No explicit tests: the fzf selector (stubbed) supplies the selection.
    with running_on_try(False), _get_command() as test:  # , silence():
        test.run_perftest(test, flavor="desktop-browser")
@mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment)
@mock.patch("mozperftest.mach_commands.MachCommandBase.activate_virtualenv")
@mock.patch("mozperftest.fzf.fzf.select", new=fzf_selection)
@mock.patch("moztest.resolve.TestResolver.resolve_tests", new=resolve_tests([]))
def test_fzf_nothing_selected(*mocked):
    # An empty fzf selection must not crash the command.
    with running_on_try(False), _get_command() as test, silence():
        test.run_perftest(test, flavor="desktop-browser")
if __name__ == "__main__":

View File

@ -10,135 +10,136 @@ from __future__ import absolute_import, print_function, unicode_literals
import sys
import logging
from mach.decorators import CommandArgument, Command, SubCommand
from mach.decorators import CommandArgument, CommandProvider, Command, SubCommand
from mozbuild.base import MachCommandBase
from mozilla_version.gecko import GeckoVersion
@Command(
    "release",
    category="release",
    description="Task that are part of the release process.",
)
def release(command_context):
    """
    The release subcommands all relate to the release process.
    """
@SubCommand(
    "release",
    "buglist",
    description="Generate list of bugs since the last release.",
)
@CommandArgument(
    "--version",
    required=True,
    type=GeckoVersion.parse,
    help="The version being built.",
)
@CommandArgument("--product", required=True, help="The product being built.")
@CommandArgument("--repo", help="The repo being built.")
@CommandArgument("--revision", required=True, help="The revision being built.")
def buglist(command_context, version, product, revision, repo):
    """Print the Bugzilla URL listing bugs fixed since the last release."""
    setup_logging(command_context)
    from mozrelease.buglist_creator import create_bugs_url

    # The diff rendering dropped the closing paren of this print call;
    # restored here.
    print(
        create_bugs_url(
            product=product,
            current_version=version,
            current_revision=revision,
            repo=repo,
        )
    )
@CommandProvider
class MachCommands(MachCommandBase):
@Command(
"release",
category="release",
description="Task that are part of the release process.",
)
def release(self, command_context):
"""
The release subcommands all relate to the release process.
"""
@SubCommand(
"release",
"send-buglist-email",
description="Send an email with the bugs since the last release.",
)
@CommandArgument(
"--address",
required=True,
action="append",
dest="addresses",
help="The email address to send the bug list to "
"(may be specified more than once.",
)
@CommandArgument(
"--version",
type=GeckoVersion.parse,
required=True,
help="The version being built.",
)
@CommandArgument("--product", required=True, help="The product being built.")
@CommandArgument("--repo", required=True, help="The repo being built.")
@CommandArgument("--revision", required=True, help="The revision being built.")
@CommandArgument("--build-number", required=True, help="The build number")
@CommandArgument("--task-group-id", help="The task group of the build.")
def buglist_email(command_context, **options):
setup_logging(command_context)
from mozrelease.buglist_creator import email_release_drivers
email_release_drivers(**options)
@SubCommand(
"release",
"push-scriptworker-canary",
description="Push tasks to try, to test new scriptworker deployments.",
)
@CommandArgument(
"--address",
required=True,
action="append",
dest="addresses",
help="The email address to send notifications to "
"(may be specified more than once).",
)
@CommandArgument(
"--scriptworker",
required=True,
action="append",
dest="scriptworkers",
help="Scriptworker to run canary for (may be specified more than once).",
)
@CommandArgument(
"--ssh-key-secret",
required=False,
help="Taskcluster secret with ssh-key to use for hg.mozilla.org",
)
def push_scriptworker_canary(command_context, scriptworkers, addresses, ssh_key_secret):
setup_logging(command_context)
from mozrelease.scriptworker_canary import push_canary
push_canary(
scriptworkers=scriptworkers,
addresses=addresses,
ssh_key_secret=ssh_key_secret,
@SubCommand(
"release",
"buglist",
description="Generate list of bugs since the last release.",
)
@CommandArgument(
"--version",
required=True,
type=GeckoVersion.parse,
help="The version being built.",
)
@CommandArgument("--product", required=True, help="The product being built.")
@CommandArgument("--repo", help="The repo being built.")
@CommandArgument("--revision", required=True, help="The revision being built.")
def buglist(self, command_context, version, product, revision, repo):
self.setup_logging(command_context)
from mozrelease.buglist_creator import create_bugs_url
def setup_logging(command_context, quiet=False, verbose=True):
"""
Set up Python logging for all loggers, sending results to stderr (so
that command output can be redirected easily) and adding the typical
mach timestamp.
"""
# remove the old terminal handler
old = command_context.log_manager.replace_terminal_handler(None)
# re-add it, with level and fh set appropriately
if not quiet:
level = logging.DEBUG if verbose else logging.INFO
command_context.log_manager.add_terminal_logging(
fh=sys.stderr,
level=level,
write_interval=old.formatter.write_interval,
write_times=old.formatter.write_times,
print(
create_bugs_url(
product=product,
current_version=version,
current_revision=revision,
repo=repo,
)
)
# all of the taskgraph logging is unstructured logging
command_context.log_manager.enable_unstructured()
@SubCommand(
"release",
"send-buglist-email",
description="Send an email with the bugs since the last release.",
)
@CommandArgument(
"--address",
required=True,
action="append",
dest="addresses",
help="The email address to send the bug list to "
"(may be specified more than once.",
)
@CommandArgument(
"--version",
type=GeckoVersion.parse,
required=True,
help="The version being built.",
)
@CommandArgument("--product", required=True, help="The product being built.")
@CommandArgument("--repo", required=True, help="The repo being built.")
@CommandArgument("--revision", required=True, help="The revision being built.")
@CommandArgument("--build-number", required=True, help="The build number")
@CommandArgument("--task-group-id", help="The task group of the build.")
def buglist_email(self, command_context, **options):
self.setup_logging(command_context)
from mozrelease.buglist_creator import email_release_drivers
email_release_drivers(**options)
@SubCommand(
"release",
"push-scriptworker-canary",
description="Push tasks to try, to test new scriptworker deployments.",
)
@CommandArgument(
"--address",
required=True,
action="append",
dest="addresses",
help="The email address to send notifications to "
"(may be specified more than once).",
)
@CommandArgument(
"--scriptworker",
required=True,
action="append",
dest="scriptworkers",
help="Scriptworker to run canary for (may be specified more than once).",
)
@CommandArgument(
"--ssh-key-secret",
required=False,
help="Taskcluster secret with ssh-key to use for hg.mozilla.org",
)
def push_scriptworker_canary(
self, command_context, scriptworkers, addresses, ssh_key_secret
):
self.setup_logging(command_context)
from mozrelease.scriptworker_canary import push_canary
push_canary(
scriptworkers=scriptworkers,
addresses=addresses,
ssh_key_secret=ssh_key_secret,
)
def setup_logging(self, command_context, quiet=False, verbose=True):
"""
Set up Python logging for all loggers, sending results to stderr (so
that command output can be redirected easily) and adding the typical
mach timestamp.
"""
# remove the old terminal handler
old = command_context.log_manager.replace_terminal_handler(None)
# re-add it, with level and fh set appropriately
if not quiet:
level = logging.DEBUG if verbose else logging.INFO
command_context.log_manager.add_terminal_logging(
fh=sys.stderr,
level=level,
write_interval=old.formatter.write_interval,
write_times=old.formatter.write_times,
)
# all of the taskgraph logging is unstructured logging
command_context.log_manager.enable_unstructured()

View File

@ -24,10 +24,12 @@ from six import iteritems
from mach.decorators import (
Command,
CommandArgument,
CommandProvider,
SubCommand,
)
from mozbuild.base import (
MachCommandBase,
MozbuildObject,
BinaryNotFoundException,
)
@ -50,112 +52,116 @@ def setup():
os.environ["PATH"] = "{}:{}".format(path, os.environ["PATH"])
def remotedir(command_context):
    """Return the absolute path of the in-tree ``remote/`` directory."""
    return os.path.join(command_context.topsrcdir, "remote")
@CommandProvider
class RemoteCommands(MachCommandBase):
def remotedir(self, command_context):
return os.path.join(command_context.topsrcdir, "remote")
@Command("remote", category="misc", description="Remote protocol related operations.")
def remote(command_context):
    """The remote subcommands all relate to the remote protocol."""
    # Bare `mach remote` just shows the help for its subcommands and
    # reports failure, since no actual operation was requested.
    command_context._sub_mach(["help", "remote"])
    return 1
@SubCommand(
"remote", "vendor-puppeteer", "Pull in latest changes of the Puppeteer client."
)
@CommandArgument(
"--repository",
metavar="REPO",
required=True,
help="The (possibly remote) repository to clone from.",
)
@CommandArgument(
"--commitish",
metavar="COMMITISH",
required=True,
help="The commit or tag object name to check out.",
)
@CommandArgument(
"--no-install",
dest="install",
action="store_false",
default=True,
help="Do not install the just-pulled Puppeteer package,",
)
def vendor_puppeteer(command_context, repository, commitish, install):
puppeteer_dir = os.path.join(remotedir(command_context), "test", "puppeteer")
# Preserve our custom mocha reporter
shutil.move(
os.path.join(puppeteer_dir, "json-mocha-reporter.js"),
remotedir(command_context),
@Command(
"remote", category="misc", description="Remote protocol related operations."
)
shutil.rmtree(puppeteer_dir, ignore_errors=True)
os.makedirs(puppeteer_dir)
with TemporaryDirectory() as tmpdir:
git("clone", "-q", repository, tmpdir)
git("checkout", commitish, worktree=tmpdir)
git(
"checkout-index",
"-a",
"-f",
"--prefix",
"{}/".format(puppeteer_dir),
worktree=tmpdir,
)
def remote(self, command_context):
"""The remote subcommands all relate to the remote protocol."""
command_context._sub_mach(["help", "remote"])
return 1
# remove files which may interfere with git checkout of central
try:
os.remove(os.path.join(puppeteer_dir, ".gitattributes"))
os.remove(os.path.join(puppeteer_dir, ".gitignore"))
except OSError:
pass
unwanted_dirs = ["experimental", "docs"]
for dir in unwanted_dirs:
dir_path = os.path.join(puppeteer_dir, dir)
if os.path.isdir(dir_path):
shutil.rmtree(dir_path)
shutil.move(
os.path.join(remotedir(command_context), "json-mocha-reporter.js"),
puppeteer_dir,
@SubCommand(
"remote", "vendor-puppeteer", "Pull in latest changes of the Puppeteer client."
)
import yaml
annotation = {
"schema": 1,
"bugzilla": {
"product": "Remote Protocol",
"component": "Agent",
},
"origin": {
"name": "puppeteer",
"description": "Headless Chrome Node API",
"url": repository,
"license": "Apache-2.0",
"release": commitish,
},
}
with open(os.path.join(puppeteer_dir, "moz.yaml"), "w") as fh:
yaml.safe_dump(
annotation,
fh,
default_flow_style=False,
encoding="utf-8",
allow_unicode=True,
@CommandArgument(
"--repository",
metavar="REPO",
required=True,
help="The (possibly remote) repository to clone from.",
)
@CommandArgument(
"--commitish",
metavar="COMMITISH",
required=True,
help="The commit or tag object name to check out.",
)
@CommandArgument(
"--no-install",
dest="install",
action="store_false",
default=True,
help="Do not install the just-pulled Puppeteer package,",
)
def vendor_puppeteer(self, command_context, repository, commitish, install):
puppeteer_dir = os.path.join(
self.remotedir(command_context), "test", "puppeteer"
)
if install:
env = {"PUPPETEER_SKIP_DOWNLOAD": "1"}
npm(
"install",
cwd=os.path.join(command_context.topsrcdir, puppeteer_dir),
env=env,
# Preserve our custom mocha reporter
shutil.move(
os.path.join(puppeteer_dir, "json-mocha-reporter.js"),
self.remotedir(command_context),
)
shutil.rmtree(puppeteer_dir, ignore_errors=True)
os.makedirs(puppeteer_dir)
with TemporaryDirectory() as tmpdir:
git("clone", "-q", repository, tmpdir)
git("checkout", commitish, worktree=tmpdir)
git(
"checkout-index",
"-a",
"-f",
"--prefix",
"{}/".format(puppeteer_dir),
worktree=tmpdir,
)
# remove files which may interfere with git checkout of central
try:
os.remove(os.path.join(puppeteer_dir, ".gitattributes"))
os.remove(os.path.join(puppeteer_dir, ".gitignore"))
except OSError:
pass
unwanted_dirs = ["experimental", "docs"]
for dir in unwanted_dirs:
dir_path = os.path.join(puppeteer_dir, dir)
if os.path.isdir(dir_path):
shutil.rmtree(dir_path)
shutil.move(
os.path.join(self.remotedir(command_context), "json-mocha-reporter.js"),
puppeteer_dir,
)
import yaml
annotation = {
"schema": 1,
"bugzilla": {
"product": "Remote Protocol",
"component": "Agent",
},
"origin": {
"name": "puppeteer",
"description": "Headless Chrome Node API",
"url": repository,
"license": "Apache-2.0",
"release": commitish,
},
}
with open(os.path.join(puppeteer_dir, "moz.yaml"), "w") as fh:
yaml.safe_dump(
annotation,
fh,
default_flow_style=False,
encoding="utf-8",
allow_unicode=True,
)
if install:
env = {"PUPPETEER_SKIP_DOWNLOAD": "1"}
npm(
"install",
cwd=os.path.join(command_context.topsrcdir, puppeteer_dir),
env=env,
)
def git(*args, **kwargs):
@ -578,118 +584,122 @@ def create_parser_puppeteer():
return p
@Command(
"puppeteer-test",
category="testing",
description="Run Puppeteer unit tests.",
parser=create_parser_puppeteer,
)
def puppeteer_test(
command_context,
binary=None,
ci=False,
enable_fission=False,
enable_webrender=False,
headless=False,
extra_prefs=None,
extra_options=None,
verbosity=0,
tests=None,
product="firefox",
write_results=None,
subset=False,
**kwargs
):
logger = mozlog.commandline.setup_logging(
"puppeteer-test", kwargs, {"mach": sys.stdout}
@CommandProvider
class PuppeteerTest(MachCommandBase):
@Command(
"puppeteer-test",
category="testing",
description="Run Puppeteer unit tests.",
parser=create_parser_puppeteer,
)
def puppeteer_test(
self,
command_context,
binary=None,
ci=False,
enable_fission=False,
enable_webrender=False,
headless=False,
extra_prefs=None,
extra_options=None,
verbosity=0,
tests=None,
product="firefox",
write_results=None,
subset=False,
**kwargs
):
# moztest calls this programmatically with test objects or manifests
if "test_objects" in kwargs and tests is not None:
logger.error("Expected either 'test_objects' or 'tests'")
exit(1)
logger = mozlog.commandline.setup_logging(
"puppeteer-test", kwargs, {"mach": sys.stdout}
)
if product != "firefox" and extra_prefs is not None:
logger.error("User preferences are not recognized by %s" % product)
exit(1)
# moztest calls this programmatically with test objects or manifests
if "test_objects" in kwargs and tests is not None:
logger.error("Expected either 'test_objects' or 'tests'")
exit(1)
if "test_objects" in kwargs:
tests = []
for test in kwargs["test_objects"]:
tests.append(test["path"])
if product != "firefox" and extra_prefs is not None:
logger.error("User preferences are not recognized by %s" % product)
exit(1)
prefs = {}
for s in extra_prefs or []:
kv = s.split("=")
if len(kv) != 2:
logger.error("syntax error in --setpref={}".format(s))
exit(EX_USAGE)
prefs[kv[0]] = kv[1].strip()
if "test_objects" in kwargs:
tests = []
for test in kwargs["test_objects"]:
tests.append(test["path"])
options = {}
for s in extra_options or []:
kv = s.split("=")
if len(kv) != 2:
logger.error("syntax error in --setopt={}".format(s))
exit(EX_USAGE)
options[kv[0]] = kv[1].strip()
prefs = {}
for s in extra_prefs or []:
kv = s.split("=")
if len(kv) != 2:
logger.error("syntax error in --setpref={}".format(s))
exit(EX_USAGE)
prefs[kv[0]] = kv[1].strip()
if enable_fission:
prefs.update({"fission.autostart": True})
options = {}
for s in extra_options or []:
kv = s.split("=")
if len(kv) != 2:
logger.error("syntax error in --setopt={}".format(s))
exit(EX_USAGE)
options[kv[0]] = kv[1].strip()
if verbosity == 1:
prefs["remote.log.level"] = "Debug"
elif verbosity > 1:
prefs["remote.log.level"] = "Trace"
if verbosity > 2:
prefs["remote.log.truncate"] = False
if enable_fission:
prefs.update({"fission.autostart": True})
install_puppeteer(command_context, product, ci)
if verbosity == 1:
prefs["remote.log.level"] = "Debug"
elif verbosity > 1:
prefs["remote.log.level"] = "Trace"
if verbosity > 2:
prefs["remote.log.truncate"] = False
params = {
"binary": binary,
"headless": headless,
"enable_webrender": enable_webrender,
"extra_prefs": prefs,
"product": product,
"extra_launcher_options": options,
"write_results": write_results,
"subset": subset,
}
puppeteer = command_context._spawn(PuppeteerRunner)
try:
return puppeteer.run_test(logger, *tests, **params)
except BinaryNotFoundException as e:
logger.error(e)
logger.info(e.help())
exit(1)
except Exception as e:
exit(EX_SOFTWARE, e)
self.install_puppeteer(command_context, product, ci)
params = {
"binary": binary,
"headless": headless,
"enable_webrender": enable_webrender,
"extra_prefs": prefs,
"product": product,
"extra_launcher_options": options,
"write_results": write_results,
"subset": subset,
}
puppeteer = command_context._spawn(PuppeteerRunner)
try:
return puppeteer.run_test(logger, *tests, **params)
except BinaryNotFoundException as e:
logger.error(e)
logger.info(e.help())
exit(1)
except Exception as e:
exit(EX_SOFTWARE, e)
def install_puppeteer(command_context, product, ci):
    """Install the vendored Puppeteer package with npm.

    Skips the Chromium download unless testing against Chrome, and clobbers
    the compiled ``lib/`` directory when TypeScript sources changed so the
    `tsc compile` step reruns.
    """
    setup()
    env = {}
    from mozversioncontrol import get_repository_object

    repo = get_repository_object(command_context.topsrcdir)
    puppeteer_dir = os.path.join("remote", "test", "puppeteer")
    changed_files = False
    for f in repo.get_changed_files():
        if f.startswith(puppeteer_dir) and f.endswith(".ts"):
            changed_files = True
            break

    if product != "chrome":
        env["PUPPETEER_SKIP_DOWNLOAD"] = "1"
    lib_dir = os.path.join(command_context.topsrcdir, puppeteer_dir, "lib")
    if changed_files and os.path.isdir(lib_dir):
        # clobber lib to force `tsc compile` step
        shutil.rmtree(lib_dir)

    # `npm ci` for reproducible CI installs, `npm install` locally
    command = "ci" if ci else "install"
    npm(command, cwd=os.path.join(command_context.topsrcdir, puppeteer_dir), env=env)
def exit(code, error=None):

View File

@ -6,12 +6,14 @@ import os
import sys
from mach.util import UserError
from mozbuild.base import MachCommandBase
from mozpack.files import FileFinder
from mozpack.path import basedir
from mach.decorators import (
CommandArgument,
CommandProvider,
Command,
)
@ -62,55 +64,56 @@ def is_excluded_directory(directory, exclusions):
return False
@Command(
    "generate-test-certs",
    category="devenv",
    description="Generate test certificates and keys from specifications.",
)
@CommandArgument(
    "specifications",
    nargs="*",
    help="Specification files for test certs. If omitted, all certs are regenerated.",
)
def generate_test_certs(command_context, specifications):
    """Generate test certificates and keys from specifications.

    Each positional argument must be a ``.certspec`` or ``.keyspec`` file;
    with no arguments, every specification in the tree is regenerated.
    Raises UserError for files of any other type.
    """
    command_context.activate_virtualenv()
    # pycert/pykey live in the virtualenv activated above
    import pycert
    import pykey

    if not specifications:
        specifications = find_all_specifications(command_context)

    for specification in specifications:
        if is_certspec_file(specification):
            module = pycert
        elif is_keyspec_file(specification):
            module = pykey
        else:
            raise UserError(
                "'{}' is not a .certspec or .keyspec file".format(specification)
            )
        run_module_main_on(module, os.path.abspath(specification))
    return 0
def find_all_specifications(command_context):
    """Searches the source tree for all specification files
    and returns them as a list of absolute paths."""
    specifications = []
    # only these trees contain test-cert specifications
    inclusions = [
        "netwerk/test/unit",
        "security/manager/ssl/tests",
        "services/settings/test/unit/test_remote_settings_signatures",
        "testing/xpcshell/moz-http2",
    ]
    exclusions = ["security/manager/ssl/tests/unit/test_signed_apps"]
    finder = FileFinder(command_context.topsrcdir)
    for inclusion_path in inclusions:
        for f, _ in finder.find(inclusion_path):
            # skip anything under an excluded directory
            if basedir(f, exclusions):
                continue
            if is_specification_file(f):
                specifications.append(os.path.join(command_context.topsrcdir, f))
    return specifications
def find_all_specifications(self, command_context):
"""Searches the source tree for all specification files
and returns them as a list."""
specifications = []
inclusions = [
"netwerk/test/unit",
"security/manager/ssl/tests",
"services/settings/test/unit/test_remote_settings_signatures",
"testing/xpcshell/moz-http2",
]
exclusions = ["security/manager/ssl/tests/unit/test_signed_apps"]
finder = FileFinder(command_context.topsrcdir)
for inclusion_path in inclusions:
for f, _ in finder.find(inclusion_path):
if basedir(f, exclusions):
continue
if is_specification_file(f):
specifications.append(os.path.join(command_context.topsrcdir, f))
return specifications

View File

@ -18,9 +18,11 @@ from functools import partial
from mach.decorators import (
Command,
CommandArgument,
CommandProvider,
SettingsProvider,
SubCommand,
)
from mozbuild.base import MachCommandBase
import taskgraph.main
from taskgraph.main import commands as taskgraph_commands
@ -143,309 +145,302 @@ def get_taskgraph_decision_parser():
return parser
@Command(
    "taskgraph",
    category="ci",
    description="Manipulate TaskCluster task graphs defined in-tree",
)
def taskgraph_command(command_context):
    """The taskgraph subcommands all relate to the generation of task graphs
    for Gecko continuous integration. A task graph is a set of tasks linked
    by dependencies: for example, a binary must be built before it is tested,
    and that build may further depend on various toolchains, libraries, etc.
    """
@SubCommand(
"taskgraph",
"tasks",
description="Show all tasks in the taskgraph",
parser=partial(get_taskgraph_command_parser, "tasks"),
)
def taskgraph_tasks(command_context, **options):
return run_show_taskgraph(command_context, **options)
@SubCommand(
"taskgraph",
"full",
description="Show the full taskgraph",
parser=partial(get_taskgraph_command_parser, "full"),
)
def taskgraph_full(command_context, **options):
return run_show_taskgraph(command_context, **options)
@SubCommand(
"taskgraph",
"target",
description="Show the target task set",
parser=partial(get_taskgraph_command_parser, "target"),
)
def taskgraph_target(command_context, **options):
return run_show_taskgraph(command_context, **options)
@SubCommand(
"taskgraph",
"target-graph",
description="Show the target taskgraph",
parser=partial(get_taskgraph_command_parser, "target-graph"),
)
def taskgraph_target_graph(command_context, **options):
return run_show_taskgraph(command_context, **options)
@SubCommand(
"taskgraph",
"optimized",
description="Show the optimized taskgraph",
parser=partial(get_taskgraph_command_parser, "optimized"),
)
def taskgraph_optimized(command_context, **options):
return run_show_taskgraph(command_context, **options)
@SubCommand(
"taskgraph",
"morphed",
description="Show the morphed taskgraph",
parser=partial(get_taskgraph_command_parser, "morphed"),
)
def taskgraph_morphed(command_context, **options):
return run_show_taskgraph(command_context, **options)
def run_show_taskgraph(command_context, **options):
# There are cases where we don't want to set up mach logging (e.g logs
# are being redirected to disk). By monkeypatching the 'setup_logging'
# function we can let 'taskgraph.main' decide whether or not to log to
# the terminal.
taskgraph.main.setup_logging = partial(
setup_logging,
command_context,
quiet=options["quiet"],
verbose=options["verbose"],
@CommandProvider
class MachCommands(MachCommandBase):
@Command(
"taskgraph",
category="ci",
description="Manipulate TaskCluster task graphs defined in-tree",
)
show_taskgraph = options.pop("func")
return show_taskgraph(options)
def taskgraph(self, command_context):
"""The taskgraph subcommands all relate to the generation of task graphs
for Gecko continuous integration. A task graph is a set of tasks linked
by dependencies: for example, a binary must be built before it is tested,
and that build may further depend on various toolchains, libraries, etc.
"""
@SubCommand(
"taskgraph",
"tasks",
description="Show all tasks in the taskgraph",
parser=partial(get_taskgraph_command_parser, "tasks"),
)
def taskgraph_tasks(self, command_context, **options):
return self.run_show_taskgraph(command_context, **options)
@SubCommand(
"taskgraph",
"full",
description="Show the full taskgraph",
parser=partial(get_taskgraph_command_parser, "full"),
)
def taskgraph_full(self, command_context, **options):
return self.run_show_taskgraph(command_context, **options)
@SubCommand(
"taskgraph",
"target",
description="Show the target task set",
parser=partial(get_taskgraph_command_parser, "target"),
)
def taskgraph_target(self, command_context, **options):
return self.run_show_taskgraph(command_context, **options)
@SubCommand(
"taskgraph",
"target-graph",
description="Show the target taskgraph",
parser=partial(get_taskgraph_command_parser, "target-graph"),
)
def taskgraph_target_graph(self, command_context, **options):
return self.run_show_taskgraph(command_context, **options)
@SubCommand(
"taskgraph",
"optimized",
description="Show the optimized taskgraph",
parser=partial(get_taskgraph_command_parser, "optimized"),
)
def taskgraph_optimized(self, command_context, **options):
return self.run_show_taskgraph(command_context, **options)
@SubCommand(
"taskgraph",
"morphed",
description="Show the morphed taskgraph",
parser=partial(get_taskgraph_command_parser, "morphed"),
)
def taskgraph_morphed(self, command_context, **options):
return self.run_show_taskgraph(command_context, **options)
def run_show_taskgraph(self, command_context, **options):
# There are cases where we don't want to set up mach logging (e.g logs
# are being redirected to disk). By monkeypatching the 'setup_logging'
# function we can let 'taskgraph.main' decide whether or not to log to
# the terminal.
taskgraph.main.setup_logging = partial(
self.setup_logging,
command_context,
quiet=options["quiet"],
verbose=options["verbose"],
)
show_taskgraph = options.pop("func")
return show_taskgraph(options)
@SubCommand("taskgraph", "actions", description="Write actions.json to stdout")
@CommandArgument(
"--root", "-r", help="root of the taskgraph definition relative to topsrcdir"
)
@CommandArgument(
"--quiet", "-q", action="store_true", help="suppress all logging output"
)
@CommandArgument(
"--verbose",
"-v",
action="store_true",
help="include debug-level logging output",
)
@CommandArgument(
"--parameters",
"-p",
default="project=mozilla-central",
help="parameters file (.yml or .json; see "
"`taskcluster/docs/parameters.rst`)`",
)
def taskgraph_actions(self, command_context, **options):
return self.show_actions(command_context, options)
@SubCommand(
"taskgraph",
"decision",
description="Run the decision task",
parser=get_taskgraph_decision_parser,
)
def taskgraph_decision(self, command_context, **options):
"""Run the decision task: generate a task graph and submit to
TaskCluster. This is only meant to be called within decision tasks,
and requires a great many arguments. Commands like `mach taskgraph
optimized` are better suited to use on the command line, and can take
the parameters file generated by a decision task."""
try:
self.setup_logging(command_context)
start = time.monotonic()
ret = taskgraph_commands["decision"].func(options)
end = time.monotonic()
if os.environ.get("MOZ_AUTOMATION") == "1":
perfherder_data = {
"framework": {"name": "build_metrics"},
"suites": [
{
"name": "decision",
"value": end - start,
"lowerIsBetter": True,
"shouldAlert": True,
"subtests": [],
}
],
}
print(
"PERFHERDER_DATA: {}".format(json.dumps(perfherder_data)),
file=sys.stderr,
)
return ret
except Exception:
traceback.print_exc()
sys.exit(1)
@SubCommand(
"taskgraph",
"cron",
description="Provide a pointer to the new `.cron.yml` handler.",
)
def taskgraph_cron(self, command_context, **options):
print(
'Handling of ".cron.yml" files has move to '
"https://hg.mozilla.org/ci/ci-admin/file/default/build-decision."
)
sys.exit(1)
@SubCommand(
"taskgraph",
"action-callback",
description="Run action callback used by action tasks",
parser=partial(get_taskgraph_command_parser, "action-callback"),
)
def action_callback(self, command_context, **options):
self.setup_logging(command_context)
taskgraph_commands["action-callback"].func(options)
@SubCommand(
"taskgraph",
"test-action-callback",
description="Run an action callback in a testing mode",
parser=partial(get_taskgraph_command_parser, "test-action-callback"),
)
def test_action_callback(self, command_context, **options):
self.setup_logging(command_context)
if not options["parameters"]:
options["parameters"] = "project=mozilla-central"
taskgraph_commands["test-action-callback"].func(options)
def setup_logging(self, command_context, quiet=False, verbose=True):
"""
Set up Python logging for all loggers, sending results to stderr (so
that command output can be redirected easily) and adding the typical
mach timestamp.
"""
# remove the old terminal handler
old = command_context.log_manager.replace_terminal_handler(None)
# re-add it, with level and fh set appropriately
if not quiet:
level = logging.DEBUG if verbose else logging.INFO
command_context.log_manager.add_terminal_logging(
fh=sys.stderr,
level=level,
write_interval=old.formatter.write_interval,
write_times=old.formatter.write_times,
)
# all of the taskgraph logging is unstructured logging
command_context.log_manager.enable_unstructured()
def show_actions(self, command_context, options):
import taskgraph
import taskgraph.actions
import taskgraph.generator
import taskgraph.parameters
try:
self.setup_logging(
command_context, quiet=options["quiet"], verbose=options["verbose"]
)
parameters = taskgraph.parameters.parameters_loader(options["parameters"])
tgg = taskgraph.generator.TaskGraphGenerator(
root_dir=options.get("root"),
parameters=parameters,
)
actions = taskgraph.actions.render_actions_json(
tgg.parameters,
tgg.graph_config,
decision_task_id="DECISION-TASK",
)
print(json.dumps(actions, sort_keys=True, indent=2, separators=(",", ": ")))
except Exception:
traceback.print_exc()
sys.exit(1)
@SubCommand("taskgraph", "actions", description="Write actions.json to stdout")
@CommandArgument(
"--root", "-r", help="root of the taskgraph definition relative to topsrcdir"
)
@CommandArgument(
"--quiet", "-q", action="store_true", help="suppress all logging output"
)
@CommandArgument(
"--verbose",
"-v",
action="store_true",
help="include debug-level logging output",
)
@CommandArgument(
"--parameters",
"-p",
default="project=mozilla-central",
help="parameters file (.yml or .json; see `taskcluster/docs/parameters.rst`)`",
)
def taskgraph_actions(command_context, **options):
return show_actions(command_context, options)
@CommandProvider
class TaskClusterImagesProvider(MachCommandBase):
@Command(
"taskcluster-load-image",
category="ci",
description="Load a pre-built Docker image. Note that you need to "
"have docker installed and running for this to work.",
parser=partial(get_taskgraph_command_parser, "load-image"),
)
def load_image(self, command_context, **kwargs):
taskgraph_commands["load-image"].func(kwargs)
@Command(
"taskcluster-build-image",
category="ci",
description="Build a Docker image",
parser=partial(get_taskgraph_command_parser, "build-image"),
)
def build_image(self, command_context, **kwargs):
try:
taskgraph_commands["build-image"].func(kwargs)
except Exception:
traceback.print_exc()
sys.exit(1)
@Command(
"taskcluster-image-digest",
category="ci",
description="Print the digest of the image of this name based on the "
"current contents of the tree.",
parser=partial(get_taskgraph_command_parser, "build-image"),
)
def image_digest(self, command_context, **kwargs):
taskgraph_commands["image-digest"].func(kwargs)
@SubCommand(
"taskgraph",
"decision",
description="Run the decision task",
parser=get_taskgraph_decision_parser,
)
def taskgraph_decision(command_context, **options):
"""Run the decision task: generate a task graph and submit to
TaskCluster. This is only meant to be called within decision tasks,
and requires a great many arguments. Commands like `mach taskgraph
optimized` are better suited to use on the command line, and can take
the parameters file generated by a decision task."""
try:
setup_logging(command_context)
start = time.monotonic()
ret = taskgraph_commands["decision"].func(options)
end = time.monotonic()
if os.environ.get("MOZ_AUTOMATION") == "1":
perfherder_data = {
"framework": {"name": "build_metrics"},
"suites": [
{
"name": "decision",
"value": end - start,
"lowerIsBetter": True,
"shouldAlert": True,
"subtests": [],
}
],
@CommandProvider
class TaskClusterPartialsData(MachCommandBase):
@Command(
"release-history",
category="ci",
description="Query balrog for release history used by enable partials generation",
)
@CommandArgument(
"-b",
"--branch",
help="The gecko project branch used in balrog, such as "
"mozilla-central, release, maple",
)
@CommandArgument(
"--product", default="Firefox", help="The product identifier, such as 'Firefox'"
)
def generate_partials_builds(self, command_context, product, branch):
from taskgraph.util.partials import populate_release_history
try:
import yaml
release_history = {
"release_history": populate_release_history(product, branch)
}
print(
"PERFHERDER_DATA: {}".format(json.dumps(perfherder_data)),
file=sys.stderr,
yaml.safe_dump(
release_history, allow_unicode=True, default_flow_style=False
)
)
return ret
except Exception:
traceback.print_exc()
sys.exit(1)
@SubCommand(
"taskgraph",
"cron",
description="Provide a pointer to the new `.cron.yml` handler.",
)
def taskgraph_cron(command_context, **options):
print(
'Handling of ".cron.yml" files has move to '
"https://hg.mozilla.org/ci/ci-admin/file/default/build-decision."
)
sys.exit(1)
@SubCommand(
"taskgraph",
"action-callback",
description="Run action callback used by action tasks",
parser=partial(get_taskgraph_command_parser, "action-callback"),
)
def action_callback(command_context, **options):
setup_logging(command_context)
taskgraph_commands["action-callback"].func(options)
@SubCommand(
"taskgraph",
"test-action-callback",
description="Run an action callback in a testing mode",
parser=partial(get_taskgraph_command_parser, "test-action-callback"),
)
def test_action_callback(command_context, **options):
setup_logging(command_context)
if not options["parameters"]:
options["parameters"] = "project=mozilla-central"
taskgraph_commands["test-action-callback"].func(options)
def setup_logging(command_context, quiet=False, verbose=True):
"""
Set up Python logging for all loggers, sending results to stderr (so
that command output can be redirected easily) and adding the typical
mach timestamp.
"""
# remove the old terminal handler
old = command_context.log_manager.replace_terminal_handler(None)
# re-add it, with level and fh set appropriately
if not quiet:
level = logging.DEBUG if verbose else logging.INFO
command_context.log_manager.add_terminal_logging(
fh=sys.stderr,
level=level,
write_interval=old.formatter.write_interval,
write_times=old.formatter.write_times,
)
# all of the taskgraph logging is unstructured logging
command_context.log_manager.enable_unstructured()
def show_actions(command_context, options):
import taskgraph
import taskgraph.actions
import taskgraph.generator
import taskgraph.parameters
try:
setup_logging(
command_context, quiet=options["quiet"], verbose=options["verbose"]
)
parameters = taskgraph.parameters.parameters_loader(options["parameters"])
tgg = taskgraph.generator.TaskGraphGenerator(
root_dir=options.get("root"),
parameters=parameters,
)
actions = taskgraph.actions.render_actions_json(
tgg.parameters,
tgg.graph_config,
decision_task_id="DECISION-TASK",
)
print(json.dumps(actions, sort_keys=True, indent=2, separators=(",", ": ")))
except Exception:
traceback.print_exc()
sys.exit(1)
@Command(
"taskcluster-load-image",
category="ci",
description="Load a pre-built Docker image. Note that you need to "
"have docker installed and running for this to work.",
parser=partial(get_taskgraph_command_parser, "load-image"),
)
def load_image(command_context, **kwargs):
taskgraph_commands["load-image"].func(kwargs)
@Command(
"taskcluster-build-image",
category="ci",
description="Build a Docker image",
parser=partial(get_taskgraph_command_parser, "build-image"),
)
def build_image(command_context, **kwargs):
try:
taskgraph_commands["build-image"].func(kwargs)
except Exception:
traceback.print_exc()
sys.exit(1)
@Command(
"taskcluster-image-digest",
category="ci",
description="Print the digest of the image of this name based on the "
"current contents of the tree.",
parser=partial(get_taskgraph_command_parser, "build-image"),
)
def image_digest(command_context, **kwargs):
taskgraph_commands["image-digest"].func(kwargs)
@Command(
"release-history",
category="ci",
description="Query balrog for release history used by enable partials generation",
)
@CommandArgument(
"-b",
"--branch",
help="The gecko project branch used in balrog, such as "
"mozilla-central, release, maple",
)
@CommandArgument(
"--product", default="Firefox", help="The product identifier, such as 'Firefox'"
)
def generate_partials_builds(command_context, product, branch):
from taskgraph.util.partials import populate_release_history
try:
import yaml
release_history = {"release_history": populate_release_history(product, branch)}
print(
yaml.safe_dump(
release_history, allow_unicode=True, default_flow_style=False
)
)
except Exception:
traceback.print_exc()
sys.exit(1)
except Exception:
traceback.print_exc()
sys.exit(1)

View File

@ -12,6 +12,7 @@ import sys
import six
from mozbuild.base import (
MachCommandBase,
MachCommandConditions as conditions,
BinaryNotFoundException,
)
@ -19,6 +20,7 @@ from mozbuild.base import (
from mach.decorators import (
CommandArgument,
CommandArgumentGroup,
CommandProvider,
Command,
)
@ -35,316 +37,321 @@ def setup_awsy_argument_parser():
return parser
AWSY_PATH = os.path.dirname(os.path.realpath(__file__))
if AWSY_PATH not in sys.path:
sys.path.append(AWSY_PATH)
from awsy import ITERATIONS, PER_TAB_PAUSE, SETTLE_WAIT_TIME, MAX_TABS
@CommandProvider
class MachCommands(MachCommandBase):
AWSY_PATH = os.path.dirname(os.path.realpath(__file__))
if AWSY_PATH not in sys.path:
sys.path.append(AWSY_PATH)
from awsy import ITERATIONS, PER_TAB_PAUSE, SETTLE_WAIT_TIME, MAX_TABS
def run_awsy(self, command_context, tests, binary=None, **kwargs):
import json
from mozlog.structured import commandline
def run_awsy(command_context, tests, binary=None, **kwargs):
import json
from mozlog.structured import commandline
from marionette_harness.runtests import MarionetteTestRunner, MarionetteHarness
from marionette_harness.runtests import MarionetteTestRunner, MarionetteHarness
parser = setup_awsy_argument_parser()
parser = setup_awsy_argument_parser()
awsy_source_dir = os.path.join(command_context.topsrcdir, "testing", "awsy")
if not tests:
tests = [os.path.join(awsy_source_dir, "awsy", "test_memory_usage.py")]
awsy_source_dir = os.path.join(command_context.topsrcdir, "testing", "awsy")
if not tests:
tests = [os.path.join(awsy_source_dir, "awsy", "test_memory_usage.py")]
args = argparse.Namespace(tests=tests)
args = argparse.Namespace(tests=tests)
args.binary = binary
args.binary = binary
if kwargs["quick"]:
kwargs["entities"] = 3
kwargs["iterations"] = 1
kwargs["perTabPause"] = 1
kwargs["settleWaitTime"] = 1
if kwargs["quick"]:
kwargs["entities"] = 3
kwargs["iterations"] = 1
kwargs["perTabPause"] = 1
kwargs["settleWaitTime"] = 1
if "single_stylo_traversal" in kwargs and kwargs["single_stylo_traversal"]:
os.environ["STYLO_THREADS"] = "1"
else:
os.environ["STYLO_THREADS"] = "4"
if "single_stylo_traversal" in kwargs and kwargs["single_stylo_traversal"]:
os.environ["STYLO_THREADS"] = "1"
else:
os.environ["STYLO_THREADS"] = "4"
runtime_testvars = {}
for arg in (
"webRootDir",
"pageManifest",
"resultsDir",
"entities",
"iterations",
"perTabPause",
"settleWaitTime",
"maxTabs",
"dmd",
"tp6",
):
if arg in kwargs and kwargs[arg] is not None:
runtime_testvars[arg] = kwargs[arg]
runtime_testvars = {}
for arg in (
"webRootDir",
"pageManifest",
"resultsDir",
"entities",
"iterations",
"perTabPause",
"settleWaitTime",
"maxTabs",
"dmd",
"tp6",
):
if arg in kwargs and kwargs[arg] is not None:
runtime_testvars[arg] = kwargs[arg]
if "webRootDir" not in runtime_testvars:
awsy_tests_dir = os.path.join(command_context.topobjdir, "_tests", "awsy")
web_root_dir = os.path.join(awsy_tests_dir, "html")
runtime_testvars["webRootDir"] = web_root_dir
else:
web_root_dir = runtime_testvars["webRootDir"]
awsy_tests_dir = os.path.dirname(web_root_dir)
if "webRootDir" not in runtime_testvars:
awsy_tests_dir = os.path.join(command_context.topobjdir, "_tests", "awsy")
web_root_dir = os.path.join(awsy_tests_dir, "html")
runtime_testvars["webRootDir"] = web_root_dir
else:
web_root_dir = runtime_testvars["webRootDir"]
awsy_tests_dir = os.path.dirname(web_root_dir)
if "resultsDir" not in runtime_testvars:
runtime_testvars["resultsDir"] = os.path.join(awsy_tests_dir, "results")
if "resultsDir" not in runtime_testvars:
runtime_testvars["resultsDir"] = os.path.join(awsy_tests_dir, "results")
runtime_testvars["bin"] = binary
runtime_testvars["run_local"] = True
runtime_testvars["bin"] = binary
runtime_testvars["run_local"] = True
page_load_test_dir = os.path.join(web_root_dir, "page_load_test")
if not os.path.isdir(page_load_test_dir):
os.makedirs(page_load_test_dir)
page_load_test_dir = os.path.join(web_root_dir, "page_load_test")
if not os.path.isdir(page_load_test_dir):
os.makedirs(page_load_test_dir)
if not os.path.isdir(runtime_testvars["resultsDir"]):
os.makedirs(runtime_testvars["resultsDir"])
if not os.path.isdir(runtime_testvars["resultsDir"]):
os.makedirs(runtime_testvars["resultsDir"])
runtime_testvars_path = os.path.join(awsy_tests_dir, "runtime-testvars.json")
if kwargs["testvars"]:
kwargs["testvars"].append(runtime_testvars_path)
else:
kwargs["testvars"] = [runtime_testvars_path]
runtime_testvars_path = os.path.join(awsy_tests_dir, "runtime-testvars.json")
if kwargs["testvars"]:
kwargs["testvars"].append(runtime_testvars_path)
else:
kwargs["testvars"] = [runtime_testvars_path]
runtime_testvars_file = open(runtime_testvars_path, "wb" if six.PY2 else "w")
runtime_testvars_file.write(json.dumps(runtime_testvars, indent=2))
runtime_testvars_file.close()
runtime_testvars_file = open(runtime_testvars_path, "wb" if six.PY2 else "w")
runtime_testvars_file.write(json.dumps(runtime_testvars, indent=2))
runtime_testvars_file.close()
manifest_file = os.path.join(awsy_source_dir, "tp5n-pageset.manifest")
tooltool_args = {
"args": [
sys.executable,
os.path.join(command_context.topsrcdir, "mach"),
"artifact",
"toolchain",
"-v",
"--tooltool-manifest=%s" % manifest_file,
"--cache-dir=%s"
% os.path.join(command_context.topsrcdir, "tooltool-cache"),
]
}
command_context.run_process(cwd=page_load_test_dir, **tooltool_args)
tp5nzip = os.path.join(page_load_test_dir, "tp5n.zip")
tp5nmanifest = os.path.join(page_load_test_dir, "tp5n", "tp5n.manifest")
if not os.path.exists(tp5nmanifest):
unzip_args = {
"args": ["unzip", "-q", "-o", tp5nzip, "-d", page_load_test_dir]
}
try:
command_context.run_process(**unzip_args)
except Exception as exc:
troubleshoot = ""
if mozinfo.os == "win":
troubleshoot = (
" Try using --web-root to specify a "
"directory closer to the drive root."
)
manifest_file = os.path.join(awsy_source_dir, "tp5n-pageset.manifest")
tooltool_args = {
"args": [
sys.executable,
os.path.join(command_context.topsrcdir, "mach"),
"artifact",
"toolchain",
"-v",
"--tooltool-manifest=%s" % manifest_file,
"--cache-dir=%s"
% os.path.join(command_context.topsrcdir, "tooltool-cache"),
]
}
command_context.run_process(cwd=page_load_test_dir, **tooltool_args)
tp5nzip = os.path.join(page_load_test_dir, "tp5n.zip")
tp5nmanifest = os.path.join(page_load_test_dir, "tp5n", "tp5n.manifest")
if not os.path.exists(tp5nmanifest):
unzip_args = {"args": ["unzip", "-q", "-o", tp5nzip, "-d", page_load_test_dir]}
try:
command_context.run_process(**unzip_args)
except Exception as exc:
troubleshoot = ""
if mozinfo.os == "win":
troubleshoot = (
" Try using --web-root to specify a "
"directory closer to the drive root."
command_context.log(
logging.ERROR,
"awsy",
{"directory": page_load_test_dir, "exception": exc},
"Failed to unzip `tp5n.zip` into "
"`{directory}` with `{exception}`." + troubleshoot,
)
raise exc
command_context.log(
logging.ERROR,
"awsy",
{"directory": page_load_test_dir, "exception": exc},
"Failed to unzip `tp5n.zip` into "
"`{directory}` with `{exception}`." + troubleshoot,
)
raise exc
# If '--preferences' was not specified supply our default set.
if not kwargs["prefs_files"]:
kwargs["prefs_files"] = [
os.path.join(awsy_source_dir, "conf", "prefs.json")
]
# If '--preferences' was not specified supply our default set.
if not kwargs["prefs_files"]:
kwargs["prefs_files"] = [os.path.join(awsy_source_dir, "conf", "prefs.json")]
# Setup DMD env vars if necessary.
if kwargs["dmd"]:
bin_dir = os.path.dirname(binary)
# Setup DMD env vars if necessary.
if kwargs["dmd"]:
bin_dir = os.path.dirname(binary)
if "DMD" not in os.environ:
os.environ["DMD"] = "1"
if "DMD" not in os.environ:
os.environ["DMD"] = "1"
# Work around a startup crash with DMD on windows
if mozinfo.os == "win":
kwargs["pref"] = "security.sandbox.content.level:0"
command_context.log(
logging.WARNING,
"awsy",
{},
"Forcing 'security.sandbox.content.level' = 0 because DMD is enabled.",
)
elif mozinfo.os == "mac":
# On mac binary is in MacOS and dmd.py is in Resources, ie:
# Name.app/Contents/MacOS/libdmd.dylib
# Name.app/Contents/Resources/dmd.py
bin_dir = os.path.join(bin_dir, "../Resources/")
# Work around a startup crash with DMD on windows
if mozinfo.os == "win":
kwargs["pref"] = "security.sandbox.content.level:0"
command_context.log(
logging.WARNING,
"awsy",
{},
"Forcing 'security.sandbox.content.level' = 0 because DMD is enabled.",
)
elif mozinfo.os == "mac":
# On mac binary is in MacOS and dmd.py is in Resources, ie:
# Name.app/Contents/MacOS/libdmd.dylib
# Name.app/Contents/Resources/dmd.py
bin_dir = os.path.join(bin_dir, "../Resources/")
# Also add the bin dir to the python path so we can use dmd.py
if bin_dir not in sys.path:
sys.path.append(bin_dir)
# Also add the bin dir to the python path so we can use dmd.py
if bin_dir not in sys.path:
sys.path.append(bin_dir)
for k, v in six.iteritems(kwargs):
setattr(args, k, v)
for k, v in six.iteritems(kwargs):
setattr(args, k, v)
parser.verify_usage(args)
parser.verify_usage(args)
args.logger = commandline.setup_logging(
"Are We Slim Yet Tests", args, {"mach": sys.stdout}
)
failed = MarionetteHarness(MarionetteTestRunner, args=vars(args)).run()
if failed > 0:
return 1
else:
return 0
@Command(
"awsy-test",
category="testing",
description="Run Are We Slim Yet (AWSY) memory usage testing using marionette.",
parser=setup_awsy_argument_parser,
)
@CommandArgumentGroup("AWSY")
@CommandArgument(
"--web-root",
group="AWSY",
action="store",
type=str,
dest="webRootDir",
help="Path to web server root directory. If not specified, "
"defaults to topobjdir/_tests/awsy/html.",
)
@CommandArgument(
"--page-manifest",
group="AWSY",
action="store",
type=str,
dest="pageManifest",
help="Path to page manifest text file containing a list "
"of urls to test. The urls must be served from localhost. If not "
"specified, defaults to page_load_test/tp5n/tp5n.manifest under "
"the web root.",
)
@CommandArgument(
"--results",
group="AWSY",
action="store",
type=str,
dest="resultsDir",
help="Path to results directory. If not specified, defaults "
"to the parent directory of the web root.",
)
@CommandArgument(
"--quick",
group="AWSY",
action="store_true",
dest="quick",
default=False,
help="Set --entities=3, --iterations=1, --per-tab-pause=1, "
"--settle-wait-time=1 for a quick test. Overrides any explicit "
"argument settings.",
)
@CommandArgument(
"--entities",
group="AWSY",
action="store",
type=int,
dest="entities",
help="Number of urls to load. Defaults to the total number of urls.",
)
@CommandArgument(
"--max-tabs",
group="AWSY",
action="store",
type=int,
dest="maxTabs",
help="Maximum number of tabs to open. Defaults to %s." % MAX_TABS,
)
@CommandArgument(
"--iterations",
group="AWSY",
action="store",
type=int,
dest="iterations",
help="Number of times to run through the test suite. "
"Defaults to %s." % ITERATIONS,
)
@CommandArgument(
"--per-tab-pause",
group="AWSY",
action="store",
type=int,
dest="perTabPause",
help="Seconds to wait in between opening tabs. Defaults to %s." % PER_TAB_PAUSE,
)
@CommandArgument(
"--settle-wait-time",
group="AWSY",
action="store",
type=int,
dest="settleWaitTime",
help="Seconds to wait for things to settled down. "
"Defaults to %s." % SETTLE_WAIT_TIME,
)
@CommandArgument(
"--dmd",
group="AWSY",
action="store_true",
dest="dmd",
default=False,
help="Enable DMD during testing. Requires a DMD-enabled build.",
)
@CommandArgument(
"--tp6",
group="AWSY",
action="store_true",
dest="tp6",
default=False,
help="Use the tp6 pageset during testing.",
)
def run_awsy_test(command_context, tests, **kwargs):
"""mach awsy-test runs the in-tree version of the Are We Slim Yet
(AWSY) tests.
awsy-test is implemented as a marionette test and marionette
test arguments also apply although they are not necessary
since reasonable defaults will be chosen.
The AWSY specific arguments can be found in the Command
Arguments for AWSY section below.
awsy-test will automatically download the tp5n.zip talos
pageset from tooltool and install it under
topobjdir/_tests/awsy/html. You can specify your own page set
by specifying --web-root and --page-manifest.
The results of the test will be placed in the results
directory specified by the --results argument.
On Windows, you may experience problems due to path length
errors when extracting the tp5n.zip file containing the
test pages or when attempting to write checkpoints to the
results directory. In that case, you should specify both
the --web-root and --results arguments pointing to a location
with a short path. For example:
--web-root=c:\\\\tmp\\\\html --results=c:\\\\tmp\\\\results
Note that the double backslashes are required.
"""
kwargs["logger_name"] = "Awsy Tests"
if "test_objects" in kwargs:
tests = []
for obj in kwargs["test_objects"]:
tests.append(obj["file_relpath"])
del kwargs["test_objects"]
if not kwargs.get("binary") and conditions.is_firefox(command_context):
try:
kwargs["binary"] = command_context.get_binary_path("app")
except BinaryNotFoundException as e:
command_context.log(
logging.ERROR, "awsy", {"error": str(e)}, "ERROR: {error}"
)
command_context.log(logging.INFO, "awsy", {"help": e.help()}, "{help}")
args.logger = commandline.setup_logging(
"Are We Slim Yet Tests", args, {"mach": sys.stdout}
)
failed = MarionetteHarness(MarionetteTestRunner, args=vars(args)).run()
if failed > 0:
return 1
return run_awsy(command_context, tests, **kwargs)
else:
return 0
@Command(
"awsy-test",
category="testing",
description="Run Are We Slim Yet (AWSY) memory usage testing using marionette.",
parser=setup_awsy_argument_parser,
)
@CommandArgumentGroup("AWSY")
@CommandArgument(
"--web-root",
group="AWSY",
action="store",
type=str,
dest="webRootDir",
help="Path to web server root directory. If not specified, "
"defaults to topobjdir/_tests/awsy/html.",
)
@CommandArgument(
"--page-manifest",
group="AWSY",
action="store",
type=str,
dest="pageManifest",
help="Path to page manifest text file containing a list "
"of urls to test. The urls must be served from localhost. If not "
"specified, defaults to page_load_test/tp5n/tp5n.manifest under "
"the web root.",
)
@CommandArgument(
"--results",
group="AWSY",
action="store",
type=str,
dest="resultsDir",
help="Path to results directory. If not specified, defaults "
"to the parent directory of the web root.",
)
@CommandArgument(
"--quick",
group="AWSY",
action="store_true",
dest="quick",
default=False,
help="Set --entities=3, --iterations=1, --per-tab-pause=1, "
"--settle-wait-time=1 for a quick test. Overrides any explicit "
"argument settings.",
)
@CommandArgument(
"--entities",
group="AWSY",
action="store",
type=int,
dest="entities",
help="Number of urls to load. Defaults to the total number of " "urls.",
)
@CommandArgument(
"--max-tabs",
group="AWSY",
action="store",
type=int,
dest="maxTabs",
help="Maximum number of tabs to open. " "Defaults to %s." % MAX_TABS,
)
@CommandArgument(
"--iterations",
group="AWSY",
action="store",
type=int,
dest="iterations",
help="Number of times to run through the test suite. "
"Defaults to %s." % ITERATIONS,
)
@CommandArgument(
"--per-tab-pause",
group="AWSY",
action="store",
type=int,
dest="perTabPause",
help="Seconds to wait in between opening tabs. "
"Defaults to %s." % PER_TAB_PAUSE,
)
@CommandArgument(
"--settle-wait-time",
group="AWSY",
action="store",
type=int,
dest="settleWaitTime",
help="Seconds to wait for things to settled down. "
"Defaults to %s." % SETTLE_WAIT_TIME,
)
@CommandArgument(
"--dmd",
group="AWSY",
action="store_true",
dest="dmd",
default=False,
help="Enable DMD during testing. Requires a DMD-enabled build.",
)
@CommandArgument(
"--tp6",
group="AWSY",
action="store_true",
dest="tp6",
default=False,
help="Use the tp6 pageset during testing.",
)
def run_awsy_test(self, command_context, tests, **kwargs):
"""mach awsy-test runs the in-tree version of the Are We Slim Yet
(AWSY) tests.
awsy-test is implemented as a marionette test and marionette
test arguments also apply although they are not necessary
since reasonable defaults will be chosen.
The AWSY specific arguments can be found in the Command
Arguments for AWSY section below.
awsy-test will automatically download the tp5n.zip talos
pageset from tooltool and install it under
topobjdir/_tests/awsy/html. You can specify your own page set
by specifying --web-root and --page-manifest.
The results of the test will be placed in the results
directory specified by the --results argument.
On Windows, you may experience problems due to path length
errors when extracting the tp5n.zip file containing the
test pages or when attempting to write checkpoints to the
results directory. In that case, you should specify both
the --web-root and --results arguments pointing to a location
with a short path. For example:
--web-root=c:\\\\tmp\\\\html --results=c:\\\\tmp\\\\results
Note that the double backslashes are required.
"""
kwargs["logger_name"] = "Awsy Tests"
if "test_objects" in kwargs:
tests = []
for obj in kwargs["test_objects"]:
tests.append(obj["file_relpath"])
del kwargs["test_objects"]
if not kwargs.get("binary") and conditions.is_firefox(command_context):
try:
kwargs["binary"] = command_context.get_binary_path("app")
except BinaryNotFoundException as e:
command_context.log(
logging.ERROR, "awsy", {"error": str(e)}, "ERROR: {error}"
)
command_context.log(logging.INFO, "awsy", {"help": e.help()}, "{help}")
return 1
return self.run_awsy(command_context, tests, **kwargs)

View File

@ -7,106 +7,107 @@ import sys
import os
import tempfile
from mach.decorators import CommandArgument, Command
from mozbuild.base import BinaryNotFoundException
from mach.decorators import CommandArgument, CommandProvider, Command
from mozbuild.base import MachCommandBase, BinaryNotFoundException
requirements = os.path.join(os.path.dirname(__file__), "requirements", "base.txt")
def _init(command_context):
command_context.activate_virtualenv()
command_context.virtualenv_manager.install_pip_requirements(
requirements, require_hashes=False
@CommandProvider
class CondprofileCommandProvider(MachCommandBase):
def _init(self, command_context):
command_context.activate_virtualenv()
command_context.virtualenv_manager.install_pip_requirements(
requirements, require_hashes=False
)
@Command("fetch-condprofile", category="testing")
@CommandArgument("--target-dir", default=None, help="Target directory")
@CommandArgument("--platform", default=None, help="Platform")
@CommandArgument("--scenario", default="full", help="Scenario") # grab choices
@CommandArgument("--customization", default="default", help="Customization") # same
@CommandArgument("--task-id", default=None, help="Task ID")
@CommandArgument("--download-cache", action="store_true", default=True)
@CommandArgument(
"--repo",
default="mozilla-central",
choices=["mozilla-central", "try"],
help="Repository",
)
def fetch(
self,
command_context,
target_dir,
platform,
scenario,
customization,
task_id,
download_cache,
repo,
):
self._init(command_context)
from condprof.client import get_profile
from condprof.util import get_current_platform
if platform is None:
platform = get_current_platform()
@Command("fetch-condprofile", category="testing")
@CommandArgument("--target-dir", default=None, help="Target directory")
@CommandArgument("--platform", default=None, help="Platform")
@CommandArgument("--scenario", default="full", help="Scenario") # grab choices
@CommandArgument("--customization", default="default", help="Customization") # same
@CommandArgument("--task-id", default=None, help="Task ID")
@CommandArgument("--download-cache", action="store_true", default=True)
@CommandArgument(
"--repo",
default="mozilla-central",
choices=["mozilla-central", "try"],
help="Repository",
)
def fetch(
command_context,
target_dir,
platform,
scenario,
customization,
task_id,
download_cache,
repo,
):
_init(command_context)
from condprof.client import get_profile
from condprof.util import get_current_platform
if target_dir is None:
target_dir = tempfile.mkdtemp()
if platform is None:
platform = get_current_platform()
get_profile(
target_dir, platform, scenario, customization, task_id, download_cache, repo
)
if target_dir is None:
target_dir = tempfile.mkdtemp()
get_profile(
target_dir, platform, scenario, customization, task_id, download_cache, repo
@Command("run-condprofile", category="testing")
@CommandArgument("archive", help="Archives Dir", type=str, default=None)
@CommandArgument("--firefox", help="Firefox Binary", type=str, default=None)
@CommandArgument("--scenario", help="Scenario to use", type=str, default="all")
@CommandArgument("--profile", help="Existing profile Dir", type=str, default=None)
@CommandArgument(
"--customization", help="Profile customization to use", type=str, default="all"
)
@CommandArgument(
"--visible", help="Don't use headless mode", action="store_true", default=False
)
@CommandArgument(
"--archives-dir", help="Archives local dir", type=str, default="/tmp/archives"
)
@CommandArgument(
"--force-new", help="Create from scratch", action="store_true", default=False
)
@CommandArgument(
"--strict",
help="Errors out immediatly on a scenario failure",
action="store_true",
default=True,
)
@CommandArgument(
"--geckodriver",
help="Path to the geckodriver binary",
type=str,
default=sys.platform.startswith("win") and "geckodriver.exe" or "geckodriver",
)
@CommandArgument("--device-name", help="Name of the device", type=str, default=None)
def run(self, command_context, **kw):
os.environ["MANUAL_MACH_RUN"] = "1"
self._init(command_context)
if kw["firefox"] is None:
try:
kw["firefox"] = command_context.get_binary_path()
except BinaryNotFoundException as e:
command_context.log(
logging.ERROR,
"run-condprofile",
{"error": str(e)},
"ERROR: {error}",
)
command_context.log(
logging.INFO, "run-condprofile", {"help": e.help()}, "{help}"
)
return 1
@Command("run-condprofile", category="testing")
@CommandArgument("archive", help="Archives Dir", type=str, default=None)
@CommandArgument("--firefox", help="Firefox Binary", type=str, default=None)
@CommandArgument("--scenario", help="Scenario to use", type=str, default="all")
@CommandArgument("--profile", help="Existing profile Dir", type=str, default=None)
@CommandArgument(
"--customization", help="Profile customization to use", type=str, default="all"
)
@CommandArgument(
"--visible", help="Don't use headless mode", action="store_true", default=False
)
@CommandArgument(
"--archives-dir", help="Archives local dir", type=str, default="/tmp/archives"
)
@CommandArgument(
"--force-new", help="Create from scratch", action="store_true", default=False
)
@CommandArgument(
"--strict",
help="Errors out immediatly on a scenario failure",
action="store_true",
default=True,
)
@CommandArgument(
"--geckodriver",
help="Path to the geckodriver binary",
type=str,
default=sys.platform.startswith("win") and "geckodriver.exe" or "geckodriver",
)
@CommandArgument("--device-name", help="Name of the device", type=str, default=None)
def run(command_context, **kw):
os.environ["MANUAL_MACH_RUN"] = "1"
_init(command_context)
from condprof.runner import run
if kw["firefox"] is None:
try:
kw["firefox"] = command_context.get_binary_path()
except BinaryNotFoundException as e:
command_context.log(
logging.ERROR,
"run-condprofile",
{"error": str(e)},
"ERROR: {error}",
)
command_context.log(
logging.INFO, "run-condprofile", {"help": e.help()}, "{help}"
)
return 1
from condprof.runner import run
run(**kw)
run(**kw)

View File

@ -10,12 +10,14 @@ import os
import sys
from mozbuild.base import (
MachCommandBase,
MachCommandConditions as conditions,
BinaryNotFoundException,
)
from mach.decorators import (
Command,
CommandProvider,
)
@ -85,28 +87,32 @@ def run_firefox_ui_test(testtype=None, topsrcdir=None, **kwargs):
return 0
@Command(
"firefox-ui-functional",
category="testing",
conditions=[conditions.is_firefox],
description="Run the functional test suite of Firefox UI tests.",
parser=setup_argument_parser_functional,
)
def run_firefox_ui_functional(command_context, **kwargs):
try:
kwargs["binary"] = kwargs["binary"] or command_context.get_binary_path("app")
except BinaryNotFoundException as e:
command_context.log(
logging.ERROR,
"firefox-ui-functional",
{"error": str(e)},
"ERROR: {error}",
)
command_context.log(
logging.INFO, "firefox-ui-functional", {"help": e.help()}, "{help}"
)
return 1
return run_firefox_ui_test(
testtype="functional", topsrcdir=command_context.topsrcdir, **kwargs
@CommandProvider
class MachCommands(MachCommandBase):
@Command(
"firefox-ui-functional",
category="testing",
conditions=[conditions.is_firefox],
description="Run the functional test suite of Firefox UI tests.",
parser=setup_argument_parser_functional,
)
def run_firefox_ui_functional(self, command_context, **kwargs):
try:
kwargs["binary"] = kwargs["binary"] or command_context.get_binary_path(
"app"
)
except BinaryNotFoundException as e:
command_context.log(
logging.ERROR,
"firefox-ui-functional",
{"error": str(e)},
"ERROR: {error}",
)
command_context.log(
logging.INFO, "firefox-ui-functional", {"help": e.help()}, "{help}"
)
return 1
return run_firefox_ui_test(
testtype="functional", topsrcdir=command_context.topsrcdir, **kwargs
)

View File

@ -11,119 +11,123 @@ from mach.decorators import (
Command,
CommandArgument,
CommandArgumentGroup,
CommandProvider,
)
from mozbuild.base import BinaryNotFoundException
from mozbuild.base import MachCommandBase, BinaryNotFoundException
@Command(
"geckodriver",
category="post-build",
description="Run the WebDriver implementation for Gecko.",
)
@CommandArgument(
"--binary", type=str, help="Firefox binary (defaults to the local build)."
)
@CommandArgument(
"params", nargs="...", help="Flags to be passed through to geckodriver."
)
@CommandArgumentGroup("debugging")
@CommandArgument(
"--debug",
action="store_true",
group="debugging",
help="Enable the debugger. Not specifying a --debugger "
"option will result in the default debugger "
"being used.",
)
@CommandArgument(
"--debugger",
default=None,
type=str,
group="debugging",
help="Name of debugger to use.",
)
@CommandArgument(
"--debugger-args",
default=None,
metavar="params",
type=str,
group="debugging",
help="Flags to pass to the debugger itself; split as the Bourne shell would.",
)
def run(command_context, binary, params, debug, debugger, debugger_args):
try:
binpath = command_context.get_binary_path("geckodriver")
except BinaryNotFoundException as e:
command_context.log(
logging.ERROR, "geckodriver", {"error": str(e)}, "ERROR: {error}"
)
command_context.log(
logging.INFO,
"geckodriver",
{},
"It looks like geckodriver isn't built. "
"Add ac_add_options --enable-geckodriver to your "
"mozconfig "
"and run |./mach build| to build it.",
)
return 1
args = [binpath]
if params:
args.extend(params)
if binary is None:
@CommandProvider
class GeckoDriver(MachCommandBase):
@Command(
"geckodriver",
category="post-build",
description="Run the WebDriver implementation for Gecko.",
)
@CommandArgument(
"--binary", type=str, help="Firefox binary (defaults to the local build)."
)
@CommandArgument(
"params", nargs="...", help="Flags to be passed through to geckodriver."
)
@CommandArgumentGroup("debugging")
@CommandArgument(
"--debug",
action="store_true",
group="debugging",
help="Enable the debugger. Not specifying a --debugger "
"option will result in the default debugger "
"being used.",
)
@CommandArgument(
"--debugger",
default=None,
type=str,
group="debugging",
help="Name of debugger to use.",
)
@CommandArgument(
"--debugger-args",
default=None,
metavar="params",
type=str,
group="debugging",
help="Flags to pass to the debugger itself; "
"split as the Bourne shell would.",
)
def run(self, command_context, binary, params, debug, debugger, debugger_args):
try:
binary = command_context.get_binary_path("app")
binpath = command_context.get_binary_path("geckodriver")
except BinaryNotFoundException as e:
command_context.log(
logging.ERROR, "geckodriver", {"error": str(e)}, "ERROR: {error}"
)
command_context.log(
logging.INFO, "geckodriver", {"help": e.help()}, "{help}"
logging.INFO,
"geckodriver",
{},
"It looks like geckodriver isn't built. "
"Add ac_add_options --enable-geckodriver to your "
"mozconfig "
"and run |./mach build| to build it.",
)
return 1
args.extend(["--binary", binary])
args = [binpath]
if debug or debugger or debugger_args:
if "INSIDE_EMACS" in os.environ:
command_context.log_manager.terminal_handler.setLevel(logging.WARNING)
import mozdebug
if not debugger:
# No debugger name was provided. Look for the default ones on
# current OS.
debugger = mozdebug.get_default_debugger_name(
mozdebug.DebuggerSearch.KeepLooking
)
if debugger:
debuggerInfo = mozdebug.get_debugger_info(debugger, debugger_args)
if not debuggerInfo:
print("Could not find a suitable debugger in your PATH.")
return 1
# Parameters come from the CLI. We need to convert them before
# their use.
if debugger_args:
from mozbuild import shellutil
if params:
args.extend(params)
if binary is None:
try:
debugger_args = shellutil.split(debugger_args)
except shellutil.MetaCharacterException as e:
print(
"The --debugger-args you passed require a real shell to parse them."
binary = command_context.get_binary_path("app")
except BinaryNotFoundException as e:
command_context.log(
logging.ERROR, "geckodriver", {"error": str(e)}, "ERROR: {error}"
)
command_context.log(
logging.INFO, "geckodriver", {"help": e.help()}, "{help}"
)
print("(We can't handle the %r character.)" % e.char)
return 1
# Prepend the debugger args.
args = [debuggerInfo.path] + debuggerInfo.args + args
args.extend(["--binary", binary])
return command_context.run_process(
args=args, ensure_exit_code=False, pass_thru=True
)
if debug or debugger or debugger_args:
if "INSIDE_EMACS" in os.environ:
command_context.log_manager.terminal_handler.setLevel(logging.WARNING)
import mozdebug
if not debugger:
# No debugger name was provided. Look for the default ones on
# current OS.
debugger = mozdebug.get_default_debugger_name(
mozdebug.DebuggerSearch.KeepLooking
)
if debugger:
debuggerInfo = mozdebug.get_debugger_info(debugger, debugger_args)
if not debuggerInfo:
print("Could not find a suitable debugger in your PATH.")
return 1
# Parameters come from the CLI. We need to convert them before
# their use.
if debugger_args:
from mozbuild import shellutil
try:
debugger_args = shellutil.split(debugger_args)
except shellutil.MetaCharacterException as e:
print(
"The --debugger-args you passed require a real shell to parse them."
)
print("(We can't handle the %r character.)" % e.char)
return 1
# Prepend the debugger args.
args = [debuggerInfo.path] + debuggerInfo.args + args
return command_context.run_process(
args=args, ensure_exit_code=False, pass_thru=True
)

View File

@ -9,8 +9,10 @@ import sys
from argparse import Namespace
from mach.decorators import (
CommandProvider,
Command,
)
from mozbuild.base import MachCommandBase
here = os.path.abspath(os.path.dirname(__file__))
parser = None
@ -117,13 +119,15 @@ def setup_argument_parser():
return parser
@Command(
"gtest",
category="testing",
description="Run the gtest harness.",
parser=setup_argument_parser,
)
def gtest(command_context, **kwargs):
command_context._mach_context.activate_mozharness_venv()
result = run_gtest(command_context._mach_context, **kwargs)
return 0 if result else 1
@CommandProvider
class GtestCommands(MachCommandBase):
@Command(
"gtest",
category="testing",
description="Run the gtest harness.",
parser=setup_argument_parser,
)
def gtest(self, command_context, **kwargs):
command_context._mach_context.activate_mozharness_venv()
result = run_gtest(command_context._mach_context, **kwargs)
return 0 if result else 1

File diff suppressed because it is too large Load Diff

View File

@ -13,10 +13,12 @@ import sys
from six import iteritems
from mach.decorators import (
CommandProvider,
Command,
)
from mozbuild.base import (
MachCommandBase,
MachCommandConditions as conditions,
BinaryNotFoundException,
)
@ -61,52 +63,54 @@ def run_marionette(tests, binary=None, topsrcdir=None, **kwargs):
return 0
@Command(
"marionette-test",
category="testing",
description="Remote control protocol to Gecko, used for browser automation.",
conditions=[functools.partial(conditions.is_buildapp_in, apps=SUPPORTED_APPS)],
parser=create_parser_tests,
)
def marionette_test(command_context, tests, **kwargs):
if "test_objects" in kwargs:
tests = []
for obj in kwargs["test_objects"]:
tests.append(obj["file_relpath"])
del kwargs["test_objects"]
@CommandProvider
class MarionetteTest(MachCommandBase):
@Command(
"marionette-test",
category="testing",
description="Remote control protocol to Gecko, used for browser automation.",
conditions=[functools.partial(conditions.is_buildapp_in, apps=SUPPORTED_APPS)],
parser=create_parser_tests,
)
def marionette_test(self, command_context, tests, **kwargs):
if "test_objects" in kwargs:
tests = []
for obj in kwargs["test_objects"]:
tests.append(obj["file_relpath"])
del kwargs["test_objects"]
if not tests:
if conditions.is_thunderbird(command_context):
tests = [
os.path.join(
command_context.topsrcdir,
"comm/testing/marionette/unit-tests.ini",
if not tests:
if conditions.is_thunderbird(command_context):
tests = [
os.path.join(
command_context.topsrcdir,
"comm/testing/marionette/unit-tests.ini",
)
]
else:
tests = [
os.path.join(
command_context.topsrcdir,
"testing/marionette/harness/marionette_harness/tests/unit-tests.ini",
)
]
if not kwargs.get("binary") and (
conditions.is_firefox(command_context)
or conditions.is_thunderbird(command_context)
):
try:
kwargs["binary"] = command_context.get_binary_path("app")
except BinaryNotFoundException as e:
command_context.log(
logging.ERROR,
"marionette-test",
{"error": str(e)},
"ERROR: {error}",
)
]
else:
tests = [
os.path.join(
command_context.topsrcdir,
"testing/marionette/harness/marionette_harness/tests/unit-tests.ini",
command_context.log(
logging.INFO, "marionette-test", {"help": e.help()}, "{help}"
)
]
return 1
if not kwargs.get("binary") and (
conditions.is_firefox(command_context)
or conditions.is_thunderbird(command_context)
):
try:
kwargs["binary"] = command_context.get_binary_path("app")
except BinaryNotFoundException as e:
command_context.log(
logging.ERROR,
"marionette-test",
{"error": str(e)},
"ERROR: {error}",
)
command_context.log(
logging.INFO, "marionette-test", {"help": e.help()}, "{help}"
)
return 1
return run_marionette(tests, topsrcdir=command_context.topsrcdir, **kwargs)
return run_marionette(tests, topsrcdir=command_context.topsrcdir, **kwargs)

View File

@ -11,8 +11,10 @@ import sys
from functools import partial
from mach.decorators import (
CommandProvider,
Command,
)
from mozbuild.base import MachCommandBase
parser = None
@ -59,13 +61,15 @@ def setup_marionette_argument_parser():
return parser
@Command(
"marionette-test",
category="testing",
description="Run a Marionette test (Check UI or the internal JavaScript "
"using marionette).",
parser=setup_marionette_argument_parser,
)
def run_marionette_test(command_context, **kwargs):
command_context.context.activate_mozharness_venv()
return run_marionette(command_context.context, **kwargs)
@CommandProvider
class MachCommands(MachCommandBase):
@Command(
"marionette-test",
category="testing",
description="Run a Marionette test (Check UI or the internal JavaScript "
"using marionette).",
parser=setup_marionette_argument_parser,
)
def run_marionette_test(self, command_context, **kwargs):
command_context.context.activate_mozharness_venv()
return run_marionette(command_context.context, **kwargs)

View File

@ -14,12 +14,14 @@ import sys
import warnings
from mozbuild.base import (
MachCommandBase,
MachCommandConditions as conditions,
MozbuildObject,
)
from mach.decorators import (
CommandArgument,
CommandProvider,
Command,
)
@ -295,267 +297,284 @@ def verify_host_bin():
return 0
@Command(
"mochitest",
category="testing",
conditions=[functools.partial(conditions.is_buildapp_in, apps=SUPPORTED_APPS)],
description="Run any flavor of mochitest (integration test).",
parser=setup_argument_parser,
)
def run_mochitest_general(
command_context, flavor=None, test_objects=None, resolve_tests=True, **kwargs
):
from mochitest_options import ALL_FLAVORS
from mozlog.commandline import setup_logging
from mozlog.handlers import StreamHandler
from moztest.resolve import get_suite_definition
@CommandProvider
class MachCommands(MachCommandBase):
@Command(
"mochitest",
category="testing",
conditions=[functools.partial(conditions.is_buildapp_in, apps=SUPPORTED_APPS)],
description="Run any flavor of mochitest (integration test).",
parser=setup_argument_parser,
)
def run_mochitest_general(
self,
command_context,
flavor=None,
test_objects=None,
resolve_tests=True,
**kwargs
):
from mochitest_options import ALL_FLAVORS
from mozlog.commandline import setup_logging
from mozlog.handlers import StreamHandler
from moztest.resolve import get_suite_definition
# TODO: This is only strictly necessary while mochitest is using Python
# 2 and can be removed once the command is migrated to Python 3.
command_context.activate_virtualenv()
# TODO: This is only strictly necessary while mochitest is using Python
# 2 and can be removed once the command is migrated to Python 3.
command_context.activate_virtualenv()
buildapp = None
for app in SUPPORTED_APPS:
if conditions.is_buildapp_in(command_context, apps=[app]):
buildapp = app
break
flavors = None
if flavor:
for fname, fobj in six.iteritems(ALL_FLAVORS):
if flavor in fobj["aliases"]:
if buildapp not in fobj["enabled_apps"]:
continue
flavors = [fname]
buildapp = None
for app in SUPPORTED_APPS:
if conditions.is_buildapp_in(command_context, apps=[app]):
buildapp = app
break
else:
flavors = [
f for f, v in six.iteritems(ALL_FLAVORS) if buildapp in v["enabled_apps"]
]
from mozbuild.controller.building import BuildDriver
flavors = None
if flavor:
for fname, fobj in six.iteritems(ALL_FLAVORS):
if flavor in fobj["aliases"]:
if buildapp not in fobj["enabled_apps"]:
continue
flavors = [fname]
break
else:
flavors = [
f
for f, v in six.iteritems(ALL_FLAVORS)
if buildapp in v["enabled_apps"]
]
command_context._ensure_state_subdir_exists(".")
from mozbuild.controller.building import BuildDriver
test_paths = kwargs["test_paths"]
kwargs["test_paths"] = []
command_context._ensure_state_subdir_exists(".")
if kwargs.get("debugger", None):
import mozdebug
test_paths = kwargs["test_paths"]
kwargs["test_paths"] = []
if not mozdebug.get_debugger_info(kwargs.get("debugger")):
sys.exit(1)
if kwargs.get("debugger", None):
import mozdebug
mochitest = command_context._spawn(MochitestRunner)
tests = []
if resolve_tests:
tests = mochitest.resolve_tests(
test_paths, test_objects, cwd=command_context._mach_context.cwd
)
if not mozdebug.get_debugger_info(kwargs.get("debugger")):
sys.exit(1)
if not kwargs.get("log"):
# Create shared logger
format_args = {"level": command_context._mach_context.settings["test"]["level"]}
if len(tests) == 1:
format_args["verbose"] = True
format_args["compact"] = False
mochitest = command_context._spawn(MochitestRunner)
tests = []
if resolve_tests:
tests = mochitest.resolve_tests(
test_paths, test_objects, cwd=command_context._mach_context.cwd
)
default_format = command_context._mach_context.settings["test"]["format"]
kwargs["log"] = setup_logging(
"mach-mochitest", kwargs, {default_format: sys.stdout}, format_args
)
for handler in kwargs["log"].handlers:
if isinstance(handler, StreamHandler):
handler.formatter.inner.summary_on_shutdown = True
if not kwargs.get("log"):
# Create shared logger
format_args = {
"level": command_context._mach_context.settings["test"]["level"]
}
if len(tests) == 1:
format_args["verbose"] = True
format_args["compact"] = False
driver = command_context._spawn(BuildDriver)
driver.install_tests()
default_format = command_context._mach_context.settings["test"]["format"]
kwargs["log"] = setup_logging(
"mach-mochitest", kwargs, {default_format: sys.stdout}, format_args
)
for handler in kwargs["log"].handlers:
if isinstance(handler, StreamHandler):
handler.formatter.inner.summary_on_shutdown = True
subsuite = kwargs.get("subsuite")
if subsuite == "default":
kwargs["subsuite"] = None
suites = defaultdict(list)
is_webrtc_tag_present = False
unsupported = set()
for test in tests:
# Check if we're running a webrtc test so we can enable webrtc
# specific test logic later if needed.
if "webrtc" in test.get("tags", ""):
is_webrtc_tag_present = True
# Filter out non-mochitests and unsupported flavors.
if test["flavor"] not in ALL_FLAVORS:
continue
key = (test["flavor"], test.get("subsuite", ""))
if test["flavor"] not in flavors:
unsupported.add(key)
continue
driver = command_context._spawn(BuildDriver)
driver.install_tests()
subsuite = kwargs.get("subsuite")
if subsuite == "default":
# "--subsuite default" means only run tests that don't have a subsuite
if test.get("subsuite"):
kwargs["subsuite"] = None
suites = defaultdict(list)
is_webrtc_tag_present = False
unsupported = set()
for test in tests:
# Check if we're running a webrtc test so we can enable webrtc
# specific test logic later if needed.
if "webrtc" in test.get("tags", ""):
is_webrtc_tag_present = True
# Filter out non-mochitests and unsupported flavors.
if test["flavor"] not in ALL_FLAVORS:
continue
key = (test["flavor"], test.get("subsuite", ""))
if test["flavor"] not in flavors:
unsupported.add(key)
continue
elif subsuite and test.get("subsuite", "") != subsuite:
unsupported.add(key)
continue
suites[key].append(test)
if subsuite == "default":
# "--subsuite default" means only run tests that don't have a subsuite
if test.get("subsuite"):
unsupported.add(key)
continue
elif subsuite and test.get("subsuite", "") != subsuite:
unsupported.add(key)
continue
# Only webrtc mochitests in the media suite need the websocketprocessbridge.
if ("mochitest", "media") in suites and is_webrtc_tag_present:
req = os.path.join(
"testing",
"tools",
"websocketprocessbridge",
"websocketprocessbridge_requirements_3.txt",
)
command_context.virtualenv_manager.activate()
command_context.virtualenv_manager.install_pip_requirements(
req, require_hashes=False
)
suites[key].append(test)
# sys.executable is used to start the websocketprocessbridge, though for some
# reason it doesn't get set when calling `activate_this.py` in the virtualenv.
sys.executable = command_context.virtualenv_manager.python_path
# This is a hack to introduce an option in mach to not send
# filtered tests to the mochitest harness. Mochitest harness will read
# the master manifest in that case.
if not resolve_tests:
for flavor in flavors:
key = (flavor, kwargs.get("subsuite"))
suites[key] = []
if not suites:
# Make it very clear why no tests were found
if not unsupported:
print(
TESTS_NOT_FOUND.format(
"\n".join(sorted(list(test_paths or test_objects)))
)
# Only webrtc mochitests in the media suite need the websocketprocessbridge.
if ("mochitest", "media") in suites and is_webrtc_tag_present:
req = os.path.join(
"testing",
"tools",
"websocketprocessbridge",
"websocketprocessbridge_requirements_3.txt",
)
command_context.virtualenv_manager.activate()
command_context.virtualenv_manager.install_pip_requirements(
req, require_hashes=False
)
# sys.executable is used to start the websocketprocessbridge, though for some
# reason it doesn't get set when calling `activate_this.py` in the virtualenv.
sys.executable = command_context.virtualenv_manager.python_path
# This is a hack to introduce an option in mach to not send
# filtered tests to the mochitest harness. Mochitest harness will read
# the master manifest in that case.
if not resolve_tests:
for flavor in flavors:
key = (flavor, kwargs.get("subsuite"))
suites[key] = []
if not suites:
# Make it very clear why no tests were found
if not unsupported:
print(
TESTS_NOT_FOUND.format(
"\n".join(sorted(list(test_paths or test_objects)))
)
)
return 1
msg = []
for f, s in unsupported:
fobj = ALL_FLAVORS[f]
apps = fobj["enabled_apps"]
name = fobj["aliases"][0]
if s:
name = "{} --subsuite {}".format(name, s)
if buildapp not in apps:
reason = "requires {}".format(" or ".join(apps))
else:
reason = "excluded by the command line"
msg.append(" mochitest -f {} ({})".format(name, reason))
print(SUPPORTED_TESTS_NOT_FOUND.format(buildapp, "\n".join(sorted(msg))))
return 1
msg = []
for f, s in unsupported:
fobj = ALL_FLAVORS[f]
apps = fobj["enabled_apps"]
name = fobj["aliases"][0]
if s:
name = "{} --subsuite {}".format(name, s)
if buildapp == "android":
from mozrunner.devices.android_device import (
verify_android_device,
InstallIntent,
)
if buildapp not in apps:
reason = "requires {}".format(" or ".join(apps))
else:
reason = "excluded by the command line"
msg.append(" mochitest -f {} ({})".format(name, reason))
print(SUPPORTED_TESTS_NOT_FOUND.format(buildapp, "\n".join(sorted(msg))))
return 1
app = kwargs.get("app")
if not app:
app = "org.mozilla.geckoview.test"
device_serial = kwargs.get("deviceSerial")
install = (
InstallIntent.NO if kwargs.get("no_install") else InstallIntent.YES
)
# verify installation
verify_android_device(
command_context,
install=install,
xre=False,
network=True,
app=app,
device_serial=device_serial,
)
run_mochitest = mochitest.run_android_test
else:
run_mochitest = mochitest.run_desktop_test
overall = None
for (flavor, subsuite), tests in sorted(suites.items()):
suite_name, suite = get_suite_definition(flavor, subsuite)
if "test_paths" in suite["kwargs"]:
del suite["kwargs"]["test_paths"]
harness_args = kwargs.copy()
harness_args.update(suite["kwargs"])
# Pass in the full suite name as defined in moztest/resolve.py in case
# chunk-by-runtime is called, in which case runtime information for
# specific mochitest suite has to be loaded. See Bug 1637463.
harness_args.update({"suite_name": suite_name})
result = run_mochitest(command_context, tests=tests, **harness_args)
if result:
overall = result
# Halt tests on keyboard interrupt
if result == -1:
break
# Only shutdown the logger if we created it
if kwargs["log"].name == "mach-mochitest":
kwargs["log"].shutdown()
return overall
@CommandProvider
class GeckoviewJunitCommands(MachCommandBase):
@Command(
"geckoview-junit",
category="testing",
conditions=[conditions.is_android],
description="Run remote geckoview junit tests.",
parser=setup_junit_argument_parser,
)
@CommandArgument(
"--no-install",
help="Do not try to install application on device before "
+ "running (default: False)",
action="store_true",
default=False,
)
def run_junit(self, command_context, no_install, **kwargs):
command_context._ensure_state_subdir_exists(".")
if buildapp == "android":
from mozrunner.devices.android_device import (
get_adb_path,
verify_android_device,
InstallIntent,
)
app = kwargs.get("app")
if not app:
app = "org.mozilla.geckoview.test"
device_serial = kwargs.get("deviceSerial")
install = InstallIntent.NO if kwargs.get("no_install") else InstallIntent.YES
# verify installation
app = kwargs.get("app")
device_serial = kwargs.get("deviceSerial")
verify_android_device(
command_context,
install=install,
install=InstallIntent.NO if no_install else InstallIntent.YES,
xre=False,
network=True,
app=app,
device_serial=device_serial,
)
run_mochitest = mochitest.run_android_test
else:
run_mochitest = mochitest.run_desktop_test
overall = None
for (flavor, subsuite), tests in sorted(suites.items()):
suite_name, suite = get_suite_definition(flavor, subsuite)
if "test_paths" in suite["kwargs"]:
del suite["kwargs"]["test_paths"]
if not kwargs.get("adbPath"):
kwargs["adbPath"] = get_adb_path(command_context)
harness_args = kwargs.copy()
harness_args.update(suite["kwargs"])
# Pass in the full suite name as defined in moztest/resolve.py in case
# chunk-by-runtime is called, in which case runtime information for
# specific mochitest suite has to be loaded. See Bug 1637463.
harness_args.update({"suite_name": suite_name})
if not kwargs.get("log"):
from mozlog.commandline import setup_logging
result = run_mochitest(
command_context._mach_context, tests=tests, **harness_args
format_args = {
"level": command_context._mach_context.settings["test"]["level"]
}
default_format = command_context._mach_context.settings["test"]["format"]
kwargs["log"] = setup_logging(
"mach-mochitest", kwargs, {default_format: sys.stdout}, format_args
)
mochitest = command_context._spawn(MochitestRunner)
return mochitest.run_geckoview_junit_test(
command_context._mach_context, **kwargs
)
if result:
overall = result
# Halt tests on keyboard interrupt
if result == -1:
break
# Only shutdown the logger if we created it
if kwargs["log"].name == "mach-mochitest":
kwargs["log"].shutdown()
return overall
@Command(
"geckoview-junit",
category="testing",
conditions=[conditions.is_android],
description="Run remote geckoview junit tests.",
parser=setup_junit_argument_parser,
)
@CommandArgument(
"--no-install",
help="Do not try to install application on device before "
+ "running (default: False)",
action="store_true",
default=False,
)
def run_junit(command_context, no_install, **kwargs):
command_context._ensure_state_subdir_exists(".")
from mozrunner.devices.android_device import (
get_adb_path,
verify_android_device,
InstallIntent,
)
# verify installation
app = kwargs.get("app")
device_serial = kwargs.get("deviceSerial")
verify_android_device(
command_context,
install=InstallIntent.NO if no_install else InstallIntent.YES,
xre=False,
app=app,
device_serial=device_serial,
)
if not kwargs.get("adbPath"):
kwargs["adbPath"] = get_adb_path(command_context)
if not kwargs.get("log"):
from mozlog.commandline import setup_logging
format_args = {"level": command_context._mach_context.settings["test"]["level"]}
default_format = command_context._mach_context.settings["test"]["format"]
kwargs["log"] = setup_logging(
"mach-mochitest", kwargs, {default_format: sys.stdout}, format_args
)
mochitest = command_context._spawn(MochitestRunner)
return mochitest.run_geckoview_junit_test(command_context._mach_context, **kwargs)

View File

@ -11,8 +11,10 @@ from argparse import Namespace
from functools import partial
from mach.decorators import (
CommandProvider,
Command,
)
from mozbuild.base import MachCommandBase
here = os.path.abspath(os.path.dirname(__file__))
parser = None
@ -191,23 +193,24 @@ def setup_junit_argument_parser():
return parser
@Command(
"mochitest",
category="testing",
description="Run the mochitest harness.",
parser=setup_mochitest_argument_parser,
)
def mochitest(command_context, **kwargs):
command_context._mach_context.activate_mozharness_venv()
return run_test(command_context._mach_context, False, **kwargs)
@CommandProvider
class MochitestCommands(MachCommandBase):
@Command(
"mochitest",
category="testing",
description="Run the mochitest harness.",
parser=setup_mochitest_argument_parser,
)
def mochitest(self, command_context, **kwargs):
command_context._mach_context.activate_mozharness_venv()
return run_test(command_context._mach_context, False, **kwargs)
@Command(
"geckoview-junit",
category="testing",
description="Run the geckoview-junit harness.",
parser=setup_junit_argument_parser,
)
def geckoview_junit(command_context, **kwargs):
command_context._mach_context.activate_mozharness_venv()
return run_test(command_context._mach_context, True, **kwargs)
@Command(
"geckoview-junit",
category="testing",
description="Run the geckoview-junit harness.",
parser=setup_junit_argument_parser,
)
def geckoview_junit(self, command_context, **kwargs):
command_context._mach_context.activate_mozharness_venv()
return run_test(command_context._mach_context, True, **kwargs)

View File

@ -16,10 +16,11 @@ from six.moves.urllib.request import pathname2url
from mach.decorators import (
CommandArgument,
CommandProvider,
Command,
)
from mozbuild.base import MozbuildObject
from mozbuild.base import MachCommandBase, MozbuildObject
from mozbuild.base import MachCommandConditions as conditions
from argparse import ArgumentParser
@ -206,13 +207,15 @@ class MozharnessRunner(MozbuildObject):
return rv
@Command(
"mozharness",
category="testing",
description="Run tests using mozharness.",
conditions=[conditions.is_firefox_or_android],
parser=get_parser,
)
def mozharness(command_context, **kwargs):
runner = command_context._spawn(MozharnessRunner)
return runner.run_suite(kwargs.pop("suite_name")[0], **kwargs)
@CommandProvider
class MozharnessCommands(MachCommandBase):
@Command(
"mozharness",
category="testing",
description="Run tests using mozharness.",
conditions=[conditions.is_firefox_or_android],
parser=get_parser,
)
def mozharness(self, command_context, **kwargs):
runner = command_context._spawn(MozharnessRunner)
return runner.run_suite(kwargs.pop("suite_name")[0], **kwargs)

View File

@ -17,9 +17,10 @@ import subprocess
import sys
import mozfile
from mach.decorators import Command
from mach.decorators import Command, CommandProvider
from mozboot.util import get_state_dir
from mozbuild.base import (
MachCommandBase,
MozbuildObject,
BinaryNotFoundException,
)
@ -309,86 +310,91 @@ def create_parser():
return create_parser(mach_interface=True)
@Command(
"raptor",
category="testing",
description="Run Raptor performance tests.",
parser=create_parser,
)
def run_raptor(command_context, **kwargs):
# Defers this import so that a transitive dependency doesn't
# stop |mach bootstrap| from running
from raptor.power import enable_charging, disable_charging
@CommandProvider
class MachRaptor(MachCommandBase):
@Command(
"raptor",
category="testing",
description="Run Raptor performance tests.",
parser=create_parser,
)
def run_raptor(self, command_context, **kwargs):
# Defers this import so that a transitive dependency doesn't
# stop |mach bootstrap| from running
from raptor.power import enable_charging, disable_charging
build_obj = command_context
build_obj = command_context
is_android = Conditions.is_android(build_obj) or kwargs["app"] in ANDROID_BROWSERS
if is_android:
from mozrunner.devices.android_device import (
verify_android_device,
InstallIntent,
is_android = (
Conditions.is_android(build_obj) or kwargs["app"] in ANDROID_BROWSERS
)
from mozdevice import ADBDeviceFactory
install = (
InstallIntent.NO if kwargs.pop("noinstall", False) else InstallIntent.YES
)
verbose = False
if (
kwargs.get("log_mach_verbose")
or kwargs.get("log_tbpl_level") == "debug"
or kwargs.get("log_mach_level") == "debug"
or kwargs.get("log_raw_level") == "debug"
):
verbose = True
if not verify_android_device(
build_obj,
install=install,
app=kwargs["binary"],
verbose=verbose,
xre=True,
): # Equivalent to 'run_local' = True.
if is_android:
from mozrunner.devices.android_device import (
verify_android_device,
InstallIntent,
)
from mozdevice import ADBDeviceFactory
install = (
InstallIntent.NO
if kwargs.pop("noinstall", False)
else InstallIntent.YES
)
verbose = False
if (
kwargs.get("log_mach_verbose")
or kwargs.get("log_tbpl_level") == "debug"
or kwargs.get("log_mach_level") == "debug"
or kwargs.get("log_raw_level") == "debug"
):
verbose = True
if not verify_android_device(
build_obj,
install=install,
app=kwargs["binary"],
verbose=verbose,
xre=True,
): # Equivalent to 'run_local' = True.
return 1
# Remove mach global arguments from sys.argv to prevent them
# from being consumed by raptor. Treat any item in sys.argv
# occuring before "raptor" as a mach global argument.
argv = []
in_mach = True
for arg in sys.argv:
if not in_mach:
argv.append(arg)
if arg.startswith("raptor"):
in_mach = False
raptor = command_context._spawn(RaptorRunner)
device = None
try:
if kwargs["power_test"] and is_android:
device = ADBDeviceFactory(verbose=True)
disable_charging(device)
return raptor.run_test(argv, kwargs)
except BinaryNotFoundException as e:
command_context.log(
logging.ERROR, "raptor", {"error": str(e)}, "ERROR: {error}"
)
command_context.log(logging.INFO, "raptor", {"help": e.help()}, "{help}")
return 1
except Exception as e:
print(repr(e))
return 1
finally:
if kwargs["power_test"] and device:
enable_charging(device)
# Remove mach global arguments from sys.argv to prevent them
# from being consumed by raptor. Treat any item in sys.argv
# occuring before "raptor" as a mach global argument.
argv = []
in_mach = True
for arg in sys.argv:
if not in_mach:
argv.append(arg)
if arg.startswith("raptor"):
in_mach = False
raptor = command_context._spawn(RaptorRunner)
device = None
try:
if kwargs["power_test"] and is_android:
device = ADBDeviceFactory(verbose=True)
disable_charging(device)
return raptor.run_test(argv, kwargs)
except BinaryNotFoundException as e:
command_context.log(
logging.ERROR, "raptor", {"error": str(e)}, "ERROR: {error}"
)
command_context.log(logging.INFO, "raptor", {"help": e.help()}, "{help}")
return 1
except Exception as e:
print(repr(e))
return 1
finally:
if kwargs["power_test"] and device:
enable_charging(device)
@Command(
"raptor-test",
category="testing",
description="Run Raptor performance tests.",
parser=create_parser,
)
def run_raptor_test(command_context, **kwargs):
return run_raptor(command_context, **kwargs)
@Command(
"raptor-test",
category="testing",
description="Run Raptor performance tests.",
parser=create_parser,
)
def run_raptor_test(self, command_context, **kwargs):
return self.run_raptor(command_context, **kwargs)

View File

@ -15,9 +15,10 @@ import socket
from mozbuild.base import (
MozbuildObject,
MachCommandBase,
BinaryNotFoundException,
)
from mach.decorators import Command
from mach.decorators import CommandProvider, Command
HERE = os.path.dirname(os.path.realpath(__file__))
@ -122,17 +123,19 @@ def create_parser():
return create_parser(mach_interface=True)
@Command(
"talos-test",
category="testing",
description="Run talos tests (performance testing).",
parser=create_parser,
)
def run_talos_test(command_context, **kwargs):
talos = command_context._spawn(TalosRunner)
@CommandProvider
class MachCommands(MachCommandBase):
@Command(
"talos-test",
category="testing",
description="Run talos tests (performance testing).",
parser=create_parser,
)
def run_talos_test(self, command_context, **kwargs):
talos = command_context._spawn(TalosRunner)
try:
return talos.run_test(sys.argv[2:])
except Exception as e:
print(str(e))
return 1
try:
return talos.run_test(sys.argv[2:])
except Exception as e:
print(str(e))
return 1

View File

@ -5,31 +5,35 @@
from __future__ import absolute_import, print_function
import os
from mach.decorators import Command, CommandArgument
from mach.decorators import Command, CommandArgument, CommandProvider
from mozbuild.base import MachCommandBase
from mozpack.copier import Jarrer
from mozpack.files import FileFinder
@Command("tps-build", category="testing", description="Build TPS add-on.")
@CommandArgument("--dest", default=None, help="Where to write add-on.")
def build(command_context, dest):
src = os.path.join(
command_context.topsrcdir, "services", "sync", "tps", "extensions", "tps"
)
dest = os.path.join(
dest or os.path.join(command_context.topobjdir, "services", "sync"),
"tps.xpi",
)
@CommandProvider
class MachCommands(MachCommandBase):
@Command("tps-build", category="testing", description="Build TPS add-on.")
@CommandArgument("--dest", default=None, help="Where to write add-on.")
def build(self, command_context, dest):
"""TPS tests for Sync."""
src = os.path.join(
command_context.topsrcdir, "services", "sync", "tps", "extensions", "tps"
)
dest = os.path.join(
dest or os.path.join(command_context.topobjdir, "services", "sync"),
"tps.xpi",
)
if not os.path.exists(os.path.dirname(dest)):
os.makedirs(os.path.dirname(dest))
if not os.path.exists(os.path.dirname(dest)):
os.makedirs(os.path.dirname(dest))
if os.path.isfile(dest):
os.unlink(dest)
if os.path.isfile(dest):
os.unlink(dest)
jarrer = Jarrer()
for p, f in FileFinder(src).find("*"):
jarrer.add(p, f)
jarrer.copy(dest)
jarrer = Jarrer()
for p, f in FileFinder(src).find("*"):
jarrer.add(p, f)
jarrer.copy(dest)
print("Built TPS add-on as %s" % dest)
print("Built TPS add-on as %s" % dest)

View File

@ -12,11 +12,13 @@ import sys
from six import iteritems
from mozbuild.base import (
MachCommandBase,
MachCommandConditions as conditions,
MozbuildObject,
)
from mach.decorators import (
CommandProvider,
Command,
)
@ -466,181 +468,177 @@ def create_parser_testpaths():
return parser
def setup(command_context):
command_context.activate_virtualenv()
@CommandProvider
class MachCommands(MachCommandBase):
@staticmethod
def setup(command_context):
command_context.activate_virtualenv()
@Command(
"web-platform-tests",
category="testing",
conditions=[conditions.is_firefox_or_android],
description="Run web-platform-tests.",
parser=create_parser_wpt,
)
def run_web_platform_tests(command_context, **params):
setup(command_context)
if params["product"] is None:
if conditions.is_android(command_context):
params["product"] = "firefox_android"
else:
params["product"] = "firefox"
if "test_objects" in params:
include = []
test_types = set()
for item in params["test_objects"]:
include.append(item["name"])
test_types.add(item.get("subsuite"))
if None not in test_types:
params["test_types"] = list(test_types)
params["include"] = include
del params["test_objects"]
if params.get("debugger", None):
import mozdebug
if not mozdebug.get_debugger_info(params.get("debugger")):
sys.exit(1)
wpt_setup = command_context._spawn(WebPlatformTestsRunnerSetup)
wpt_setup._mach_context = command_context._mach_context
wpt_runner = WebPlatformTestsRunner(wpt_setup)
logger = wpt_runner.setup_logging(**params)
if (
conditions.is_android(command_context)
and params["product"] != "firefox_android"
):
logger.warning("Must specify --product=firefox_android in Android environment.")
return wpt_runner.run(logger, **params)
@Command(
"wpt",
category="testing",
conditions=[conditions.is_firefox_or_android],
description="Run web-platform-tests.",
parser=create_parser_wpt,
)
def run_wpt(command_context, **params):
return run_web_platform_tests(command_context, **params)
@Command(
"web-platform-tests-update",
category="testing",
description="Update web-platform-test metadata.",
parser=create_parser_update,
)
def update_web_platform_tests(command_context, **params):
setup(command_context)
command_context.virtualenv_manager.install_pip_package("html5lib==1.0.1")
command_context.virtualenv_manager.install_pip_package("ujson")
command_context.virtualenv_manager.install_pip_package("requests")
wpt_updater = command_context._spawn(WebPlatformTestsUpdater)
logger = wpt_updater.setup_logging(**params)
return wpt_updater.run_update(logger, **params)
@Command(
"wpt-update",
category="testing",
description="Update web-platform-test metadata.",
parser=create_parser_update,
)
def update_wpt(command_context, **params):
return update_web_platform_tests(command_context, **params)
@Command(
"wpt-manifest-update",
category="testing",
description="Update web-platform-test manifests.",
parser=create_parser_manifest_update,
)
def wpt_manifest_update(command_context, **params):
setup(command_context)
wpt_setup = command_context._spawn(WebPlatformTestsRunnerSetup)
wpt_runner = WebPlatformTestsRunner(wpt_setup)
logger = wpt_runner.setup_logging(**params)
logger.warning(
"The wpt manifest is now automatically updated, "
"so running this command is usually unnecessary"
@Command(
"web-platform-tests",
category="testing",
conditions=[conditions.is_firefox_or_android],
description="Run web-platform-tests.",
parser=create_parser_wpt,
)
return 0 if wpt_runner.update_manifest(logger, **params) else 1
def run_web_platform_tests(self, command_context, **params):
self.setup(command_context)
if params["product"] is None:
if conditions.is_android(command_context):
params["product"] = "firefox_android"
else:
params["product"] = "firefox"
if "test_objects" in params:
include = []
test_types = set()
for item in params["test_objects"]:
include.append(item["name"])
test_types.add(item.get("subsuite"))
if None not in test_types:
params["test_types"] = list(test_types)
params["include"] = include
del params["test_objects"]
if params.get("debugger", None):
import mozdebug
if not mozdebug.get_debugger_info(params.get("debugger")):
sys.exit(1)
@Command(
"wpt-serve",
category="testing",
description="Run the wpt server",
parser=create_parser_serve,
)
def wpt_serve(command_context, **params):
setup(command_context)
import logging
wpt_setup = command_context._spawn(WebPlatformTestsRunnerSetup)
wpt_setup._mach_context = command_context._mach_context
wpt_runner = WebPlatformTestsRunner(wpt_setup)
logger = logging.getLogger("web-platform-tests")
logger.addHandler(logging.StreamHandler(sys.stdout))
wpt_serve = command_context._spawn(WebPlatformTestsServeRunner)
return wpt_serve.run(**params)
logger = wpt_runner.setup_logging(**params)
if (
conditions.is_android(command_context)
and params["product"] != "firefox_android"
):
logger.warning(
"Must specify --product=firefox_android in Android environment."
)
@Command(
"wpt-metadata-summary",
category="testing",
description="Create a json summary of the wpt metadata",
parser=create_parser_metadata_summary,
)
def wpt_summary(command_context, **params):
import metasummary
return wpt_runner.run(logger, **params)
wpt_setup = command_context._spawn(WebPlatformTestsRunnerSetup)
return metasummary.run(wpt_setup.topsrcdir, wpt_setup.topobjdir, **params)
@Command(
"wpt",
category="testing",
conditions=[conditions.is_firefox_or_android],
description="Run web-platform-tests.",
parser=create_parser_wpt,
)
def run_wpt(self, command_context, **params):
return self.run_web_platform_tests(command_context, **params)
@Command(
"web-platform-tests-update",
category="testing",
description="Update web-platform-test metadata.",
parser=create_parser_update,
)
def update_web_platform_tests(self, command_context, **params):
self.setup(command_context)
command_context.virtualenv_manager.install_pip_package("html5lib==1.0.1")
command_context.virtualenv_manager.install_pip_package("ujson")
command_context.virtualenv_manager.install_pip_package("requests")
@Command("wpt-metadata-merge", category="testing", parser=create_parser_metadata_merge)
def wpt_meta_merge(command_context, **params):
import metamerge
wpt_updater = command_context._spawn(WebPlatformTestsUpdater)
logger = wpt_updater.setup_logging(**params)
return wpt_updater.run_update(logger, **params)
if params["dest"] is None:
params["dest"] = params["current"]
return metamerge.run(**params)
@Command(
"wpt-update",
category="testing",
description="Update web-platform-test metadata.",
parser=create_parser_update,
)
def update_wpt(self, command_context, **params):
return self.update_web_platform_tests(command_context, **params)
@Command(
"wpt-manifest-update",
category="testing",
description="Update web-platform-test manifests.",
parser=create_parser_manifest_update,
)
def wpt_manifest_update(self, command_context, **params):
self.setup(command_context)
wpt_setup = command_context._spawn(WebPlatformTestsRunnerSetup)
wpt_runner = WebPlatformTestsRunner(wpt_setup)
logger = wpt_runner.setup_logging(**params)
logger.warning(
"The wpt manifest is now automatically updated, "
"so running this command is usually unnecessary"
)
return 0 if wpt_runner.update_manifest(logger, **params) else 1
@Command(
"wpt-unittest",
category="testing",
description="Run the wpt tools and wptrunner unit tests",
parser=create_parser_unittest,
)
def wpt_unittest(command_context, **params):
setup(command_context)
command_context.virtualenv_manager.install_pip_package("tox")
runner = command_context._spawn(WebPlatformTestsUnittestRunner)
return 0 if runner.run(**params) else 1
@Command(
"wpt-serve",
category="testing",
description="Run the wpt server",
parser=create_parser_serve,
)
def wpt_serve(self, command_context, **params):
self.setup(command_context)
import logging
logger = logging.getLogger("web-platform-tests")
logger.addHandler(logging.StreamHandler(sys.stdout))
wpt_serve = command_context._spawn(WebPlatformTestsServeRunner)
return wpt_serve.run(**params)
@Command(
"wpt-test-paths",
category="testing",
description="Get a mapping from test ids to files",
parser=create_parser_testpaths,
)
def wpt_test_paths(command_context, **params):
runner = command_context._spawn(WebPlatformTestsTestPathsRunner)
runner.run(**params)
return 0
@Command(
"wpt-metadata-summary",
category="testing",
description="Create a json summary of the wpt metadata",
parser=create_parser_metadata_summary,
)
def wpt_summary(self, command_context, **params):
import metasummary
wpt_setup = command_context._spawn(WebPlatformTestsRunnerSetup)
return metasummary.run(wpt_setup.topsrcdir, wpt_setup.topobjdir, **params)
@Command(
"wpt-fission-regressions",
category="testing",
description="Dump a list of fission-specific regressions",
parser=create_parser_fission_regressions,
)
def wpt_fission_regressions(command_context, **params):
runner = command_context._spawn(WebPlatformTestsFissionRegressionsRunner)
runner.run(**params)
return 0
@Command(
"wpt-metadata-merge", category="testing", parser=create_parser_metadata_merge
)
def wpt_meta_merge(self, command_context, **params):
import metamerge
if params["dest"] is None:
params["dest"] = params["current"]
return metamerge.run(**params)
@Command(
"wpt-unittest",
category="testing",
description="Run the wpt tools and wptrunner unit tests",
parser=create_parser_unittest,
)
def wpt_unittest(self, command_context, **params):
self.setup(command_context)
command_context.virtualenv_manager.install_pip_package("tox")
runner = command_context._spawn(WebPlatformTestsUnittestRunner)
return 0 if runner.run(**params) else 1
@Command(
"wpt-test-paths",
category="testing",
description="Get a mapping from test ids to files",
parser=create_parser_testpaths,
)
def wpt_test_paths(self, command_context, **params):
runner = command_context._spawn(WebPlatformTestsTestPathsRunner)
runner.run(**params)
return 0
@Command(
"wpt-fission-regressions",
category="testing",
description="Dump a list of fission-specific regressions",
parser=create_parser_fission_regressions,
)
def wpt_fission_regressions(self, command_context, **params):
runner = command_context._spawn(WebPlatformTestsFissionRegressionsRunner)
runner.run(**params)
return 0

View File

@ -9,8 +9,10 @@ import sys
from mach_commands_base import WebPlatformTestsRunner, create_parser_wpt
from mach.decorators import (
CommandProvider,
Command,
)
from mozbuild.base import MachCommandBase
class WebPlatformTestsRunnerSetup(object):
@ -70,14 +72,15 @@ class WebPlatformTestsRunnerSetup(object):
raise NotImplementedError
@Command("web-platform-tests", category="testing", parser=create_parser_wpt)
def run_web_platform_tests(command_context, **kwargs):
command_context._mach_context.activate_mozharness_venv()
return WebPlatformTestsRunner(
WebPlatformTestsRunnerSetup(command_context._mach_context)
).run(**kwargs)
@CommandProvider
class MachCommands(MachCommandBase):
@Command("web-platform-tests", category="testing", parser=create_parser_wpt)
def run_web_platform_tests(self, command_context, **kwargs):
command_context._mach_context.activate_mozharness_venv()
return WebPlatformTestsRunner(
WebPlatformTestsRunnerSetup(command_context._mach_context)
).run(**kwargs)
@Command("wpt", category="testing", parser=create_parser_wpt)
def run_wpt(command_context, **params):
return command_context.run_web_platform_tests(**params)
@Command("wpt", category="testing", parser=create_parser_wpt)
def run_wpt(self, command_context, **params):
return command_context.run_web_platform_tests(**params)

View File

@ -14,12 +14,14 @@ import sys
from mozlog import structured
from mozbuild.base import (
MachCommandBase,
MozbuildObject,
MachCommandConditions as conditions,
BinaryNotFoundException,
)
from mach.decorators import (
CommandProvider,
Command,
)
@ -217,74 +219,76 @@ def get_parser():
return parser_desktop()
@Command(
"xpcshell-test",
category="testing",
description="Run XPCOM Shell tests (API direct unit testing)",
conditions=[lambda *args: True],
parser=get_parser,
)
def run_xpcshell_test(command_context, test_objects=None, **params):
from mozbuild.controller.building import BuildDriver
@CommandProvider
class MachCommands(MachCommandBase):
@Command(
"xpcshell-test",
category="testing",
description="Run XPCOM Shell tests (API direct unit testing)",
conditions=[lambda *args: True],
parser=get_parser,
)
def run_xpcshell_test(self, command_context, test_objects=None, **params):
from mozbuild.controller.building import BuildDriver
if test_objects is not None:
from manifestparser import TestManifest
if test_objects is not None:
from manifestparser import TestManifest
m = TestManifest()
m.tests.extend(test_objects)
params["manifest"] = m
m = TestManifest()
m.tests.extend(test_objects)
params["manifest"] = m
driver = command_context._spawn(BuildDriver)
driver.install_tests()
driver = command_context._spawn(BuildDriver)
driver.install_tests()
# We should probably have a utility function to ensure the tree is
# ready to run tests. Until then, we just create the state dir (in
# case the tree wasn't built with mach).
command_context._ensure_state_subdir_exists(".")
# We should probably have a utility function to ensure the tree is
# ready to run tests. Until then, we just create the state dir (in
# case the tree wasn't built with mach).
command_context._ensure_state_subdir_exists(".")
if not params.get("log"):
log_defaults = {
command_context._mach_context.settings["test"]["format"]: sys.stdout
}
fmt_defaults = {
"level": command_context._mach_context.settings["test"]["level"],
"verbose": True,
}
params["log"] = structured.commandline.setup_logging(
"XPCShellTests", params, log_defaults, fmt_defaults
)
if not params.get("log"):
log_defaults = {
command_context._mach_context.settings["test"]["format"]: sys.stdout
}
fmt_defaults = {
"level": command_context._mach_context.settings["test"]["level"],
"verbose": True,
}
params["log"] = structured.commandline.setup_logging(
"XPCShellTests", params, log_defaults, fmt_defaults
)
if not params["threadCount"]:
# pylint --py3k W1619
params["threadCount"] = int((cpu_count() * 3) / 2)
if not params["threadCount"]:
# pylint --py3k W1619
params["threadCount"] = int((cpu_count() * 3) / 2)
if (
conditions.is_android(command_context)
or command_context.substs.get("MOZ_BUILD_APP") == "b2g"
):
from mozrunner.devices.android_device import (
verify_android_device,
get_adb_path,
InstallIntent,
)
if (
conditions.is_android(command_context)
or command_context.substs.get("MOZ_BUILD_APP") == "b2g"
):
from mozrunner.devices.android_device import (
verify_android_device,
get_adb_path,
InstallIntent,
)
install = InstallIntent.YES if params["setup"] else InstallIntent.NO
device_serial = params.get("deviceSerial")
verify_android_device(
command_context,
network=True,
install=install,
device_serial=device_serial,
)
if not params["adbPath"]:
params["adbPath"] = get_adb_path(command_context)
xpcshell = command_context._spawn(AndroidXPCShellRunner)
else:
xpcshell = command_context._spawn(XPCShellRunner)
xpcshell.cwd = command_context._mach_context.cwd
install = InstallIntent.YES if params["setup"] else InstallIntent.NO
device_serial = params.get("deviceSerial")
verify_android_device(
command_context,
network=True,
install=install,
device_serial=device_serial,
)
if not params["adbPath"]:
params["adbPath"] = get_adb_path(command_context)
xpcshell = command_context._spawn(AndroidXPCShellRunner)
else:
xpcshell = command_context._spawn(XPCShellRunner)
xpcshell.cwd = command_context._mach_context.cwd
try:
return xpcshell.run_test(**params)
except InvalidTestPathError as e:
print(str(e))
return 1
try:
return xpcshell.run_test(**params)
except InvalidTestPathError as e:
print(str(e))
return 1

View File

@ -14,8 +14,10 @@ import mozlog
from xpcshellcommandline import parser_desktop
from mach.decorators import (
CommandProvider,
Command,
)
from mozbuild.base import MachCommandBase
def run_xpcshell(context, **kwargs):
@ -50,12 +52,14 @@ def run_xpcshell(context, **kwargs):
return xpcshell.runTests(**vars(args))
@Command(
"xpcshell-test",
category="testing",
description="Run the xpcshell harness.",
parser=parser_desktop,
)
def xpcshell(command_context, **kwargs):
command_context._mach_context.activate_mozharness_venv()
return run_xpcshell(command_context._mach_context, **kwargs)
@CommandProvider
class MochitestCommands(MachCommandBase):
@Command(
"xpcshell-test",
category="testing",
description="Run the xpcshell harness.",
parser=parser_desktop,
)
def xpcshell(self, command_context, **kwargs):
command_context._mach_context.activate_mozharness_venv()
return run_xpcshell(command_context._mach_context, **kwargs)

View File

@ -2,29 +2,32 @@
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from mach.decorators import Command, CommandArgument
from mach.decorators import CommandProvider, Command, CommandArgument
from mozbuild.base import MachCommandBase
@Command(
"data-review",
category="misc",
description="Generate a skeleton data review request form for a given bug's data",
)
@CommandArgument(
"bug", default=None, nargs="?", type=str, help="bug number or search pattern"
)
def data_review(command_context, bug=None):
# Get the metrics_index's list of metrics indices
# by loading the index as a module.
from os import path
import sys
sys.path.append(path.join(path.dirname(__file__), path.pardir))
from metrics_index import metrics_yamls
from glean_parser import data_review
from pathlib import Path
return data_review.generate(
bug, [Path(command_context.topsrcdir) / x for x in metrics_yamls]
@CommandProvider
class DataReviewer(MachCommandBase):
@Command(
"data-review",
category="misc",
description="Generate a skeleton data review request form for a given bug's data",
)
@CommandArgument(
"bug", default=None, nargs="?", type=str, help="bug number or search pattern"
)
def data_review(self, command_context, bug=None):
# Get the metrics_index's list of metrics indices
# by loading the index as a module.
from os import path
import sys
sys.path.append(path.join(path.dirname(__file__), path.pardir))
from metrics_index import metrics_yamls
from glean_parser import data_review
from pathlib import Path
return data_review.generate(
bug, [Path(command_context.topsrcdir) / x for x in metrics_yamls]
)

View File

@ -8,9 +8,10 @@ import logging
import os
import sys
from mach.decorators import Command
from mach.decorators import CommandProvider, Command
from mozbuild.base import (
MachCommandBase,
MachCommandConditions as conditions,
BinaryNotFoundException,
)
@ -64,35 +65,37 @@ def run_telemetry(tests, binary=None, topsrcdir=None, **kwargs):
return 0
@Command(
"telemetry-tests-client",
category="testing",
description="Run tests specifically for the Telemetry client",
conditions=[conditions.is_firefox_or_android],
parser=create_parser_tests,
)
def telemetry_test(command_context, tests, **kwargs):
if "test_objects" in kwargs:
tests = []
for obj in kwargs["test_objects"]:
tests.append(obj["file_relpath"])
del kwargs["test_objects"]
if not kwargs.get("binary") and conditions.is_firefox(command_context):
try:
kwargs["binary"] = command_context.get_binary_path("app")
except BinaryNotFoundException as e:
command_context.log(
logging.ERROR,
"telemetry-tests-client",
{"error": str(e)},
"ERROR: {error}",
)
command_context.log(
logging.INFO, "telemetry-tests-client", {"help": e.help()}, "{help}"
)
return 1
if not kwargs.get("server_root"):
kwargs[
"server_root"
] = "toolkit/components/telemetry/tests/marionette/harness/www"
return run_telemetry(tests, topsrcdir=command_context.topsrcdir, **kwargs)
@CommandProvider
class TelemetryTest(MachCommandBase):
@Command(
"telemetry-tests-client",
category="testing",
description="Run tests specifically for the Telemetry client",
conditions=[conditions.is_firefox_or_android],
parser=create_parser_tests,
)
def telemetry_test(self, command_context, tests, **kwargs):
if "test_objects" in kwargs:
tests = []
for obj in kwargs["test_objects"]:
tests.append(obj["file_relpath"])
del kwargs["test_objects"]
if not kwargs.get("binary") and conditions.is_firefox(command_context):
try:
kwargs["binary"] = command_context.get_binary_path("app")
except BinaryNotFoundException as e:
command_context.log(
logging.ERROR,
"telemetry-tests-client",
{"error": str(e)},
"ERROR: {error}",
)
command_context.log(
logging.INFO, "telemetry-tests-client", {"help": e.help()}, "{help}"
)
return 1
if not kwargs.get("server_root"):
kwargs[
"server_root"
] = "toolkit/components/telemetry/tests/marionette/harness/www"
return run_telemetry(tests, topsrcdir=command_context.topsrcdir, **kwargs)

File diff suppressed because it is too large Load Diff

View File

@ -8,9 +8,11 @@ from appdirs import user_config_dir
from hglib.error import CommandError
from mach.decorators import (
CommandArgument,
CommandProvider,
Command,
)
from mach.base import FailedCommandError
from mozbuild.base import MachCommandBase
from mozrelease.scriptworker_canary import get_secret
from pathlib import Path
from redo import retry
@ -20,80 +22,84 @@ import os
import tempfile
@Command(
"compare-locales",
category="build",
description="Run source checks on a localization.",
)
@CommandArgument(
"config_paths",
metavar="l10n.toml",
nargs="+",
help="TOML or INI file for the project",
)
@CommandArgument(
"l10n_base_dir",
metavar="l10n-base-dir",
help="Parent directory of localizations",
)
@CommandArgument(
"locales",
nargs="*",
metavar="locale-code",
help="Locale code and top-level directory of each localization",
)
@CommandArgument(
"-q",
"--quiet",
action="count",
default=0,
help="""Show less data.
@CommandProvider
class CompareLocales(MachCommandBase):
@Command(
"compare-locales",
category="build",
description="Run source checks on a localization.",
)
@CommandArgument(
"config_paths",
metavar="l10n.toml",
nargs="+",
help="TOML or INI file for the project",
)
@CommandArgument(
"l10n_base_dir",
metavar="l10n-base-dir",
help="Parent directory of localizations",
)
@CommandArgument(
"locales",
nargs="*",
metavar="locale-code",
help="Locale code and top-level directory of " "each localization",
)
@CommandArgument(
"-q",
"--quiet",
action="count",
default=0,
help="""Show less data.
Specified once, don't show obsolete entities. Specified twice, also hide
missing entities. Specify thrice to exclude warnings and four times to
just show stats""",
)
@CommandArgument("-m", "--merge", help="""Use this directory to stage merged files""")
@CommandArgument(
"--validate", action="store_true", help="Run compare-locales against reference"
)
@CommandArgument(
"--json",
help="""Serialize to JSON. Value is the name of
)
@CommandArgument(
"-m", "--merge", help="""Use this directory to stage merged files"""
)
@CommandArgument(
"--validate", action="store_true", help="Run compare-locales against reference"
)
@CommandArgument(
"--json",
help="""Serialize to JSON. Value is the name of
the output file, pass "-" to serialize to stdout and hide the default output.
""",
)
@CommandArgument(
"-D",
action="append",
metavar="var=value",
default=[],
dest="defines",
help="Overwrite variables in TOML files",
)
@CommandArgument(
"--full", action="store_true", help="Compare projects that are disabled"
)
@CommandArgument(
"--return-zero", action="store_true", help="Return 0 regardless of l10n status"
)
def compare(command_context, **kwargs):
"""Run compare-locales."""
from compare_locales.commands import CompareLocales
)
@CommandArgument(
"-D",
action="append",
metavar="var=value",
default=[],
dest="defines",
help="Overwrite variables in TOML files",
)
@CommandArgument(
"--full", action="store_true", help="Compare projects that are disabled"
)
@CommandArgument(
"--return-zero", action="store_true", help="Return 0 regardless of l10n status"
)
def compare(self, command_context, **kwargs):
"""Run compare-locales."""
from compare_locales.commands import CompareLocales
class ErrorHelper(object):
"""Dummy ArgumentParser to marshall compare-locales
commandline errors to mach exceptions.
"""
class ErrorHelper(object):
"""Dummy ArgumentParser to marshall compare-locales
commandline errors to mach exceptions.
"""
def error(self, msg):
raise FailedCommandError(msg)
def error(self, msg):
raise FailedCommandError(msg)
def exit(self, message=None, status=0):
raise FailedCommandError(message, exit_code=status)
def exit(self, message=None, status=0):
raise FailedCommandError(message, exit_code=status)
cmd = CompareLocales()
cmd.parser = ErrorHelper()
return cmd.handle(**kwargs)
cmd = CompareLocales()
cmd.parser = ErrorHelper()
return cmd.handle(**kwargs)
# https://stackoverflow.com/a/14117511
@ -114,286 +120,289 @@ FXTREE_PATH = VCT_PATH / "hgext" / "firefoxtree"
HGRC_PATH = Path(user_config_dir("hg")).joinpath("hgrc")
@Command(
"l10n-cross-channel",
category="misc",
description="Create cross-channel content.",
)
@CommandArgument(
"--strings-path",
"-s",
metavar="en-US",
type=Path,
default=Path("en-US"),
help="Path to mercurial repository for gecko-strings-quarantine",
)
@CommandArgument(
"--outgoing-path",
"-o",
type=Path,
help="create an outgoing() patch if there are changes",
)
@CommandArgument(
"--attempts",
type=_positive_int,
default=1,
help="Number of times to try (for automation)",
)
@CommandArgument(
"--ssh-secret",
action="store",
help="Taskcluster secret to use to push (for automation)",
)
@CommandArgument(
"actions",
choices=("prep", "create", "push"),
nargs="+",
# This help block will be poorly formatted until we fix bug 1714239
help="""
"prep": clone repos and pull heads.
"create": create the en-US strings commit an optionally create an
outgoing() patch.
"push": push the en-US strings to the quarantine repo.
""",
)
def cross_channel(
command_context,
strings_path,
outgoing_path,
actions,
attempts,
ssh_secret,
**kwargs,
):
"""Run l10n cross-channel content generation."""
# This can be any path, as long as the name of the directory is en-US.
# Not entirely sure where this is a requirement; perhaps in l10n
# string manipulation logic?
if strings_path.name != "en-US":
raise FailedCommandError("strings_path needs to be named `en-US`")
command_context.activate_virtualenv()
# XXX pin python requirements
command_context.virtualenv_manager.install_pip_requirements(
Path(os.path.dirname(__file__)) / "requirements.in"
@CommandProvider
class CrossChannel(MachCommandBase):
@Command(
"l10n-cross-channel",
category="misc",
description="Create cross-channel content.",
)
strings_path = strings_path.resolve() # abspath
if outgoing_path:
outgoing_path = outgoing_path.resolve() # abspath
try:
with tempfile.TemporaryDirectory() as ssh_key_dir:
retry(
_do_create_content,
attempts=attempts,
retry_exceptions=(RetryError,),
args=(
command_context,
strings_path,
outgoing_path,
ssh_secret,
Path(ssh_key_dir),
actions,
),
)
except RetryError as exc:
raise FailedCommandError(exc) from exc
def _do_create_content(
command_context,
strings_path,
outgoing_path,
ssh_secret,
ssh_key_dir,
actions,
):
from mozxchannel import CrossChannelCreator, get_default_config
config = get_default_config(Path(command_context.topsrcdir), strings_path)
ccc = CrossChannelCreator(config)
status = 0
changes = False
ssh_key_secret = None
ssh_key_file = None
if "prep" in actions:
if ssh_secret:
if not os.environ.get("MOZ_AUTOMATION"):
raise CommandError(
"I don't know how to fetch the ssh secret outside of automation!"
@CommandArgument(
"--strings-path",
"-s",
metavar="en-US",
type=Path,
default=Path("en-US"),
help="Path to mercurial repository for gecko-strings-quarantine",
)
@CommandArgument(
"--outgoing-path",
"-o",
type=Path,
help="create an outgoing() patch if there are changes",
)
@CommandArgument(
"--attempts",
type=_positive_int,
default=1,
help="Number of times to try (for automation)",
)
@CommandArgument(
"--ssh-secret",
action="store",
help="Taskcluster secret to use to push (for automation)",
)
@CommandArgument(
"actions",
choices=("prep", "create", "push"),
nargs="+",
# This help block will be poorly formatted until we fix bug 1714239
help="""
"prep": clone repos and pull heads.
"create": create the en-US strings commit an optionally create an
outgoing() patch.
"push": push the en-US strings to the quarantine repo.
""",
)
def cross_channel(
self,
command_context,
strings_path,
outgoing_path,
actions,
attempts,
ssh_secret,
**kwargs,
):
"""Run l10n cross-channel content generation."""
# This can be any path, as long as the name of the directory is en-US.
# Not entirely sure where this is a requirement; perhaps in l10n
# string manipulation logic?
if strings_path.name != "en-US":
raise FailedCommandError("strings_path needs to be named `en-US`")
command_context.activate_virtualenv()
# XXX pin python requirements
command_context.virtualenv_manager.install_pip_requirements(
Path(os.path.dirname(__file__)) / "requirements.in"
)
strings_path = strings_path.resolve() # abspath
if outgoing_path:
outgoing_path = outgoing_path.resolve() # abspath
try:
with tempfile.TemporaryDirectory() as ssh_key_dir:
retry(
self._do_create_content,
attempts=attempts,
retry_exceptions=(RetryError,),
args=(
command_context,
strings_path,
outgoing_path,
ssh_secret,
Path(ssh_key_dir),
actions,
),
)
ssh_key_secret = get_secret(ssh_secret)
ssh_key_file = ssh_key_dir.joinpath("id_rsa")
ssh_key_file.write_text(ssh_key_secret["ssh_privkey"])
ssh_key_file.chmod(0o600)
# Set up firefoxtree for comm per bug 1659691 comment 22
if os.environ.get("MOZ_AUTOMATION") and not HGRC_PATH.exists():
_clone_hg_repo(command_context, VCT_URL, VCT_PATH)
hgrc_content = [
"[extensions]",
f"firefoxtree = {FXTREE_PATH}",
"",
"[ui]",
"username = trybld",
]
if ssh_key_file:
hgrc_content.extend(
[
f"ssh = ssh -i {ssh_key_file} -l {ssh_key_secret['user']}",
]
)
HGRC_PATH.write_text("\n".join(hgrc_content))
if strings_path.exists() and _check_outgoing(command_context, strings_path):
_strip_outgoing(command_context, strings_path)
# Clone strings + source repos, pull heads
for repo_config in (config["strings"], *config["source"].values()):
if not repo_config["path"].exists():
_clone_hg_repo(
command_context, repo_config["url"], str(repo_config["path"])
)
for head in repo_config["heads"].keys():
command = ["hg", "--cwd", str(repo_config["path"]), "pull"]
command.append(head)
status = _retry_run_process(
command_context, command, ensure_exit_code=False
)
if status not in (0, 255): # 255 on pull with no changes
raise RetryError(f"Failure on pull: status {status}!")
if repo_config.get("update_on_pull"):
command = [
"hg",
"--cwd",
str(repo_config["path"]),
"up",
"-C",
"-r",
head,
]
status = _retry_run_process(
except RetryError as exc:
raise FailedCommandError(exc) from exc
def _do_create_content(
self,
command_context,
strings_path,
outgoing_path,
ssh_secret,
ssh_key_dir,
actions,
):
from mozxchannel import CrossChannelCreator, get_default_config
config = get_default_config(Path(command_context.topsrcdir), strings_path)
ccc = CrossChannelCreator(config)
status = 0
changes = False
ssh_key_secret = None
ssh_key_file = None
if "prep" in actions:
if ssh_secret:
if not os.environ.get("MOZ_AUTOMATION"):
raise CommandError(
"I don't know how to fetch the ssh secret outside of automation!"
)
ssh_key_secret = get_secret(ssh_secret)
ssh_key_file = ssh_key_dir.joinpath("id_rsa")
ssh_key_file.write_text(ssh_key_secret["ssh_privkey"])
ssh_key_file.chmod(0o600)
# Set up firefoxtree for comm per bug 1659691 comment 22
if os.environ.get("MOZ_AUTOMATION") and not HGRC_PATH.exists():
self._clone_hg_repo(command_context, VCT_URL, VCT_PATH)
hgrc_content = [
"[extensions]",
f"firefoxtree = {FXTREE_PATH}",
"",
"[ui]",
"username = trybld",
]
if ssh_key_file:
hgrc_content.extend(
[
f"ssh = ssh -i {ssh_key_file} -l {ssh_key_secret['user']}",
]
)
HGRC_PATH.write_text("\n".join(hgrc_content))
if strings_path.exists() and self._check_outgoing(
command_context, strings_path
):
self._strip_outgoing(command_context, strings_path)
# Clone strings + source repos, pull heads
for repo_config in (config["strings"], *config["source"].values()):
if not repo_config["path"].exists():
self._clone_hg_repo(
command_context, repo_config["url"], str(repo_config["path"])
)
for head in repo_config["heads"].keys():
command = ["hg", "--cwd", str(repo_config["path"]), "pull"]
command.append(head)
status = self._retry_run_process(
command_context, command, ensure_exit_code=False
)
if status not in (0, 255): # 255 on pull with no changes
raise RetryError(f"Failure on update: status {status}!")
_check_hg_repo(
command_context,
repo_config["path"],
heads=repo_config.get("heads", {}).keys(),
)
else:
_check_hg_repo(command_context, strings_path)
for repo_config in config.get("source", {}).values():
_check_hg_repo(
command_context,
repo_config["path"],
heads=repo_config.get("heads", {}).keys(),
)
if _check_outgoing(command_context, strings_path):
raise RetryError(f"check: Outgoing changes in {strings_path}!")
raise RetryError(f"Failure on pull: status {status}!")
if repo_config.get("update_on_pull"):
command = [
"hg",
"--cwd",
str(repo_config["path"]),
"up",
"-C",
"-r",
head,
]
status = self._retry_run_process(
command_context, command, ensure_exit_code=False
)
if status not in (0, 255): # 255 on pull with no changes
raise RetryError(f"Failure on update: status {status}!")
self._check_hg_repo(
command_context,
repo_config["path"],
heads=repo_config.get("heads", {}).keys(),
)
else:
self._check_hg_repo(command_context, strings_path)
for repo_config in config.get("source", {}).values():
self._check_hg_repo(
command_context,
repo_config["path"],
heads=repo_config.get("heads", {}).keys(),
)
if self._check_outgoing(command_context, strings_path):
raise RetryError(f"check: Outgoing changes in {strings_path}!")
if "create" in actions:
try:
status = ccc.create_content()
changes = True
_create_outgoing_patch(command_context, outgoing_path, strings_path)
except CommandError as exc:
if exc.ret != 1:
raise RetryError(exc) from exc
command_context.log(logging.INFO, "create", {}, "No new strings.")
if "create" in actions:
try:
status = ccc.create_content()
changes = True
self._create_outgoing_patch(
command_context, outgoing_path, strings_path
)
except CommandError as exc:
if exc.ret != 1:
raise RetryError(exc) from exc
command_context.log(logging.INFO, "create", {}, "No new strings.")
if "push" in actions:
if changes:
_retry_run_process(
if "push" in actions:
if changes:
self._retry_run_process(
command_context,
[
"hg",
"--cwd",
str(strings_path),
"push",
"-r",
".",
config["strings"]["push_url"],
],
line_handler=print,
)
else:
command_context.log(logging.INFO, "push", {}, "Skipping empty push.")
return status
def _check_outgoing(self, command_context, strings_path):
status = self._retry_run_process(
command_context,
["hg", "--cwd", str(strings_path), "out", "-r", "."],
ensure_exit_code=False,
)
if status == 0:
return True
if status == 1:
return False
raise RetryError(
f"Outgoing check in {strings_path} returned unexpected {status}!"
)
def _strip_outgoing(self, command_context, strings_path):
self._retry_run_process(
command_context,
[
"hg",
"--config",
"extensions.strip=",
"--cwd",
str(strings_path),
"strip",
"--no-backup",
"outgoing()",
],
)
def _create_outgoing_patch(self, command_context, path, strings_path):
if not path:
return
if not path.parent.exists():
os.makedirs(path.parent)
with open(path, "w") as fh:
def writeln(line):
fh.write(f"{line}\n")
self._retry_run_process(
command_context,
[
"hg",
"--cwd",
str(strings_path),
"push",
"log",
"--patch",
"--verbose",
"-r",
".",
config["strings"]["push_url"],
"outgoing()",
],
line_handler=print,
)
else:
command_context.log(logging.INFO, "push", {}, "Skipping empty push.")
return status
def _check_outgoing(command_context, strings_path):
status = _retry_run_process(
command_context,
["hg", "--cwd", str(strings_path), "out", "-r", "."],
ensure_exit_code=False,
)
if status == 0:
return True
if status == 1:
return False
raise RetryError(f"Outgoing check in {strings_path} returned unexpected {status}!")
def _strip_outgoing(command_context, strings_path):
_retry_run_process(
command_context,
[
"hg",
"--config",
"extensions.strip=",
"--cwd",
str(strings_path),
"strip",
"--no-backup",
"outgoing()",
],
)
def _create_outgoing_patch(command_context, path, strings_path):
if not path:
return
if not path.parent.exists():
os.makedirs(path.parent)
with open(path, "w") as fh:
def writeln(line):
fh.write(f"{line}\n")
_retry_run_process(
command_context,
[
"hg",
"--cwd",
str(strings_path),
"log",
"--patch",
"--verbose",
"-r",
"outgoing()",
],
line_handler=writeln,
)
def _retry_run_process(command_context, *args, error_msg=None, **kwargs):
try:
return command_context.run_process(*args, **kwargs)
except Exception as exc:
raise RetryError(error_msg or str(exc)) from exc
def _check_hg_repo(command_context, path, heads=None):
if not (path.is_dir() and (path / ".hg").is_dir()):
raise RetryError(f"{path} is not a Mercurial repository")
if heads:
for head in heads:
_retry_run_process(
command_context,
["hg", "--cwd", str(path), "log", "-r", head],
error_msg=f"check: {path} has no head {head}!",
line_handler=writeln,
)
def _retry_run_process(self, command_context, *args, error_msg=None, **kwargs):
try:
return command_context.run_process(*args, **kwargs)
except Exception as exc:
raise RetryError(error_msg or str(exc)) from exc
def _clone_hg_repo(command_context, url, path):
_retry_run_process(command_context, ["hg", "clone", url, str(path)])
def _check_hg_repo(self, command_context, path, heads=None):
if not (path.is_dir() and (path / ".hg").is_dir()):
raise RetryError(f"{path} is not a Mercurial repository")
if heads:
for head in heads:
self._retry_run_process(
command_context,
["hg", "--cwd", str(path), "log", "-r", head],
error_msg=f"check: {path} has no head {head}!",
)
def _clone_hg_repo(self, command_context, url, path):
self._retry_run_process(command_context, ["hg", "clone", url, str(path)])

View File

@ -8,11 +8,13 @@ import os
from mozbuild.base import (
BuildEnvironmentNotFoundException,
MachCommandBase,
)
from mach.decorators import (
CommandArgument,
CommandProvider,
Command,
)
@ -59,105 +61,109 @@ def get_global_excludes(topsrcdir):
return excludes
@Command(
"lint",
category="devenv",
description="Run linters.",
parser=setup_argument_parser,
)
def lint(command_context, *runargs, **lintargs):
"""Run linters."""
command_context.activate_virtualenv()
from mozlint import cli, parser
try:
buildargs = {}
buildargs["substs"] = copy.deepcopy(dict(command_context.substs))
buildargs["defines"] = copy.deepcopy(dict(command_context.defines))
buildargs["topobjdir"] = command_context.topobjdir
lintargs.update(buildargs)
except BuildEnvironmentNotFoundException:
pass
lintargs.setdefault("root", command_context.topsrcdir)
lintargs["exclude"] = get_global_excludes(lintargs["root"])
lintargs["config_paths"].insert(0, here)
lintargs["virtualenv_bin_path"] = command_context.virtualenv_manager.bin_path
lintargs["virtualenv_manager"] = command_context.virtualenv_manager
for path in EXCLUSION_FILES:
parser.GLOBAL_SUPPORT_FILES.append(
os.path.join(command_context.topsrcdir, path)
)
return cli.run(*runargs, **lintargs)
@Command(
"eslint",
category="devenv",
description="Run eslint or help configure eslint for optimal development.",
)
@CommandArgument(
"paths",
default=None,
nargs="*",
help="Paths to file or directories to lint, like "
"'browser/' Defaults to the "
"current directory if not given.",
)
@CommandArgument(
"-s",
"--setup",
default=False,
action="store_true",
help="Configure eslint for optimal development.",
)
@CommandArgument("-b", "--binary", default=None, help="Path to eslint binary.")
@CommandArgument(
"--fix",
default=False,
action="store_true",
help="Request that eslint automatically fix errors, where possible.",
)
@CommandArgument(
"extra_args",
nargs=argparse.REMAINDER,
help="Extra args that will be forwarded to eslint.",
)
def eslint(command_context, paths, extra_args=[], **kwargs):
command_context._mach_context.commands.dispatch(
@CommandProvider
class MachCommands(MachCommandBase):
@Command(
"lint",
command_context._mach_context,
linters=["eslint"],
paths=paths,
argv=extra_args,
**kwargs
category="devenv",
description="Run linters.",
parser=setup_argument_parser,
)
def lint(self, command_context, *runargs, **lintargs):
"""Run linters."""
command_context.activate_virtualenv()
from mozlint import cli, parser
try:
buildargs = {}
buildargs["substs"] = copy.deepcopy(dict(command_context.substs))
buildargs["defines"] = copy.deepcopy(dict(command_context.defines))
buildargs["topobjdir"] = command_context.topobjdir
lintargs.update(buildargs)
except BuildEnvironmentNotFoundException:
pass
@Command(
"format",
category="devenv",
description="Format files, alternative to 'lint --fix' ",
parser=setup_argument_parser,
)
def format(command_context, paths, extra_args=[], **kwargs):
linters = kwargs["linters"]
if not linters:
linters = VALID_FORMATTERS
else:
invalid_linters = set(linters) - VALID_FORMATTERS
if invalid_linters:
print(
"error: One or more linters passed are not valid formatters. "
"Note that only the following linters are valid formatters:"
lintargs.setdefault("root", command_context.topsrcdir)
lintargs["exclude"] = get_global_excludes(lintargs["root"])
lintargs["config_paths"].insert(0, here)
lintargs["virtualenv_bin_path"] = command_context.virtualenv_manager.bin_path
lintargs["virtualenv_manager"] = command_context.virtualenv_manager
for path in EXCLUSION_FILES:
parser.GLOBAL_SUPPORT_FILES.append(
os.path.join(command_context.topsrcdir, path)
)
print("\n".join(sorted(VALID_FORMATTERS)))
return 1
return cli.run(*runargs, **lintargs)
kwargs["linters"] = list(linters)
kwargs["fix"] = True
command_context._mach_context.commands.dispatch(
"lint", command_context._mach_context, paths=paths, argv=extra_args, **kwargs
@Command(
"eslint",
category="devenv",
description="Run eslint or help configure eslint for optimal development.",
)
@CommandArgument(
"paths",
default=None,
nargs="*",
help="Paths to file or directories to lint, like "
"'browser/' Defaults to the "
"current directory if not given.",
)
@CommandArgument(
"-s",
"--setup",
default=False,
action="store_true",
help="Configure eslint for optimal development.",
)
@CommandArgument("-b", "--binary", default=None, help="Path to eslint binary.")
@CommandArgument(
"--fix",
default=False,
action="store_true",
help="Request that eslint automatically fix errors, where possible.",
)
@CommandArgument(
"extra_args",
nargs=argparse.REMAINDER,
help="Extra args that will be forwarded to eslint.",
)
def eslint(self, command_context, paths, extra_args=[], **kwargs):
command_context._mach_context.commands.dispatch(
"lint",
command_context._mach_context,
linters=["eslint"],
paths=paths,
argv=extra_args,
**kwargs
)
@Command(
"format",
category="devenv",
description="Format files, alternative to 'lint --fix' ",
parser=setup_argument_parser,
)
def format(self, command_context, paths, extra_args=[], **kwargs):
linters = kwargs["linters"]
if not linters:
linters = VALID_FORMATTERS
else:
invalid_linters = set(linters) - VALID_FORMATTERS
if invalid_linters:
print(
"error: One or more linters passed are not valid formatters. "
"Note that only the following linters are valid formatters:"
)
print("\n".join(sorted(VALID_FORMATTERS)))
return 1
kwargs["linters"] = list(linters)
kwargs["fix"] = True
command_context._mach_context.commands.dispatch(
"lint",
command_context._mach_context,
paths=paths,
argv=extra_args,
**kwargs
)

View File

@ -12,11 +12,12 @@ import sys
from mach.decorators import (
CommandArgument,
CommandProvider,
Command,
SubCommand,
)
from mozbuild.base import MozbuildObject
from mozbuild.base import MachCommandBase, MozbuildObject
def _get_busted_bugs(payload):
@ -30,88 +31,89 @@ def _get_busted_bugs(payload):
return response.json().get("bugs", [])
@Command(
"busted",
category="misc",
description="Query known bugs in our tooling, and file new ones.",
)
def busted_default(command_context):
unresolved = _get_busted_bugs({"resolution": "---"})
creation_time = datetime.now() - timedelta(days=15)
creation_time = creation_time.strftime("%Y-%m-%dT%H-%M-%SZ")
resolved = _get_busted_bugs({"creation_time": creation_time})
resolved = [bug for bug in resolved if bug["resolution"]]
all_bugs = sorted(
unresolved + resolved, key=itemgetter("last_change_time"), reverse=True
@CommandProvider
class BustedProvider(MachCommandBase):
@Command(
"busted",
category="misc",
description="Query known bugs in our tooling, and file new ones.",
)
if all_bugs:
for bug in all_bugs:
print(
"[%s] Bug %s - %s"
% (
"UNRESOLVED"
if not bug["resolution"]
else "RESOLVED - %s" % bug["resolution"],
bug["id"],
bug["summary"],
def busted_default(self, command_context):
unresolved = _get_busted_bugs({"resolution": "---"})
creation_time = datetime.now() - timedelta(days=15)
creation_time = creation_time.strftime("%Y-%m-%dT%H-%M-%SZ")
resolved = _get_busted_bugs({"creation_time": creation_time})
resolved = [bug for bug in resolved if bug["resolution"]]
all_bugs = sorted(
unresolved + resolved, key=itemgetter("last_change_time"), reverse=True
)
if all_bugs:
for bug in all_bugs:
print(
"[%s] Bug %s - %s"
% (
"UNRESOLVED"
if not bug["resolution"]
else "RESOLVED - %s" % bug["resolution"],
bug["id"],
bug["summary"],
)
)
else:
print("No known tooling issues found.")
@SubCommand("busted", "file", description="File a bug for busted tooling.")
@CommandArgument(
"against",
help=(
"The specific mach command that is busted (i.e. if you encountered "
"an error with `mach build`, run `mach busted file build`). If "
"the issue is not connected to any particular mach command, you "
"can also run `mach busted file general`."
),
)
def busted_file(self, command_context, against):
import webbrowser
if (
against != "general"
and against not in command_context._mach_context.commands.command_handlers
):
print(
"%s is not a valid value for `against`. `against` must be "
"the name of a `mach` command, or else the string "
'"general".' % against
)
else:
print("No known tooling issues found.")
return 1
@SubCommand("busted", "file", description="File a bug for busted tooling.")
@CommandArgument(
"against",
help=(
"The specific mach command that is busted (i.e. if you encountered "
"an error with `mach build`, run `mach busted file build`). If "
"the issue is not connected to any particular mach command, you "
"can also run `mach busted file general`."
),
)
def busted_file(command_context, against):
import webbrowser
if (
against != "general"
and against not in command_context._mach_context.commands.command_handlers
):
print(
"%s is not a valid value for `against`. `against` must be "
"the name of a `mach` command, or else the string "
'"general".' % against
)
return 1
if against == "general":
product = "Firefox Build System"
component = "General"
else:
import inspect
import mozpack.path as mozpath
# Look up the file implementing that command, then cross-reference
# moz.build files to get the product/component.
handler = command_context._mach_context.commands.command_handlers[against]
method = getattr(handler.cls, handler.method)
sourcefile = mozpath.relpath(
inspect.getsourcefile(method), command_context.topsrcdir
)
reader = command_context.mozbuild_reader(config_mode="empty")
try:
res = reader.files_info([sourcefile])[sourcefile]["BUG_COMPONENT"]
product, component = res.product, res.component
except TypeError:
# The file might not have a bug set.
if against == "general":
product = "Firefox Build System"
component = "General"
else:
import inspect
import mozpack.path as mozpath
uri = (
"https://bugzilla.mozilla.org/enter_bug.cgi?"
"product=%s&component=%s&blocked=1543241" % (product, component)
)
webbrowser.open_new_tab(uri)
# Look up the file implementing that command, then cross-reference
# moz.build files to get the product/component.
handler = command_context._mach_context.commands.command_handlers[against]
method = getattr(handler.cls, handler.method)
sourcefile = mozpath.relpath(
inspect.getsourcefile(method), command_context.topsrcdir
)
reader = command_context.mozbuild_reader(config_mode="empty")
try:
res = reader.files_info([sourcefile])[sourcefile]["BUG_COMPONENT"]
product, component = res.product, res.component
except TypeError:
# The file might not have a bug set.
product = "Firefox Build System"
component = "General"
uri = (
"https://bugzilla.mozilla.org/enter_bug.cgi?"
"product=%s&component=%s&blocked=1543241" % (product, component)
)
webbrowser.open_new_tab(uri)
MACH_PASTEBIN_DURATIONS = {
@ -234,123 +236,127 @@ appropriate highlighter.
"""
@Command("pastebin", category="misc", description=MACH_PASTEBIN_DESCRIPTION)
@CommandArgument(
"--list-highlighters",
action="store_true",
help="List known highlighters and exit",
)
@CommandArgument(
"--highlighter", default=None, help="Syntax highlighting to use for paste"
)
@CommandArgument(
"--expires",
default="week",
choices=sorted(MACH_PASTEBIN_DURATIONS.keys()),
help="Expire paste after given time duration (default: %(default)s)",
)
@CommandArgument(
"--verbose",
action="store_true",
help="Print extra info such as selected syntax highlighter",
)
@CommandArgument(
"path",
nargs="?",
default=None,
help="Path to file for upload to paste.mozilla.org",
)
def pastebin(command_context, list_highlighters, highlighter, expires, verbose, path):
import requests
@CommandProvider
class PastebinProvider(MachCommandBase):
@Command("pastebin", category="misc", description=MACH_PASTEBIN_DESCRIPTION)
@CommandArgument(
"--list-highlighters",
action="store_true",
help="List known highlighters and exit",
)
@CommandArgument(
"--highlighter", default=None, help="Syntax highlighting to use for paste"
)
@CommandArgument(
"--expires",
default="week",
choices=sorted(MACH_PASTEBIN_DURATIONS.keys()),
help="Expire paste after given time duration (default: %(default)s)",
)
@CommandArgument(
"--verbose",
action="store_true",
help="Print extra info such as selected syntax highlighter",
)
@CommandArgument(
"path",
nargs="?",
default=None,
help="Path to file for upload to paste.mozilla.org",
)
def pastebin(
self, command_context, list_highlighters, highlighter, expires, verbose, path
):
import requests
def verbose_print(*args, **kwargs):
"""Print a string if `--verbose` flag is set"""
if verbose:
print(*args, **kwargs)
def verbose_print(*args, **kwargs):
"""Print a string if `--verbose` flag is set"""
if verbose:
print(*args, **kwargs)
# Show known highlighters and exit.
if list_highlighters:
lexers = set(EXTENSION_TO_HIGHLIGHTER.values())
print("Available lexers:\n - %s" % "\n - ".join(sorted(lexers)))
return 0
# Show known highlighters and exit.
if list_highlighters:
lexers = set(EXTENSION_TO_HIGHLIGHTER.values())
print("Available lexers:\n" " - %s" % "\n - ".join(sorted(lexers)))
return 0
# Get a correct expiry value.
try:
verbose_print("Setting expiry from %s" % expires)
expires = MACH_PASTEBIN_DURATIONS[expires]
verbose_print("Using %s as expiry" % expires)
except KeyError:
print(
"%s is not a valid duration.\n"
"(hint: try one of %s)"
% (expires, ", ".join(MACH_PASTEBIN_DURATIONS.keys()))
)
return 1
data = {
"format": "json",
"expires": expires,
}
# Get content to be pasted.
if path:
verbose_print("Reading content from %s" % path)
# Get a correct expiry value.
try:
with open(path, "r") as f:
content = f.read()
except IOError:
print("ERROR. No such file %s" % path)
verbose_print("Setting expiry from %s" % expires)
expires = MACH_PASTEBIN_DURATIONS[expires]
verbose_print("Using %s as expiry" % expires)
except KeyError:
print(
"%s is not a valid duration.\n"
"(hint: try one of %s)"
% (expires, ", ".join(MACH_PASTEBIN_DURATIONS.keys()))
)
return 1
lexer = guess_highlighter_from_path(path)
if lexer:
data["lexer"] = lexer
else:
verbose_print("Reading content from stdin")
content = sys.stdin.read()
data = {
"format": "json",
"expires": expires,
}
# Assert the length of content to be posted does not exceed the maximum.
content_length = len(content)
verbose_print("Checking size of content is okay (%d)" % content_length)
if content_length > PASTEMO_MAX_CONTENT_LENGTH:
print(
"Paste content is too large (%d, maximum %d)"
% (content_length, PASTEMO_MAX_CONTENT_LENGTH)
)
# Get content to be pasted.
if path:
verbose_print("Reading content from %s" % path)
try:
with open(path, "r") as f:
content = f.read()
except IOError:
print("ERROR. No such file %s" % path)
return 1
lexer = guess_highlighter_from_path(path)
if lexer:
data["lexer"] = lexer
else:
verbose_print("Reading content from stdin")
content = sys.stdin.read()
# Assert the length of content to be posted does not exceed the maximum.
content_length = len(content)
verbose_print("Checking size of content is okay (%d)" % content_length)
if content_length > PASTEMO_MAX_CONTENT_LENGTH:
print(
"Paste content is too large (%d, maximum %d)"
% (content_length, PASTEMO_MAX_CONTENT_LENGTH)
)
return 1
data["content"] = content
# Highlight as specified language, overwriting value set from filename.
if highlighter:
verbose_print("Setting %s as highlighter" % highlighter)
data["lexer"] = highlighter
try:
verbose_print("Sending request to %s" % PASTEMO_URL)
resp = requests.post(PASTEMO_URL, data=data)
# Error code should always be 400.
# Response content will include a helpful error message,
# so print it here (for example, if an invalid highlighter is
# provided, it will return a list of valid highlighters).
if resp.status_code >= 400:
print("Error code %d: %s" % (resp.status_code, resp.content))
return 1
verbose_print("Pasted successfully")
response_json = resp.json()
verbose_print("Paste highlighted as %s" % response_json["lexer"])
print(response_json["url"])
return 0
except Exception as e:
print("ERROR. Paste failed.")
print("%s" % e)
return 1
data["content"] = content
# Highlight as specified language, overwriting value set from filename.
if highlighter:
verbose_print("Setting %s as highlighter" % highlighter)
data["lexer"] = highlighter
try:
verbose_print("Sending request to %s" % PASTEMO_URL)
resp = requests.post(PASTEMO_URL, data=data)
# Error code should always be 400.
# Response content will include a helpful error message,
# so print it here (for example, if an invalid highlighter is
# provided, it will return a list of valid highlighters).
if resp.status_code >= 400:
print("Error code %d: %s" % (resp.status_code, resp.content))
return 1
verbose_print("Pasted successfully")
response_json = resp.json()
verbose_print("Paste highlighted as %s" % response_json["lexer"])
print(response_json["url"])
return 0
except Exception as e:
print("ERROR. Paste failed.")
print("%s" % e)
return 1
class PypiBasedTool:
"""
@ -426,70 +432,73 @@ def mozregression_create_parser():
return loader.create_parser()
@Command(
"mozregression",
category="misc",
description=("Regression range finder for nightly and inbound builds."),
parser=mozregression_create_parser,
)
def run(command_context, **options):
command_context.activate_virtualenv()
mozregression = PypiBasedTool("mozregression")
mozregression.run(**options)
@Command(
"node",
category="devenv",
description="Run the NodeJS interpreter used for building.",
)
@CommandArgument("args", nargs=argparse.REMAINDER)
def node(command_context, args):
from mozbuild.nodeutil import find_node_executable
# Avoid logging the command
command_context.log_manager.terminal_handler.setLevel(logging.CRITICAL)
node_path, _ = find_node_executable()
return command_context.run_process(
[node_path] + args,
pass_thru=True, # Allow user to run Node interactively.
ensure_exit_code=False, # Don't throw on non-zero exit code.
@CommandProvider
class MozregressionCommand(MachCommandBase):
@Command(
"mozregression",
category="misc",
description=("Regression range finder for nightly" " and inbound builds."),
parser=mozregression_create_parser,
)
def run(self, command_context, **options):
command_context.activate_virtualenv()
mozregression = PypiBasedTool("mozregression")
mozregression.run(**options)
@Command(
"npm",
category="devenv",
description="Run the npm executable from the NodeJS used for building.",
)
@CommandArgument("args", nargs=argparse.REMAINDER)
def npm(command_context, args):
from mozbuild.nodeutil import find_npm_executable
# Avoid logging the command
command_context.log_manager.terminal_handler.setLevel(logging.CRITICAL)
import os
# Add node and npm from mozbuild to front of system path
#
# This isn't pretty, but npm currently executes itself with
# `#!/usr/bin/env node`, which means it just uses the node in the
# current PATH. As a result, stuff gets built wrong and installed
# in the wrong places and probably other badness too without this:
npm_path, _ = find_npm_executable()
if not npm_path:
exit(-1, "could not find npm executable")
path = os.path.abspath(os.path.dirname(npm_path))
os.environ["PATH"] = "{}:{}".format(path, os.environ["PATH"])
return command_context.run_process(
[npm_path, "--scripts-prepend-node-path=auto"] + args,
pass_thru=True, # Avoid eating npm output/error messages
ensure_exit_code=False, # Don't throw on non-zero exit code.
@CommandProvider
class NodeCommands(MachCommandBase):
@Command(
"node",
category="devenv",
description="Run the NodeJS interpreter used for building.",
)
@CommandArgument("args", nargs=argparse.REMAINDER)
def node(self, command_context, args):
from mozbuild.nodeutil import find_node_executable
# Avoid logging the command
command_context.log_manager.terminal_handler.setLevel(logging.CRITICAL)
node_path, _ = find_node_executable()
return command_context.run_process(
[node_path] + args,
pass_thru=True, # Allow user to run Node interactively.
ensure_exit_code=False, # Don't throw on non-zero exit code.
)
@Command(
"npm",
category="devenv",
description="Run the npm executable from the NodeJS used for building.",
)
@CommandArgument("args", nargs=argparse.REMAINDER)
def npm(self, command_context, args):
from mozbuild.nodeutil import find_npm_executable
# Avoid logging the command
command_context.log_manager.terminal_handler.setLevel(logging.CRITICAL)
import os
# Add node and npm from mozbuild to front of system path
#
# This isn't pretty, but npm currently executes itself with
# `#!/usr/bin/env node`, which means it just uses the node in the
# current PATH. As a result, stuff gets built wrong and installed
# in the wrong places and probably other badness too without this:
npm_path, _ = find_npm_executable()
if not npm_path:
exit(-1, "could not find npm executable")
path = os.path.abspath(os.path.dirname(npm_path))
os.environ["PATH"] = "{}:{}".format(path, os.environ["PATH"])
return command_context.run_process(
[npm_path, "--scripts-prepend-node-path=auto"] + args,
pass_thru=True, # Avoid eating npm output/error messages
ensure_exit_code=False, # Don't throw on non-zero exit code.
)
def logspam_create_parser(subcommand):
@ -503,31 +512,30 @@ def logspam_create_parser(subcommand):
from functools import partial
@Command(
"logspam",
category="misc",
description=("Warning categorizer for treeherder test runs."),
)
def logspam(command_context):
pass
@CommandProvider
class LogspamCommand(MachCommandBase):
@Command(
"logspam",
category="misc",
description=("Warning categorizer for treeherder test runs."),
)
def logspam(self, command_context):
pass
@SubCommand("logspam", "report", parser=partial(logspam_create_parser, "report"))
def report(self, command_context, **options):
command_context.activate_virtualenv()
logspam = PypiBasedTool("logspam")
logspam.run(command="report", **options)
@SubCommand("logspam", "report", parser=partial(logspam_create_parser, "report"))
def report(command_context, **options):
command_context.activate_virtualenv()
logspam = PypiBasedTool("logspam")
logspam.run(command="report", **options)
@SubCommand("logspam", "bisect", parser=partial(logspam_create_parser, "bisect"))
def bisect(self, command_context, **options):
command_context.activate_virtualenv()
logspam = PypiBasedTool("logspam")
logspam.run(command="bisect", **options)
@SubCommand("logspam", "bisect", parser=partial(logspam_create_parser, "bisect"))
def bisect(command_context, **options):
command_context.activate_virtualenv()
logspam = PypiBasedTool("logspam")
logspam.run(command="bisect", **options)
@SubCommand("logspam", "file", parser=partial(logspam_create_parser, "file"))
def create(command_context, **options):
command_context.activate_virtualenv()
logspam = PypiBasedTool("logspam")
logspam.run(command="file", **options)
@SubCommand("logspam", "file", parser=partial(logspam_create_parser, "file"))
def create(self, command_context, **options):
command_context.activate_virtualenv()
logspam = PypiBasedTool("logspam")
logspam.run(command="file", **options)

View File

@ -20,10 +20,12 @@ from functools import partial
from pprint import pprint
from mach.registrar import Registrar
from mozbuild.base import MachCommandBase
from mozbuild.util import memoize
from mach.decorators import (
Command,
CommandArgument,
CommandProvider,
SubCommand,
)
@ -33,373 +35,378 @@ DOC_ROOT = os.path.join(topsrcdir, "docs")
BASE_LINK = "http://gecko-docs.mozilla.org-l1.s3-website.us-west-2.amazonaws.com/"
# Helps manage in-tree documentation.
@CommandProvider
class Documentation(MachCommandBase):
"""Helps manage in-tree documentation."""
@Command(
"doc",
category="devenv",
virtualenv_name="docs",
description="Generate and serve documentation from the tree.",
)
@CommandArgument(
"path",
default=None,
metavar="DIRECTORY",
nargs="?",
help="Path to documentation to build and display.",
)
@CommandArgument(
"--format", default="html", dest="fmt", help="Documentation format to write."
)
@CommandArgument(
"--outdir", default=None, metavar="DESTINATION", help="Where to write output."
)
@CommandArgument(
"--archive",
action="store_true",
help="Write a gzipped tarball of generated docs.",
)
@CommandArgument(
"--no-open",
dest="auto_open",
default=True,
action="store_false",
help="Don't automatically open HTML docs in a browser.",
)
@CommandArgument(
"--no-serve",
dest="serve",
default=True,
action="store_false",
help="Don't serve the generated docs after building.",
)
@CommandArgument(
"--http",
default="localhost:5500",
metavar="ADDRESS",
help="Serve documentation on the specified host and port, "
'default "localhost:5500".',
)
@CommandArgument("--upload", action="store_true", help="Upload generated files to S3.")
@CommandArgument(
"-j",
"--jobs",
default=str(multiprocessing.cpu_count()),
dest="jobs",
help="Distribute the build over N processes in parallel.",
)
@CommandArgument("--write-url", default=None, help="Write S3 Upload URL to text file")
@CommandArgument("--verbose", action="store_true", help="Run Sphinx in verbose mode")
def build_docs(
command_context,
path=None,
fmt="html",
outdir=None,
auto_open=True,
serve=True,
http=None,
archive=False,
upload=False,
jobs=None,
write_url=None,
verbose=None,
):
# TODO: Bug 1704891 - move the ESLint setup tools to a shared place.
sys.path.append(mozpath.join(command_context.topsrcdir, "tools", "lint", "eslint"))
import setup_helper
setup_helper.set_project_root(command_context.topsrcdir)
if not setup_helper.check_node_executables_valid():
return 1
setup_helper.eslint_maybe_setup()
# Set the path so that Sphinx can find jsdoc, unfortunately there isn't
# a way to pass this to Sphinx itself at the moment.
os.environ["PATH"] = (
mozpath.join(command_context.topsrcdir, "node_modules", ".bin")
+ os.pathsep
+ _node_path()
+ os.pathsep
+ os.environ["PATH"]
@Command(
"doc",
category="devenv",
virtualenv_name="docs",
description="Generate and serve documentation from the tree.",
)
command_context.activate_virtualenv()
command_context.virtualenv_manager.install_pip_requirements(
os.path.join(here, "requirements.txt")
@CommandArgument(
"path",
default=None,
metavar="DIRECTORY",
nargs="?",
help="Path to documentation to build and display.",
)
@CommandArgument(
"--format", default="html", dest="fmt", help="Documentation format to write."
)
@CommandArgument(
"--outdir", default=None, metavar="DESTINATION", help="Where to write output."
)
@CommandArgument(
"--archive",
action="store_true",
help="Write a gzipped tarball of generated docs.",
)
@CommandArgument(
"--no-open",
dest="auto_open",
default=True,
action="store_false",
help="Don't automatically open HTML docs in a browser.",
)
@CommandArgument(
"--no-serve",
dest="serve",
default=True,
action="store_false",
help="Don't serve the generated docs after building.",
)
@CommandArgument(
"--http",
default="localhost:5500",
metavar="ADDRESS",
help="Serve documentation on the specified host and port, "
'default "localhost:5500".',
)
@CommandArgument(
"--upload", action="store_true", help="Upload generated files to S3."
)
@CommandArgument(
"-j",
"--jobs",
default=str(multiprocessing.cpu_count()),
dest="jobs",
help="Distribute the build over N processes in parallel.",
)
@CommandArgument(
"--write-url", default=None, help="Write S3 Upload URL to text file"
)
@CommandArgument(
"--verbose", action="store_true", help="Run Sphinx in verbose mode"
)
def build_docs(
self,
command_context,
path=None,
fmt="html",
outdir=None,
auto_open=True,
serve=True,
http=None,
archive=False,
upload=False,
jobs=None,
write_url=None,
verbose=None,
):
import webbrowser
from livereload import Server
from moztreedocs.package import create_tarball
# TODO: Bug 1704891 - move the ESLint setup tools to a shared place.
sys.path.append(
mozpath.join(command_context.topsrcdir, "tools", "lint", "eslint")
)
import setup_helper
unique_id = "%s/%s" % (project(), str(uuid.uuid1()))
setup_helper.set_project_root(command_context.topsrcdir)
outdir = outdir or os.path.join(command_context.topobjdir, "docs")
savedir = os.path.join(outdir, fmt)
if not setup_helper.check_node_executables_valid():
return 1
path = path or command_context.topsrcdir
path = os.path.normpath(os.path.abspath(path))
setup_helper.eslint_maybe_setup()
docdir = _find_doc_dir(path)
if not docdir:
print(_dump_sphinx_backtrace())
return die(
"failed to generate documentation:\n"
"%s: could not find docs at this location" % path
# Set the path so that Sphinx can find jsdoc, unfortunately there isn't
# a way to pass this to Sphinx itself at the moment.
os.environ["PATH"] = (
mozpath.join(command_context.topsrcdir, "node_modules", ".bin")
+ os.pathsep
+ self._node_path()
+ os.pathsep
+ os.environ["PATH"]
)
result = _run_sphinx(docdir, savedir, fmt=fmt, jobs=jobs, verbose=verbose)
if result != 0:
print(_dump_sphinx_backtrace())
return die(
"failed to generate documentation:\n"
"%s: sphinx return code %d" % (path, result)
command_context.activate_virtualenv()
command_context.virtualenv_manager.install_pip_requirements(
os.path.join(here, "requirements.txt")
)
else:
print("\nGenerated documentation:\n%s" % savedir)
# Upload the artifact containing the link to S3
# This would be used by code-review to post the link to Phabricator
if write_url is not None:
unique_link = BASE_LINK + unique_id + "/index.html"
with open(write_url, "w") as fp:
fp.write(unique_link)
fp.flush()
print("Generated " + write_url)
import webbrowser
from livereload import Server
from moztreedocs.package import create_tarball
if archive:
archive_path = os.path.join(outdir, "%s.tar.gz" % project())
create_tarball(archive_path, savedir)
print("Archived to %s" % archive_path)
unique_id = "%s/%s" % (self.project(), str(uuid.uuid1()))
if upload:
_s3_upload(savedir, project(), unique_id, version())
outdir = outdir or os.path.join(command_context.topobjdir, "docs")
savedir = os.path.join(outdir, fmt)
if not serve:
index_path = os.path.join(savedir, "index.html")
if auto_open and os.path.isfile(index_path):
webbrowser.open(index_path)
return
path = path or command_context.topsrcdir
path = os.path.normpath(os.path.abspath(path))
# Create livereload server. Any files modified in the specified docdir
# will cause a re-build and refresh of the browser (if open).
try:
host, port = http.split(":", 1)
port = int(port)
except ValueError:
return die("invalid address: %s" % http)
server = Server()
sphinx_trees = manager().trees or {savedir: docdir}
for _, src in sphinx_trees.items():
run_sphinx = partial(
_run_sphinx, src, savedir, fmt=fmt, jobs=jobs, verbose=verbose
)
server.watch(src, run_sphinx)
server.serve(
host=host,
port=port,
root=savedir,
open_url_delay=0.1 if auto_open else None,
)
def _dump_sphinx_backtrace():
"""
If there is a sphinx dump file, read and return
its content.
By default, it isn't displayed.
"""
pattern = "sphinx-err-*"
output = ""
tmpdir = "/tmp"
if not os.path.isdir(tmpdir):
# Only run it on Linux
return
files = os.listdir(tmpdir)
for name in files:
if fnmatch.fnmatch(name, pattern):
pathFile = os.path.join(tmpdir, name)
stat = os.stat(pathFile)
output += "Name: {0} / Creation date: {1}\n".format(
pathFile, time.ctime(stat.st_mtime)
docdir = self._find_doc_dir(path)
if not docdir:
print(self._dump_sphinx_backtrace())
return die(
"failed to generate documentation:\n"
"%s: could not find docs at this location" % path
)
with open(pathFile) as f:
output += f.read()
return output
result = self._run_sphinx(docdir, savedir, fmt=fmt, jobs=jobs, verbose=verbose)
if result != 0:
print(self._dump_sphinx_backtrace())
return die(
"failed to generate documentation:\n"
"%s: sphinx return code %d" % (path, result)
)
else:
print("\nGenerated documentation:\n%s" % savedir)
def _run_sphinx(docdir, savedir, config=None, fmt="html", jobs=None, verbose=None):
import sphinx.cmd.build
# Upload the artifact containing the link to S3
# This would be used by code-review to post the link to Phabricator
if write_url is not None:
unique_link = BASE_LINK + unique_id + "/index.html"
with open(write_url, "w") as fp:
fp.write(unique_link)
fp.flush()
print("Generated " + write_url)
config = config or manager().conf_py_path
# When running sphinx with sentry, it adds significant overhead
# and makes the build generation very very very slow
# So, disable it to generate the doc faster
sentry_sdk.init(None)
args = [
"-T",
"-b",
fmt,
"-c",
os.path.dirname(config),
docdir,
savedir,
]
if jobs:
args.extend(["-j", jobs])
if verbose:
args.extend(["-v", "-v"])
print("Run sphinx with:")
print(args)
return sphinx.cmd.build.build_main(args)
if archive:
archive_path = os.path.join(outdir, "%s.tar.gz" % self.project())
create_tarball(archive_path, savedir)
print("Archived to %s" % archive_path)
if upload:
self._s3_upload(savedir, self.project(), unique_id, self.version())
def manager():
from moztreedocs import manager
if not serve:
index_path = os.path.join(savedir, "index.html")
if auto_open and os.path.isfile(index_path):
webbrowser.open(index_path)
return
return manager
# Create livereload server. Any files modified in the specified docdir
# will cause a re-build and refresh of the browser (if open).
try:
host, port = http.split(":", 1)
port = int(port)
except ValueError:
return die("invalid address: %s" % http)
server = Server()
@memoize
def _read_project_properties():
import imp
sphinx_trees = self.manager().trees or {savedir: docdir}
for _, src in sphinx_trees.items():
run_sphinx = partial(
self._run_sphinx, src, savedir, fmt=fmt, jobs=jobs, verbose=verbose
)
server.watch(src, run_sphinx)
server.serve(
host=host,
port=port,
root=savedir,
open_url_delay=0.1 if auto_open else None,
)
path = os.path.normpath(manager().conf_py_path)
with open(path, "r") as fh:
conf = imp.load_module("doc_conf", fh, path, (".py", "r", imp.PY_SOURCE))
def _dump_sphinx_backtrace(self):
"""
If there is a sphinx dump file, read and return
its content.
By default, it isn't displayed.
"""
pattern = "sphinx-err-*"
output = ""
tmpdir = "/tmp"
# Prefer the Mozilla project name, falling back to Sphinx's
# default variable if it isn't defined.
project = getattr(conf, "moz_project_name", None)
if not project:
project = conf.project.replace(" ", "_")
if not os.path.isdir(tmpdir):
# Only run it on Linux
return
files = os.listdir(tmpdir)
for name in files:
if fnmatch.fnmatch(name, pattern):
pathFile = os.path.join(tmpdir, name)
stat = os.stat(pathFile)
output += "Name: {0} / Creation date: {1}\n".format(
pathFile, time.ctime(stat.st_mtime)
)
with open(pathFile) as f:
output += f.read()
return output
return {"project": project, "version": getattr(conf, "version", None)}
def _run_sphinx(
self, docdir, savedir, config=None, fmt="html", jobs=None, verbose=None
):
import sphinx.cmd.build
config = config or self.manager().conf_py_path
# When running sphinx with sentry, it adds significant overhead
# and makes the build generation very very very slow
# So, disable it to generate the doc faster
sentry_sdk.init(None)
args = [
"-T",
"-b",
fmt,
"-c",
os.path.dirname(config),
docdir,
savedir,
]
if jobs:
args.extend(["-j", jobs])
if verbose:
args.extend(["-v", "-v"])
print("Run sphinx with:")
print(args)
return sphinx.cmd.build.build_main(args)
def project():
return _read_project_properties()["project"]
def manager(self):
from moztreedocs import manager
return manager
def version():
return _read_project_properties()["version"]
@memoize
def _read_project_properties(self):
import imp
path = os.path.normpath(self.manager().conf_py_path)
with open(path, "r") as fh:
conf = imp.load_module("doc_conf", fh, path, (".py", "r", imp.PY_SOURCE))
def _node_path():
from mozbuild.nodeutil import find_node_executable
# Prefer the Mozilla project name, falling back to Sphinx's
# default variable if it isn't defined.
project = getattr(conf, "moz_project_name", None)
if not project:
project = conf.project.replace(" ", "_")
node, _ = find_node_executable()
return {"project": project, "version": getattr(conf, "version", None)}
return os.path.dirname(node)
def project(self):
return self._read_project_properties()["project"]
def version(self):
return self._read_project_properties()["version"]
def _find_doc_dir(path):
if os.path.isfile(path):
return
def _node_path(self):
from mozbuild.nodeutil import find_node_executable
valid_doc_dirs = ("doc", "docs")
if os.path.basename(path) in valid_doc_dirs:
return path
node, _ = find_node_executable()
for d in valid_doc_dirs:
p = os.path.join(path, d)
if os.path.isdir(p):
return p
return os.path.dirname(node)
def _find_doc_dir(self, path):
if os.path.isfile(path):
return
def _s3_upload(root, project, unique_id, version=None):
from moztreedocs.package import distribution_files
from moztreedocs.upload import s3_upload, s3_set_redirects
valid_doc_dirs = ("doc", "docs")
if os.path.basename(path) in valid_doc_dirs:
return path
# Workaround the issue
# BlockingIOError: [Errno 11] write could not complete without blocking
# https://github.com/travis-ci/travis-ci/issues/8920
import fcntl
for d in valid_doc_dirs:
p = os.path.join(path, d)
if os.path.isdir(p):
return p
fcntl.fcntl(1, fcntl.F_SETFL, 0)
def _s3_upload(self, root, project, unique_id, version=None):
from moztreedocs.package import distribution_files
from moztreedocs.upload import s3_upload, s3_set_redirects
# Files are uploaded to multiple locations:
#
# <project>/latest
# <project>/<version>
#
# This allows multiple projects and versions to be stored in the
# S3 bucket.
# Workaround the issue
# BlockingIOError: [Errno 11] write could not complete without blocking
# https://github.com/travis-ci/travis-ci/issues/8920
import fcntl
files = list(distribution_files(root))
key_prefixes = []
if version:
key_prefixes.append("%s/%s" % (project, version))
fcntl.fcntl(1, fcntl.F_SETFL, 0)
# Until we redirect / to main/latest, upload the main docs
# to the root.
if project == "main":
key_prefixes.append("")
# Files are uploaded to multiple locations:
#
# <project>/latest
# <project>/<version>
#
# This allows multiple projects and versions to be stored in the
# S3 bucket.
key_prefixes.append(unique_id)
files = list(distribution_files(root))
key_prefixes = []
if version:
key_prefixes.append("%s/%s" % (project, version))
with open(os.path.join(DOC_ROOT, "config.yml"), "r") as fh:
redirects = yaml.safe_load(fh)["redirects"]
# Until we redirect / to main/latest, upload the main docs
# to the root.
if project == "main":
key_prefixes.append("")
redirects = {k.strip("/"): v.strip("/") for k, v in redirects.items()}
key_prefixes.append(unique_id)
all_redirects = {}
with open(os.path.join(DOC_ROOT, "config.yml"), "r") as fh:
redirects = yaml.safe_load(fh)["redirects"]
for prefix in key_prefixes:
s3_upload(files, prefix)
redirects = {k.strip("/"): v.strip("/") for k, v in redirects.items()}
# Don't setup redirects for the "version" or "uuid" prefixes since
# we are exceeding a 50 redirect limit and external things are
# unlikely to link there anyway (see bug 1614908).
if (version and prefix.endswith(version)) or prefix == unique_id:
continue
all_redirects = {}
if prefix:
prefix += "/"
all_redirects.update({prefix + k: prefix + v for k, v in redirects.items()})
for prefix in key_prefixes:
s3_upload(files, prefix)
print("Redirects currently staged")
pprint(all_redirects, indent=1)
# Don't setup redirects for the "version" or "uuid" prefixes since
# we are exceeding a 50 redirect limit and external things are
# unlikely to link there anyway (see bug 1614908).
if (version and prefix.endswith(version)) or prefix == unique_id:
continue
s3_set_redirects(all_redirects)
if prefix:
prefix += "/"
all_redirects.update({prefix + k: prefix + v for k, v in redirects.items()})
unique_link = BASE_LINK + unique_id + "/index.html"
print("Uploaded documentation can be accessed here " + unique_link)
print("Redirects currently staged")
pprint(all_redirects, indent=1)
s3_set_redirects(all_redirects)
@SubCommand(
"doc",
"mach-telemetry",
description="Generate documentation from Glean metrics.yaml files",
)
def generate_telemetry_docs(command_context):
args = [
sys.executable,
"-m" "glean_parser",
"translate",
"-f",
"markdown",
"-o",
os.path.join(topsrcdir, "python/mach/docs/"),
os.path.join(topsrcdir, "python/mach/pings.yaml"),
os.path.join(topsrcdir, "python/mach/metrics.yaml"),
]
metrics_paths = [
handler.metrics_path
for handler in Registrar.command_handlers.values()
if handler.metrics_path is not None
]
args.extend(
[os.path.join(command_context.topsrcdir, path) for path in set(metrics_paths)]
unique_link = BASE_LINK + unique_id + "/index.html"
print("Uploaded documentation can be accessed here " + unique_link)
@SubCommand(
"doc",
"mach-telemetry",
description="Generate documentation from Glean metrics.yaml files",
)
subprocess.check_call(args)
def generate_telemetry_docs(self, command_context):
args = [
sys.executable,
"-m" "glean_parser",
"translate",
"-f",
"markdown",
"-o",
os.path.join(topsrcdir, "python/mach/docs/"),
os.path.join(topsrcdir, "python/mach/pings.yaml"),
os.path.join(topsrcdir, "python/mach/metrics.yaml"),
]
metrics_paths = [
handler.metrics_path
for handler in Registrar.command_handlers.values()
if handler.metrics_path is not None
]
args.extend(
[
os.path.join(command_context.topsrcdir, path)
for path in set(metrics_paths)
]
)
subprocess.check_call(args)
def die(msg, exit_code=1):

View File

@ -5,117 +5,120 @@
from __future__ import absolute_import, unicode_literals
import mozfile
from mach.decorators import Command, CommandArgument
from mach.decorators import CommandProvider, Command, CommandArgument
from mozbuild.base import MachCommandBase
@Command(
"install-moz-phab",
category="misc",
description="Install patch submission tool.",
)
@CommandArgument(
"--force",
"-f",
action="store_true",
help="Force installation even if already installed.",
)
def install_moz_phab(command_context, force=False):
import logging
import os
import re
import subprocess
import sys
existing = mozfile.which("moz-phab")
if existing and not force:
command_context.log(
logging.ERROR,
"already_installed",
{},
"moz-phab is already installed in %s." % existing,
)
sys.exit(1)
# pip3 is part of Python since 3.4, however some distros choose to
# remove core components from languages, so show a useful error message
# if pip3 is missing.
pip3 = mozfile.which("pip3")
if not pip3:
command_context.log(
logging.ERROR,
"pip3_not_installed",
{},
"`pip3` is not installed. Try installing it with your system "
"package manager.",
)
sys.exit(1)
command = [pip3, "install", "--upgrade", "MozPhab"]
if (
sys.platform.startswith("linux")
or sys.platform.startswith("openbsd")
or sys.platform.startswith("dragonfly")
or sys.platform.startswith("freebsd")
):
# On all Linux and BSD distros we consider doing a user installation.
platform_prefers_user_install = True
elif sys.platform.startswith("darwin"):
# On MacOS we require brew or ports, which work better without --user.
platform_prefers_user_install = False
elif sys.platform.startswith("win32") or sys.platform.startswith("msys"):
# Likewise for Windows we assume a system level install is preferred.
platform_prefers_user_install = False
else:
# Unsupported, default to --user.
command_context.log(
logging.WARNING,
"unsupported_platform",
{},
"Unsupported platform (%s), assuming per-user installation is "
"preferred." % sys.platform,
)
platform_prefers_user_install = True
if platform_prefers_user_install and not os.environ.get("VIRTUAL_ENV"):
# Virtual environments don't see user packages, so only perform a user
# installation if we're not within one.
command.append("--user")
command_context.log(logging.INFO, "run", {}, "Installing moz-phab")
subprocess.run(command)
# There isn't an elegant way of determining the CLI location of a pip-installed package.
# The viable mechanism used here is to:
# 1. Get the list of info about the installed package via pip
# 2. Parse out the install location. This gives us the python environment in which the
# package is installed
# 3. Parse out the relative location of the cli script
# 4. Join the two paths, and execute the script at that location
info = subprocess.check_output(
[pip3, "show", "-f", "MozPhab"], universal_newlines=True
@CommandProvider
class PhabricatorCommandProvider(MachCommandBase):
@Command(
"install-moz-phab",
category="misc",
description="Install patch submission tool.",
)
mozphab_package_location = re.compile(r"Location: (.*)").search(info).group(1)
# This needs to match "moz-phab" (*nix) and "moz-phab.exe" (Windows) while missing
# "moz-phab-script.py" (Windows).
potential_cli_paths = re.compile(
r"([^\s]*moz-phab(?:\.exe)?)$", re.MULTILINE
).findall(info)
if len(potential_cli_paths) != 1:
command_context.log(
logging.WARNING,
"no_mozphab_console_script",
{},
"Could not find the CLI script for moz-phab. Skipping install-certificate step.",
)
sys.exit(1)
console_script = os.path.realpath(
os.path.join(mozphab_package_location, potential_cli_paths[0])
@CommandArgument(
"--force",
"-f",
action="store_true",
help="Force installation even if already installed.",
)
subprocess.run([console_script, "install-certificate"])
def install_moz_phab(self, command_context, force=False):
import logging
import os
import re
import subprocess
import sys
existing = mozfile.which("moz-phab")
if existing and not force:
command_context.log(
logging.ERROR,
"already_installed",
{},
"moz-phab is already installed in %s." % existing,
)
sys.exit(1)
# pip3 is part of Python since 3.4, however some distros choose to
# remove core components from languages, so show a useful error message
# if pip3 is missing.
pip3 = mozfile.which("pip3")
if not pip3:
command_context.log(
logging.ERROR,
"pip3_not_installed",
{},
"`pip3` is not installed. Try installing it with your system "
"package manager.",
)
sys.exit(1)
command = [pip3, "install", "--upgrade", "MozPhab"]
if (
sys.platform.startswith("linux")
or sys.platform.startswith("openbsd")
or sys.platform.startswith("dragonfly")
or sys.platform.startswith("freebsd")
):
# On all Linux and BSD distros we consider doing a user installation.
platform_prefers_user_install = True
elif sys.platform.startswith("darwin"):
# On MacOS we require brew or ports, which work better without --user.
platform_prefers_user_install = False
elif sys.platform.startswith("win32") or sys.platform.startswith("msys"):
# Likewise for Windows we assume a system level install is preferred.
platform_prefers_user_install = False
else:
# Unsupported, default to --user.
command_context.log(
logging.WARNING,
"unsupported_platform",
{},
"Unsupported platform (%s), assuming per-user installation is "
"preferred." % sys.platform,
)
platform_prefers_user_install = True
if platform_prefers_user_install and not os.environ.get("VIRTUAL_ENV"):
# Virtual environments don't see user packages, so only perform a user
# installation if we're not within one.
command.append("--user")
command_context.log(logging.INFO, "run", {}, "Installing moz-phab")
subprocess.run(command)
# There isn't an elegant way of determining the CLI location of a pip-installed package.
# The viable mechanism used here is to:
# 1. Get the list of info about the installed package via pip
# 2. Parse out the install location. This gives us the python environment in which the
# package is installed
# 3. Parse out the relative location of the cli script
# 4. Join the two paths, and execute the script at that location
info = subprocess.check_output(
[pip3, "show", "-f", "MozPhab"], universal_newlines=True
)
mozphab_package_location = re.compile(r"Location: (.*)").search(info).group(1)
# This needs to match "moz-phab" (*nix) and "moz-phab.exe" (Windows) while missing
# "moz-phab-script.py" (Windows).
potential_cli_paths = re.compile(
r"([^\s]*moz-phab(?:\.exe)?)$", re.MULTILINE
).findall(info)
if len(potential_cli_paths) != 1:
command_context.log(
logging.WARNING,
"no_mozphab_console_script",
{},
"Could not find the CLI script for moz-phab. Skipping install-certificate step.",
)
sys.exit(1)
console_script = os.path.realpath(
os.path.join(mozphab_package_location, potential_cli_paths[0])
)
subprocess.run([console_script, "install-certificate"])

View File

@ -9,7 +9,9 @@ from distutils.version import StrictVersion
from mach.decorators import (
Command,
CommandArgument,
CommandProvider,
)
from mozbuild.base import MachCommandBase
def is_osx_10_10_or_greater(cls):
@ -19,136 +21,137 @@ def is_osx_10_10_or_greater(cls):
return release and StrictVersion(release) >= StrictVersion("10.10")
# Get system power consumption and related measurements.
@Command(
"power",
category="misc",
conditions=[is_osx_10_10_or_greater],
description="Get system power consumption and related measurements for "
"all running browsers. Available only on Mac OS X 10.10 and above. "
"Requires root access.",
)
@CommandArgument(
"-i",
"--interval",
type=int,
default=30000,
help="The sample period, measured in milliseconds. Defaults to 30000.",
)
def power(command_context, interval):
"""
Get system power consumption and related measurements.
"""
import os
import re
import subprocess
rapl = os.path.join(command_context.topobjdir, "dist", "bin", "rapl")
interval = str(interval)
# Run a trivial command with |sudo| to gain temporary root privileges
# before |rapl| and |powermetrics| are called. This ensures that |rapl|
# doesn't start measuring while |powermetrics| is waiting for the root
# password to be entered.
try:
subprocess.check_call(["sudo", "true"])
except Exception:
print("\nsudo failed; aborting")
return 1
# This runs rapl in the background because nothing in this script
# depends on the output. This is good because we want |rapl| and
# |powermetrics| to run at the same time.
subprocess.Popen([rapl, "-n", "1", "-i", interval])
lines = subprocess.check_output(
[
"sudo",
"powermetrics",
"--samplers",
"tasks",
"--show-process-coalition",
"--show-process-gpu",
"-n",
"1",
"-i",
interval,
],
universal_newlines=True,
@CommandProvider
class MachCommands(MachCommandBase):
@Command(
"power",
category="misc",
conditions=[is_osx_10_10_or_greater],
description="Get system power consumption and related measurements for "
"all running browsers. Available only on Mac OS X 10.10 and above. "
"Requires root access.",
)
@CommandArgument(
"-i",
"--interval",
type=int,
default=30000,
help="The sample period, measured in milliseconds. Defaults to 30000.",
)
def power(self, command_context, interval):
"""
Get system power consumption and related measurements.
"""
import os
import re
import subprocess
# When run with --show-process-coalition, |powermetrics| groups outputs
# into process coalitions, each of which has a leader.
#
# For example, when Firefox runs from the dock, its coalition looks
# like this:
#
# org.mozilla.firefox
# firefox
# plugin-container
#
# When Safari runs from the dock:
#
# com.apple.Safari
# Safari
# com.apple.WebKit.Networking
# com.apple.WebKit.WebContent
# com.apple.WebKit.WebContent
#
# When Chrome runs from the dock:
#
# com.google.Chrome
# Google Chrome
# Google Chrome Helper
# Google Chrome Helper
#
# In these cases, we want to print the whole coalition.
#
# Also, when you run any of them from the command line, things are the
# same except that the leader is com.apple.Terminal and there may be
# non-browser processes in the coalition, e.g.:
#
# com.apple.Terminal
# firefox
# plugin-container
# <and possibly other, non-browser processes>
#
# Also, the WindowServer and kernel coalitions and processes are often
# relevant.
#
# We want to print all these but omit uninteresting coalitions. We
# could do this by properly parsing powermetrics output, but it's
# simpler and more robust to just grep for a handful of identifying
# strings.
rapl = os.path.join(command_context.topobjdir, "dist", "bin", "rapl")
print() # blank line between |rapl| output and |powermetrics| output
interval = str(interval)
for line in lines.splitlines():
# Search for the following things.
#
# - '^Name' is for the columns headings line.
#
# - 'firefox' and 'plugin-container' are for Firefox
#
# - 'Safari\b' and 'WebKit' are for Safari. The '\b' excludes
# SafariCloudHistoryPush, which is a process that always
# runs, even when Safari isn't open.
#
# - 'Chrome' is for Chrome.
#
# - 'Terminal' is for the terminal. If no browser is running from
# within the terminal, it will show up unnecessarily. This is a
# minor disadvantage of this very simple parsing strategy.
#
# - 'WindowServer' is for the WindowServer.
#
# - 'kernel' is for the kernel.
#
if re.search(
r"(^Name|firefox|plugin-container|Safari\b|WebKit|Chrome|Terminal|WindowServer|kernel)", # NOQA: E501
line,
):
print(line)
# Run a trivial command with |sudo| to gain temporary root privileges
# before |rapl| and |powermetrics| are called. This ensures that |rapl|
# doesn't start measuring while |powermetrics| is waiting for the root
# password to be entered.
try:
subprocess.check_call(["sudo", "true"])
except Exception:
print("\nsudo failed; aborting")
return 1
return 0
# This runs rapl in the background because nothing in this script
# depends on the output. This is good because we want |rapl| and
# |powermetrics| to run at the same time.
subprocess.Popen([rapl, "-n", "1", "-i", interval])
lines = subprocess.check_output(
[
"sudo",
"powermetrics",
"--samplers",
"tasks",
"--show-process-coalition",
"--show-process-gpu",
"-n",
"1",
"-i",
interval,
],
universal_newlines=True,
)
# When run with --show-process-coalition, |powermetrics| groups outputs
# into process coalitions, each of which has a leader.
#
# For example, when Firefox runs from the dock, its coalition looks
# like this:
#
# org.mozilla.firefox
# firefox
# plugin-container
#
# When Safari runs from the dock:
#
# com.apple.Safari
# Safari
# com.apple.WebKit.Networking
# com.apple.WebKit.WebContent
# com.apple.WebKit.WebContent
#
# When Chrome runs from the dock:
#
# com.google.Chrome
# Google Chrome
# Google Chrome Helper
# Google Chrome Helper
#
# In these cases, we want to print the whole coalition.
#
# Also, when you run any of them from the command line, things are the
# same except that the leader is com.apple.Terminal and there may be
# non-browser processes in the coalition, e.g.:
#
# com.apple.Terminal
# firefox
# plugin-container
# <and possibly other, non-browser processes>
#
# Also, the WindowServer and kernel coalitions and processes are often
# relevant.
#
# We want to print all these but omit uninteresting coalitions. We
# could do this by properly parsing powermetrics output, but it's
# simpler and more robust to just grep for a handful of identifying
# strings.
print() # blank line between |rapl| output and |powermetrics| output
for line in lines.splitlines():
# Search for the following things.
#
# - '^Name' is for the columns headings line.
#
# - 'firefox' and 'plugin-container' are for Firefox
#
# - 'Safari\b' and 'WebKit' are for Safari. The '\b' excludes
# SafariCloudHistoryPush, which is a process that always
# runs, even when Safari isn't open.
#
# - 'Chrome' is for Chrome.
#
# - 'Terminal' is for the terminal. If no browser is running from
# within the terminal, it will show up unnecessarily. This is a
# minor disadvantage of this very simple parsing strategy.
#
# - 'WindowServer' is for the WindowServer.
#
# - 'kernel' is for the kernel.
#
if re.search(
r"(^Name|firefox|plugin-container|Safari\b|WebKit|Chrome|Terminal|WindowServer|kernel)", # NOQA: E501
line,
):
print(line)
return 0

View File

@ -9,12 +9,13 @@ import os
import sys
from mach.decorators import (
CommandProvider,
Command,
SettingsProvider,
SubCommand,
)
from mozboot.util import get_state_dir
from mozbuild.base import BuildEnvironmentNotFoundException
from mozbuild.base import BuildEnvironmentNotFoundException, MachCommandBase
from mozbuild.util import memoize
@ -67,445 +68,435 @@ class TryConfig:
]
def init(command_context):
from tryselect import push
@CommandProvider
class TrySelect(MachCommandBase):
def init(self, command_context):
from tryselect import push
push.MAX_HISTORY = command_context._mach_context.settings["try"]["maxhistory"]
push.MAX_HISTORY = command_context._mach_context.settings["try"]["maxhistory"]
@memoize
def presets(self, command_context):
from tryselect.preset import MergedHandler
@memoize
def presets(command_context):
from tryselect.preset import MergedHandler
# Create our handler using both local and in-tree presets. The first
# path in this list will be treated as the 'user' file for the purposes
# of saving and editing. All subsequent paths are 'read-only'. We check
# an environment variable first for testing purposes.
if os.environ.get("MACH_TRY_PRESET_PATHS"):
preset_paths = os.environ["MACH_TRY_PRESET_PATHS"].split(os.pathsep)
else:
preset_paths = [
os.path.join(get_state_dir(), "try_presets.yml"),
os.path.join(
command_context.topsrcdir, "tools", "tryselect", "try_presets.yml"
),
]
# Create our handler using both local and in-tree presets. The first
# path in this list will be treated as the 'user' file for the purposes
# of saving and editing. All subsequent paths are 'read-only'. We check
# an environment variable first for testing purposes.
if os.environ.get("MACH_TRY_PRESET_PATHS"):
preset_paths = os.environ["MACH_TRY_PRESET_PATHS"].split(os.pathsep)
else:
preset_paths = [
os.path.join(get_state_dir(), "try_presets.yml"),
os.path.join(
command_context.topsrcdir, "tools", "tryselect", "try_presets.yml"
),
]
return MergedHandler(*preset_paths)
return MergedHandler(*preset_paths)
def handle_presets(
self, command_context, preset_action=None, save=None, preset=None, **kwargs
):
"""Handle preset related arguments.
This logic lives here so that the underlying selectors don't need
special preset handling. They can all save and load presets the same
way.
"""
from tryselect.util.dicttools import merge
def handle_presets(
command_context, preset_action=None, save=None, preset=None, **kwargs
):
"""Handle preset related arguments.
user_presets = self.presets(command_context).handlers[0]
if preset_action == "list":
self.presets(command_context).list()
sys.exit()
This logic lives here so that the underlying selectors don't need
special preset handling. They can all save and load presets the same
way.
"""
from tryselect.util.dicttools import merge
if preset_action == "edit":
user_presets.edit()
sys.exit()
user_presets = presets(command_context).handlers[0]
if preset_action == "list":
presets(command_context).list()
sys.exit()
parser = command_context._mach_context.handler.parser
subcommand = command_context._mach_context.handler.subcommand
if "preset" not in parser.common_groups:
return kwargs
if preset_action == "edit":
user_presets.edit()
sys.exit()
default = parser.get_default
if save:
selector = (
subcommand or command_context._mach_context.settings["try"]["default"]
)
# Only save non-default values for simplicity.
kwargs = {k: v for k, v in kwargs.items() if v != default(k)}
user_presets.save(save, selector=selector, **kwargs)
print("preset saved, run with: --preset={}".format(save))
sys.exit()
if preset:
if preset not in self.presets(command_context):
command_context._mach_context.parser.error(
"preset '{}' does not exist".format(preset)
)
name = preset
preset = self.presets(command_context)[name]
selector = preset.pop("selector")
preset.pop("description", None) # description isn't used by any selectors
if not subcommand:
subcommand = selector
elif subcommand != selector:
print(
"error: preset '{}' exists for a different selector "
"(did you mean to run 'mach try {}' instead?)".format(
name, selector
)
)
sys.exit(1)
# Order of precedence is defaults -> presets -> cli. Configuration
# from the right overwrites configuration from the left.
defaults = {}
nondefaults = {}
for k, v in kwargs.items():
if v == default(k):
defaults[k] = v
else:
nondefaults[k] = v
kwargs = merge(defaults, preset, nondefaults)
parser = command_context._mach_context.handler.parser
subcommand = command_context._mach_context.handler.subcommand
if "preset" not in parser.common_groups:
return kwargs
default = parser.get_default
if save:
selector = (
subcommand or command_context._mach_context.settings["try"]["default"]
def handle_try_config(self, command_context, **kwargs):
from tryselect.util.dicttools import merge
to_validate = []
kwargs.setdefault("try_config", {})
for cls in command_context._mach_context.handler.parser.task_configs.values():
try_config = cls.try_config(**kwargs)
if try_config is not None:
to_validate.append(cls)
kwargs["try_config"] = merge(kwargs["try_config"], try_config)
for name in cls.dests:
del kwargs[name]
# Validate task_configs after they have all been parsed to avoid
# depending on the order they were processed.
for cls in to_validate:
cls.validate(**kwargs)
return kwargs
def run(self, command_context, **kwargs):
kwargs = self.handle_presets(command_context, **kwargs)
if command_context._mach_context.handler.parser.task_configs:
kwargs = self.handle_try_config(command_context, **kwargs)
mod = importlib.import_module(
"tryselect.selectors.{}".format(
command_context._mach_context.handler.subcommand
)
)
return mod.run(**kwargs)
@Command(
"try",
category="ci",
description="Push selected tasks to the try server",
parser=generic_parser,
)
def try_default(self, command_context, argv=None, **kwargs):
"""Push selected tests to the try server.
The |mach try| command is a frontend for scheduling tasks to
run on try server using selectors. A selector is a subcommand
that provides its own set of command line arguments and are
listed below.
If no subcommand is specified, the `auto` selector is run by
default. Run |mach try auto --help| for more information on
scheduling with the `auto` selector.
"""
self.init(command_context)
subcommand = command_context._mach_context.handler.subcommand
# We do special handling of presets here so that `./mach try --preset foo`
# works no matter what subcommand 'foo' was saved with.
preset = kwargs["preset"]
if preset:
if preset not in self.presets(command_context):
command_context._mach_context.handler.parser.error(
"preset '{}' does not exist".format(preset)
)
subcommand = self.presets(command_context)[preset]["selector"]
sub = subcommand or command_context._mach_context.settings["try"]["default"]
return command_context._mach_context.commands.dispatch(
"try", command_context._mach_context, subcommand=sub, argv=argv, **kwargs
)
# Only save non-default values for simplicity.
kwargs = {k: v for k, v in kwargs.items() if v != default(k)}
user_presets.save(save, selector=selector, **kwargs)
print("preset saved, run with: --preset={}".format(save))
sys.exit()
@SubCommand(
"try",
"fuzzy",
description="Select tasks on try using a fuzzy finder",
parser=get_parser("fuzzy"),
)
def try_fuzzy(self, command_context, **kwargs):
"""Select which tasks to run with a fuzzy finding interface (fzf).
if preset:
if preset not in presets(command_context):
command_context._mach_context.parser.error(
"preset '{}' does not exist".format(preset)
)
When entering the fzf interface you'll be confronted by two panes. The
one on the left contains every possible task you can schedule, the one
on the right contains the list of selected tasks. In other words, the
tasks that will be scheduled once you you press <enter>.
name = preset
preset = presets(command_context)[name]
selector = preset.pop("selector")
preset.pop("description", None) # description isn't used by any selectors
At first fzf will automatically select whichever task is under your
cursor, which simplifies the case when you are looking for a single
task. But normally you'll want to select many tasks. To accomplish
you'll generally start by typing a query in the search bar to filter
down the list of tasks (see Extended Search below). Then you'll either:
if not subcommand:
subcommand = selector
elif subcommand != selector:
print(
"error: preset '{}' exists for a different selector "
"(did you mean to run 'mach try {}' instead?)".format(name, selector)
)
A) Move the cursor to each task you want and press <tab> to select it.
Notice it now shows up in the pane to the right.
OR
B) Press <ctrl-a> to select every task that matches your filter.
You can delete your query, type a new one and select further tasks as
many times as you like. Once you are happy with your selection, press
<enter> to push the selected tasks to try.
All selected task labels and their dependencies will be scheduled. This
means you can select a test task and its build will automatically be
filled in.
Keyboard Shortcuts
------------------
When in the fuzzy finder interface, start typing to filter down the
task list. Then use the following keyboard shortcuts to select tasks:
Ctrl-K / Up => Move cursor up
Ctrl-J / Down => Move cursor down
Tab => Select task + move cursor down
Shift-Tab => Select task + move cursor up
Ctrl-A => Select all currently filtered tasks
Ctrl-D => De-select all currently filtered tasks
Ctrl-T => Toggle select all currently filtered tasks
Alt-Bspace => Clear query from input bar
Enter => Accept selection and exit
Ctrl-C / Esc => Cancel selection and exit
? => Toggle preview pane
There are many more shortcuts enabled by default, you can also define
your own shortcuts by setting `--bind` in the $FZF_DEFAULT_OPTS
environment variable. See `man fzf` for more info.
Extended Search
---------------
When typing in search terms, the following modifiers can be applied:
'word: exact match (line must contain the literal string "word")
^word: exact prefix match (line must start with literal "word")
word$: exact suffix match (line must end with literal "word")
!word: exact negation match (line must not contain literal "word")
'a | 'b: OR operator (joins two exact match operators together)
For example:
^start 'exact | !ignore fuzzy end$
Documentation
-------------
For more detailed documentation, please see:
https://firefox-source-docs.mozilla.org/tools/try/selectors/fuzzy.html
"""
self.init(command_context)
if kwargs.pop("interactive"):
kwargs["query"].append("INTERACTIVE")
if kwargs.pop("intersection"):
kwargs["intersect_query"] = kwargs["query"]
del kwargs["query"]
if kwargs.get("save") and not kwargs.get("query"):
# If saving preset without -q/--query, allow user to use the
# interface to build the query.
kwargs_copy = kwargs.copy()
kwargs_copy["push"] = False
kwargs_copy["save"] = None
kwargs["query"] = self.run(command_context, save_query=True, **kwargs_copy)
if not kwargs["query"]:
return
if kwargs.get("paths"):
kwargs["test_paths"] = kwargs["paths"]
return self.run(command_context, **kwargs)
@SubCommand(
"try",
"chooser",
description="Schedule tasks by selecting them from a web " "interface.",
parser=get_parser("chooser"),
)
def try_chooser(self, command_context, **kwargs):
"""Push tasks selected from a web interface to try.
This selector will build the taskgraph and spin up a dynamically
created 'trychooser-like' web-page on the localhost. After a selection
has been made, pressing the 'Push' button will automatically push the
selection to try.
"""
self.init(command_context)
command_context.activate_virtualenv()
path = os.path.join(
"tools", "tryselect", "selectors", "chooser", "requirements.txt"
)
command_context.virtualenv_manager.install_pip_requirements(path, quiet=True)
return self.run(command_context, **kwargs)
@SubCommand(
"try",
"auto",
description="Automatically determine which tasks to run. This runs the same "
"set of tasks that would be run on autoland. This "
"selector is EXPERIMENTAL.",
parser=get_parser("auto"),
)
def try_auto(self, command_context, **kwargs):
self.init(command_context)
return self.run(command_context, **kwargs)
@SubCommand(
"try",
"again",
description="Schedule a previously generated (non try syntax) " "push again.",
parser=get_parser("again"),
)
def try_again(self, command_context, **kwargs):
self.init(command_context)
return self.run(command_context, **kwargs)
@SubCommand(
"try",
"empty",
description="Push to try without scheduling any tasks.",
parser=get_parser("empty"),
)
def try_empty(self, command_context, **kwargs):
"""Push to try, running no builds or tests
This selector does not prompt you to run anything, it just pushes
your patches to try, running no builds or tests by default. After
the push finishes, you can manually add desired jobs to your push
via Treeherder's Add New Jobs feature, located in the per-push
menu.
"""
self.init(command_context)
return self.run(command_context, **kwargs)
@SubCommand(
"try",
"syntax",
description="Select tasks on try using try syntax",
parser=get_parser("syntax"),
)
def try_syntax(self, command_context, **kwargs):
"""Push the current tree to try, with the specified syntax.
Build options, platforms and regression tests may be selected
using the usual try options (-b, -p and -u respectively). In
addition, tests in a given directory may be automatically
selected by passing that directory as a positional argument to the
command. For example:
mach try -b d -p linux64 dom testing/web-platform/tests/dom
would schedule a try run for linux64 debug consisting of all
tests under dom/ and testing/web-platform/tests/dom.
Test selection using positional arguments is available for
mochitests, reftests, xpcshell tests and web-platform-tests.
Tests may be also filtered by passing --tag to the command,
which will run only tests marked as having the specified
tags e.g.
mach try -b d -p win64 --tag media
would run all tests tagged 'media' on Windows 64.
If both positional arguments or tags and -u are supplied, the
suites in -u will be run in full. Where tests are selected by
positional argument they will be run in a single chunk.
If no build option is selected, both debug and opt will be
scheduled. If no platform is selected a default is taken from
the AUTOTRY_PLATFORM_HINT environment variable, if set.
The command requires either its own mercurial extension ("push-to-try",
installable from mach vcs-setup) or a git repo using git-cinnabar
(installable from mach vcs-setup).
"""
self.init(command_context)
try:
if command_context.substs.get("MOZ_ARTIFACT_BUILDS"):
kwargs["local_artifact_build"] = True
except BuildEnvironmentNotFoundException:
# If we don't have a build locally, we can't tell whether
# an artifact build is desired, but we still want the
# command to succeed, if possible.
pass
config_status = os.path.join(command_context.topobjdir, "config.status")
if (kwargs["paths"] or kwargs["tags"]) and not config_status:
print(CONFIG_ENVIRONMENT_NOT_FOUND)
sys.exit(1)
# Order of precedence is defaults -> presets -> cli. Configuration
# from the right overwrites configuration from the left.
defaults = {}
nondefaults = {}
for k, v in kwargs.items():
if v == default(k):
defaults[k] = v
else:
nondefaults[k] = v
return self.run(command_context, **kwargs)
kwargs = merge(defaults, preset, nondefaults)
return kwargs
def handle_try_config(command_context, **kwargs):
from tryselect.util.dicttools import merge
to_validate = []
kwargs.setdefault("try_config", {})
for cls in command_context._mach_context.handler.parser.task_configs.values():
try_config = cls.try_config(**kwargs)
if try_config is not None:
to_validate.append(cls)
kwargs["try_config"] = merge(kwargs["try_config"], try_config)
for name in cls.dests:
del kwargs[name]
# Validate task_configs after they have all been parsed to avoid
# depending on the order they were processed.
for cls in to_validate:
cls.validate(**kwargs)
return kwargs
def run(command_context, **kwargs):
kwargs = handle_presets(command_context, **kwargs)
if command_context._mach_context.handler.parser.task_configs:
kwargs = handle_try_config(command_context, **kwargs)
mod = importlib.import_module(
"tryselect.selectors.{}".format(
command_context._mach_context.handler.subcommand
)
@SubCommand(
"try",
"coverage",
description="Select tasks on try using coverage data",
parser=get_parser("coverage"),
)
return mod.run(**kwargs)
def try_coverage(self, command_context, **kwargs):
"""Select which tasks to use using coverage data."""
self.init(command_context)
return self.run(command_context, **kwargs)
@Command(
"try",
category="ci",
description="Push selected tasks to the try server",
parser=generic_parser,
)
def try_default(command_context, argv=None, **kwargs):
"""Push selected tests to the try server.
The |mach try| command is a frontend for scheduling tasks to
run on try server using selectors. A selector is a subcommand
that provides its own set of command line arguments and are
listed below.
If no subcommand is specified, the `auto` selector is run by
default. Run |mach try auto --help| for more information on
scheduling with the `auto` selector.
"""
init(command_context)
subcommand = command_context._mach_context.handler.subcommand
# We do special handling of presets here so that `./mach try --preset foo`
# works no matter what subcommand 'foo' was saved with.
preset = kwargs["preset"]
if preset:
if preset not in presets(command_context):
command_context._mach_context.handler.parser.error(
"preset '{}' does not exist".format(preset)
)
subcommand = presets(command_context)[preset]["selector"]
sub = subcommand or command_context._mach_context.settings["try"]["default"]
return command_context._mach_context.commands.dispatch(
"try", command_context._mach_context, subcommand=sub, argv=argv, **kwargs
@SubCommand(
"try",
"release",
description="Push the current tree to try, configured for a staging release.",
parser=get_parser("release"),
)
def try_release(self, command_context, **kwargs):
"""Push the current tree to try, configured for a staging release."""
self.init(command_context)
return self.run(command_context, **kwargs)
@SubCommand(
"try",
"fuzzy",
description="Select tasks on try using a fuzzy finder",
parser=get_parser("fuzzy"),
)
def try_fuzzy(command_context, **kwargs):
"""Select which tasks to run with a fuzzy finding interface (fzf).
When entering the fzf interface you'll be confronted by two panes. The
one on the left contains every possible task you can schedule, the one
on the right contains the list of selected tasks. In other words, the
tasks that will be scheduled once you you press <enter>.
At first fzf will automatically select whichever task is under your
cursor, which simplifies the case when you are looking for a single
task. But normally you'll want to select many tasks. To accomplish
you'll generally start by typing a query in the search bar to filter
down the list of tasks (see Extended Search below). Then you'll either:
A) Move the cursor to each task you want and press <tab> to select it.
Notice it now shows up in the pane to the right.
OR
B) Press <ctrl-a> to select every task that matches your filter.
You can delete your query, type a new one and select further tasks as
many times as you like. Once you are happy with your selection, press
<enter> to push the selected tasks to try.
All selected task labels and their dependencies will be scheduled. This
means you can select a test task and its build will automatically be
filled in.
Keyboard Shortcuts
------------------
When in the fuzzy finder interface, start typing to filter down the
task list. Then use the following keyboard shortcuts to select tasks:
Ctrl-K / Up => Move cursor up
Ctrl-J / Down => Move cursor down
Tab => Select task + move cursor down
Shift-Tab => Select task + move cursor up
Ctrl-A => Select all currently filtered tasks
Ctrl-D => De-select all currently filtered tasks
Ctrl-T => Toggle select all currently filtered tasks
Alt-Bspace => Clear query from input bar
Enter => Accept selection and exit
Ctrl-C / Esc => Cancel selection and exit
? => Toggle preview pane
There are many more shortcuts enabled by default, you can also define
your own shortcuts by setting `--bind` in the $FZF_DEFAULT_OPTS
environment variable. See `man fzf` for more info.
Extended Search
---------------
When typing in search terms, the following modifiers can be applied:
'word: exact match (line must contain the literal string "word")
^word: exact prefix match (line must start with literal "word")
word$: exact suffix match (line must end with literal "word")
!word: exact negation match (line must not contain literal "word")
'a | 'b: OR operator (joins two exact match operators together)
For example:
^start 'exact | !ignore fuzzy end$
Documentation
-------------
For more detailed documentation, please see:
https://firefox-source-docs.mozilla.org/tools/try/selectors/fuzzy.html
"""
init(command_context)
if kwargs.pop("interactive"):
kwargs["query"].append("INTERACTIVE")
if kwargs.pop("intersection"):
kwargs["intersect_query"] = kwargs["query"]
del kwargs["query"]
if kwargs.get("save") and not kwargs.get("query"):
# If saving preset without -q/--query, allow user to use the
# interface to build the query.
kwargs_copy = kwargs.copy()
kwargs_copy["push"] = False
kwargs_copy["save"] = None
kwargs["query"] = run(command_context, save_query=True, **kwargs_copy)
if not kwargs["query"]:
return
if kwargs.get("paths"):
kwargs["test_paths"] = kwargs["paths"]
return run(command_context, **kwargs)
@SubCommand(
"try",
"chooser",
description="Schedule tasks by selecting them from a web interface.",
parser=get_parser("chooser"),
)
def try_chooser(command_context, **kwargs):
"""Push tasks selected from a web interface to try.
This selector will build the taskgraph and spin up a dynamically
created 'trychooser-like' web-page on the localhost. After a selection
has been made, pressing the 'Push' button will automatically push the
selection to try.
"""
init(command_context)
command_context.activate_virtualenv()
path = os.path.join(
"tools", "tryselect", "selectors", "chooser", "requirements.txt"
@SubCommand(
"try",
"scriptworker",
description="Run scriptworker tasks against a recent release.",
parser=get_parser("scriptworker"),
)
command_context.virtualenv_manager.install_pip_requirements(path, quiet=True)
def try_scriptworker(self, command_context, **kwargs):
"""Run scriptworker tasks against a recent release.
return run(command_context, **kwargs)
@SubCommand(
    "try",
    "auto",
    description="Automatically determine which tasks to run. This runs the same "
    "set of tasks that would be run on autoland. This "
    "selector is EXPERIMENTAL.",
    parser=get_parser("auto"),
)
def try_auto(command_context, **kwargs):
    """Push to try, letting the task-selection heuristics pick the task set.

    The selected tasks mirror what an autoland push of the same change
    would schedule; no manual selection step is involved.
    """
    init(command_context)
    return run(command_context, **kwargs)
@SubCommand(
    "try",
    "again",
    description="Schedule a previously generated (non try syntax) push again.",
    parser=get_parser("again"),
)
def try_again(command_context, **kwargs):
    """Re-push a previously generated (non try syntax) task selection.

    Replays an earlier saved push rather than computing a new selection.
    """
    init(command_context)
    return run(command_context, **kwargs)
@SubCommand(
    "try",
    "empty",
    description="Push to try without scheduling any tasks.",
    parser=get_parser("empty"),
)
def try_empty(command_context, **kwargs):
    """Push the current patches to try with no builds or tests scheduled.

    Nothing is selected and no prompt is shown: the push lands on try with
    an empty task set. Jobs can then be added by hand afterwards through
    Treeherder's "Add New Jobs" feature in the per-push menu.
    """
    init(command_context)
    return run(command_context, **kwargs)
@SubCommand(
    "try",
    "syntax",
    description="Select tasks on try using try syntax",
    parser=get_parser("syntax"),
)
def try_syntax(command_context, **kwargs):
    """Push the current tree to try, with the specified syntax.

    Build options, platforms and regression tests may be selected
    using the usual try options (-b, -p and -u respectively). In
    addition, tests in a given directory may be automatically
    selected by passing that directory as a positional argument to the
    command. For example:

        mach try -b d -p linux64 dom testing/web-platform/tests/dom

    would schedule a try run for linux64 debug consisting of all
    tests under dom/ and testing/web-platform/tests/dom.

    Test selection using positional arguments is available for
    mochitests, reftests, xpcshell tests and web-platform-tests.

    Tests may be also filtered by passing --tag to the command,
    which will run only tests marked as having the specified
    tags e.g.

        mach try -b d -p win64 --tag media

    would run all tests tagged 'media' on Windows 64.

    If both positional arguments or tags and -u are supplied, the
    suites in -u will be run in full. Where tests are selected by
    positional argument they will be run in a single chunk.

    If no build option is selected, both debug and opt will be
    scheduled. If no platform is selected a default is taken from
    the AUTOTRY_PLATFORM_HINT environment variable, if set.

    The command requires either its own mercurial extension ("push-to-try",
    installable from mach vcs-setup) or a git repo using git-cinnabar
    (installable from mach vcs-setup).
    """
    init(command_context)
    try:
        if command_context.substs.get("MOZ_ARTIFACT_BUILDS"):
            kwargs["local_artifact_build"] = True
    except BuildEnvironmentNotFoundException:
        # If we don't have a build locally, we can't tell whether
        # an artifact build is desired, but we still want the
        # command to succeed, if possible.
        pass

    # Resolving test paths/tags requires a configured objdir. The previous
    # check (`not config_status`) tested the truthiness of the path string
    # itself, which is always truthy, so the guard never fired; test for the
    # file's actual existence instead.
    config_status = os.path.join(command_context.topobjdir, "config.status")
    if (kwargs["paths"] or kwargs["tags"]) and not os.path.exists(config_status):
        print(CONFIG_ENVIRONMENT_NOT_FOUND)
        sys.exit(1)

    return run(command_context, **kwargs)
@SubCommand(
    "try",
    "coverage",
    description="Select tasks on try using coverage data",
    parser=get_parser("coverage"),
)
def try_coverage(command_context, **kwargs):
    """Push to try with a task set chosen from code-coverage data."""
    init(command_context)
    return run(command_context, **kwargs)
@SubCommand(
    "try",
    "release",
    description="Push the current tree to try, configured for a staging release.",
    parser=get_parser("release"),
)
def try_release(command_context, **kwargs):
    """Push the current tree to try with staging-release configuration."""
    init(command_context)
    return run(command_context, **kwargs)
@SubCommand(
    "try",
    "scriptworker",
    description="Run scriptworker tasks against a recent release.",
    parser=get_parser("scriptworker"),
)
def try_scriptworker(command_context, **kwargs):
    """Exercise scriptworker tasks against a recent release.

    Requires VPN and shipit access.
    """
    init(command_context)
    return run(command_context, **kwargs)
Requires VPN and shipit access.
"""
self.init(command_context)
return self.run(command_context, **kwargs)

View File

@ -13,9 +13,12 @@ import logging
from mach.decorators import (
CommandArgument,
CommandProvider,
Command,
)
from mozbuild.base import MachCommandBase
import mozpack.path as mozpath
import json
@ -43,211 +46,221 @@ PR_REPOSITORIES = {
}
@Command(
"import-pr",
category="misc",
description="Import a pull request from Github to the local repo.",
)
@CommandArgument("-b", "--bug-number", help="Bug number to use in the commit messages.")
@CommandArgument(
"-t",
"--bugzilla-token",
help="Bugzilla API token used to file a new bug if no bug number is provided.",
)
@CommandArgument("-r", "--reviewer", help="Reviewer nick to apply to commit messages.")
@CommandArgument(
"pull_request",
help="URL to the pull request to import (e.g. "
"https://github.com/servo/webrender/pull/3665).",
)
def import_pr(
command_context,
pull_request,
bug_number=None,
bugzilla_token=None,
reviewer=None,
):
import requests
pr_number = None
repository = None
for r in PR_REPOSITORIES.values():
if pull_request.startswith(GITHUB_ROOT + r["github"] + "/pull/"):
# sanitize URL, dropping anything after the PR number
pr_number = int(re.search("/pull/([0-9]+)", pull_request).group(1))
pull_request = GITHUB_ROOT + r["github"] + "/pull/" + str(pr_number)
repository = r
break
if repository is None:
command_context.log(
logging.ERROR,
"unrecognized_repo",
{},
"The pull request URL was not recognized; add it to the list of "
"recognized repos in PR_REPOSITORIES in %s" % __file__,
)
sys.exit(1)
command_context.log(
logging.INFO,
"import_pr",
{"pr_url": pull_request},
"Attempting to import {pr_url}",
@CommandProvider
class PullRequestImporter(MachCommandBase):
@Command(
"import-pr",
category="misc",
description="Import a pull request from Github to the local repo.",
)
dirty = [
f
for f in command_context.repository.get_changed_files(mode="all")
if f.startswith(repository["path"])
]
if dirty:
command_context.log(
logging.ERROR,
"dirty_tree",
repository,
"Local {path} tree is dirty; aborting!",
)
sys.exit(1)
target_dir = mozpath.join(
command_context.topsrcdir, os.path.normpath(repository["path"])
@CommandArgument(
"-b", "--bug-number", help="Bug number to use in the commit messages."
)
@CommandArgument(
"-t",
"--bugzilla-token",
help="Bugzilla API token used to file a new bug if no bug number is "
"provided.",
)
@CommandArgument(
"-r", "--reviewer", help="Reviewer nick to apply to commit messages."
)
@CommandArgument(
"pull_request",
help="URL to the pull request to import (e.g. "
"https://github.com/servo/webrender/pull/3665).",
)
def import_pr(
self,
command_context,
pull_request,
bug_number=None,
bugzilla_token=None,
reviewer=None,
):
import requests
if bug_number is None:
if bugzilla_token is None:
command_context.log(
logging.WARNING,
"no_token",
{},
"No bug number or bugzilla API token provided; bug number will not "
"be added to commit messages.",
)
else:
bug_number = _file_bug(
command_context, bugzilla_token, repository, pr_number
)
elif bugzilla_token is not None:
command_context.log(
logging.WARNING,
"too_much_bug",
{},
"Providing a bugzilla token is unnecessary when a bug number is provided. "
"Using bug number; ignoring token.",
)
pr_number = None
repository = None
for r in PR_REPOSITORIES.values():
if pull_request.startswith(GITHUB_ROOT + r["github"] + "/pull/"):
# sanitize URL, dropping anything after the PR number
pr_number = int(re.search("/pull/([0-9]+)", pull_request).group(1))
pull_request = GITHUB_ROOT + r["github"] + "/pull/" + str(pr_number)
repository = r
break
pr_patch = requests.get(pull_request + ".patch")
pr_patch.raise_for_status()
for patch in _split_patches(pr_patch.content, bug_number, pull_request, reviewer):
command_context.log(
logging.INFO,
"commit_msg",
patch,
"Processing commit [{commit_summary}] by [{author}] at [{date}]",
)
patch_cmd = subprocess.Popen(
["patch", "-p1", "-s"], stdin=subprocess.PIPE, cwd=target_dir
)
patch_cmd.stdin.write(patch["diff"].encode("utf-8"))
patch_cmd.stdin.close()
patch_cmd.wait()
if patch_cmd.returncode != 0:
if repository is None:
command_context.log(
logging.ERROR,
"commit_fail",
"unrecognized_repo",
{},
'Error applying diff from commit via "patch -p1 -s". Aborting...',
"The pull request URL was not recognized; add it to the list of "
"recognized repos in PR_REPOSITORIES in %s" % __file__,
)
sys.exit(patch_cmd.returncode)
command_context.repository.commit(
patch["commit_msg"], patch["author"], patch["date"], [target_dir]
sys.exit(1)
command_context.log(
logging.INFO,
"import_pr",
{"pr_url": pull_request},
"Attempting to import {pr_url}",
)
dirty = [
f
for f in command_context.repository.get_changed_files(mode="all")
if f.startswith(repository["path"])
]
if dirty:
command_context.log(
logging.ERROR,
"dirty_tree",
repository,
"Local {path} tree is dirty; aborting!",
)
sys.exit(1)
target_dir = mozpath.join(
command_context.topsrcdir, os.path.normpath(repository["path"])
)
command_context.log(logging.INFO, "commit_pass", {}, "Committed successfully.")
def _file_bug(command_context, token, repo, pr_number):
import requests
bug = requests.post(
"https://bugzilla.mozilla.org/rest/bug?api_key=%s" % token,
json={
"product": repo["bugzilla_product"],
"component": repo["bugzilla_component"],
"summary": "Land %s#%s in mozilla-central" % (repo["github"], pr_number),
"version": "unspecified",
},
)
bug.raise_for_status()
command_context.log(logging.DEBUG, "new_bug", {}, bug.content)
bugnumber = json.loads(bug.content)["id"]
command_context.log(
logging.INFO, "new_bug", {"bugnumber": bugnumber}, "Filed bug {bugnumber}"
)
return bugnumber
def _split_patches(patchfile, bug_number, pull_request, reviewer):
INITIAL = 0
HEADERS = 1
STAT_AND_DIFF = 2
patch = b""
state = INITIAL
for line in patchfile.splitlines():
if state == INITIAL:
if line.startswith(b"From "):
state = HEADERS
elif state == HEADERS:
patch += line + b"\n"
if line == b"---":
state = STAT_AND_DIFF
elif state == STAT_AND_DIFF:
if line.startswith(b"From "):
yield _parse_patch(patch, bug_number, pull_request, reviewer)
patch = b""
state = HEADERS
if bug_number is None:
if bugzilla_token is None:
command_context.log(
logging.WARNING,
"no_token",
{},
"No bug number or bugzilla API token provided; bug number will not "
"be added to commit messages.",
)
else:
bug_number = self._file_bug(
command_context, bugzilla_token, repository, pr_number
)
elif bugzilla_token is not None:
command_context.log(
logging.WARNING,
"too_much_bug",
{},
"Providing a bugzilla token is unnecessary when a bug number is provided. "
"Using bug number; ignoring token.",
)
pr_patch = requests.get(pull_request + ".patch")
pr_patch.raise_for_status()
for patch in self._split_patches(
pr_patch.content, bug_number, pull_request, reviewer
):
command_context.log(
logging.INFO,
"commit_msg",
patch,
"Processing commit [{commit_summary}] by [{author}] at [{date}]",
)
patch_cmd = subprocess.Popen(
["patch", "-p1", "-s"], stdin=subprocess.PIPE, cwd=target_dir
)
patch_cmd.stdin.write(patch["diff"].encode("utf-8"))
patch_cmd.stdin.close()
patch_cmd.wait()
if patch_cmd.returncode != 0:
command_context.log(
logging.ERROR,
"commit_fail",
{},
'Error applying diff from commit via "patch -p1 -s". Aborting...',
)
sys.exit(patch_cmd.returncode)
command_context.repository.commit(
patch["commit_msg"], patch["author"], patch["date"], [target_dir]
)
command_context.log(
logging.INFO, "commit_pass", {}, "Committed successfully."
)
def _file_bug(self, command_context, token, repo, pr_number):
import requests
bug = requests.post(
"https://bugzilla.mozilla.org/rest/bug?api_key=%s" % token,
json={
"product": repo["bugzilla_product"],
"component": repo["bugzilla_component"],
"summary": "Land %s#%s in mozilla-central"
% (repo["github"], pr_number),
"version": "unspecified",
},
)
bug.raise_for_status()
command_context.log(logging.DEBUG, "new_bug", {}, bug.content)
bugnumber = json.loads(bug.content)["id"]
command_context.log(
logging.INFO, "new_bug", {"bugnumber": bugnumber}, "Filed bug {bugnumber}"
)
return bugnumber
def _split_patches(self, patchfile, bug_number, pull_request, reviewer):
INITIAL = 0
HEADERS = 1
STAT_AND_DIFF = 2
patch = b""
state = INITIAL
for line in patchfile.splitlines():
if state == INITIAL:
if line.startswith(b"From "):
state = HEADERS
elif state == HEADERS:
patch += line + b"\n"
if len(patch) > 0:
yield _parse_patch(patch, bug_number, pull_request, reviewer)
return
if line == b"---":
state = STAT_AND_DIFF
elif state == STAT_AND_DIFF:
if line.startswith(b"From "):
yield self._parse_patch(patch, bug_number, pull_request, reviewer)
patch = b""
state = HEADERS
else:
patch += line + b"\n"
if len(patch) > 0:
yield self._parse_patch(patch, bug_number, pull_request, reviewer)
return
def _parse_patch(self, patch, bug_number, pull_request, reviewer):
import email
from email import (
header,
policy,
)
def _parse_patch(patch, bug_number, pull_request, reviewer):
import email
from email import (
header,
policy,
)
parse_policy = policy.compat32.clone(max_line_length=None)
parsed_mail = email.message_from_bytes(patch, policy=parse_policy)
parse_policy = policy.compat32.clone(max_line_length=None)
parsed_mail = email.message_from_bytes(patch, policy=parse_policy)
def header_as_unicode(key):
decoded = header.decode_header(parsed_mail[key])
return str(header.make_header(decoded))
def header_as_unicode(key):
decoded = header.decode_header(parsed_mail[key])
return str(header.make_header(decoded))
author = header_as_unicode("From")
date = header_as_unicode("Date")
commit_summary = header_as_unicode("Subject")
email_body = parsed_mail.get_payload(decode=True).decode("utf-8")
(commit_body, diff) = ("\n" + email_body).rsplit("\n---\n", 1)
author = header_as_unicode("From")
date = header_as_unicode("Date")
commit_summary = header_as_unicode("Subject")
email_body = parsed_mail.get_payload(decode=True).decode("utf-8")
(commit_body, diff) = ("\n" + email_body).rsplit("\n---\n", 1)
bug_prefix = ""
if bug_number is not None:
bug_prefix = "Bug %s - " % bug_number
commit_summary = re.sub(r"^\[PATCH[0-9 /]*\] ", bug_prefix, commit_summary)
if reviewer is not None:
commit_summary += " r=" + reviewer
bug_prefix = ""
if bug_number is not None:
bug_prefix = "Bug %s - " % bug_number
commit_summary = re.sub(r"^\[PATCH[0-9 /]*\] ", bug_prefix, commit_summary)
if reviewer is not None:
commit_summary += " r=" + reviewer
commit_msg = commit_summary + "\n"
if len(commit_body) > 0:
commit_msg += commit_body + "\n"
commit_msg += "\n[import_pr] From " + pull_request + "\n"
commit_msg = commit_summary + "\n"
if len(commit_body) > 0:
commit_msg += commit_body + "\n"
commit_msg += "\n[import_pr] From " + pull_request + "\n"
patch_obj = {
"author": author,
"date": date,
"commit_summary": commit_summary,
"commit_msg": commit_msg,
"diff": diff,
}
return patch_obj
patch_obj = {
"author": author,
"date": date,
"commit_summary": commit_summary,
"commit_msg": commit_msg,
"diff": diff,
}
return patch_obj