Bug 1400895 - Better try support for test-verify. r=ahal

Joel Maher 2018-05-23 10:00:03 -04:00
parent 8ff40400ce
commit 28e3660b54
6 changed files with 133 additions and 69 deletions

View File

@@ -136,7 +136,7 @@ web-platform-tests-wdspec-headless:
 test-verify-wpt:
     description: "Extra verification of web-platform tests modified on this push"
-    suite: test-verify-wpt
+    suite: test-verify
     treeherder-symbol: TVw
     max-run-time: 10800
     run-on-projects:

View File

@@ -21,6 +21,7 @@ from .graph import Graph
 from . import files_changed
 from .taskgraph import TaskGraph
 from .util.seta import is_low_value_task
+from .util.perfile import perfile_number_of_chunks
 from .util.taskcluster import find_task_id
 from .util.parameterization import resolve_task_references
 from mozbuild.util import memoize
@@ -379,3 +380,17 @@ class SkipUnlessSchedules(OptimizationStrategy):
             return False
 
         return True
+
+
+class TestVerify(OptimizationStrategy):
+    def should_remove_task(self, task, params, _):
+        # Keep the task (return False) when the push modifies files that this
+        # test-verify task should cover; otherwise it is safe to optimize away.
+        env = params.get('try_task_config', {}) or {}
+        env = env.get('templates', {}).get('env', {})
+        if perfile_number_of_chunks(env.get('MOZHARNESS_TEST_PATHS', ''),
+                                    params.get('head_repository', ''),
+                                    params.get('head_rev', ''),
+                                    task):
+            return False
+        return True
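
Note: `should_remove_task` returns a plain bool here (True means the task may be dropped from the graph). A standalone sketch of that decision, with `perfile_number_of_chunks` stubbed out and illustrative parameter values (neither the stub's return value nor the `params` dict comes from real taskgraph output):

import math

# Stub standing in for taskgraph.util.perfile.perfile_number_of_chunks:
# pretend the push touched 12 files that match this task's verify patterns.
def perfile_number_of_chunks(test_paths, head_repository, head_rev, task):
    return int(math.ceil(12 / 10.0))  # -> 2 chunks

def should_remove_task(task, params):
    # Same decision as TestVerify above: keep the task (False) when at least
    # one chunk of modified tests needs verification, drop it (True) otherwise.
    env = params.get('try_task_config', {}) or {}
    env = env.get('templates', {}).get('env', {})
    if perfile_number_of_chunks(env.get('MOZHARNESS_TEST_PATHS', ''),
                                params.get('head_repository', ''),
                                params.get('head_rev', ''),
                                task):
        return False
    return True

params = {'try_task_config': {'templates': {'env': {'MOZHARNESS_TEST_PATHS': ''}}},
          'head_repository': 'https://hg.mozilla.org/try',
          'head_rev': '0123456789ab'}
print(should_remove_task('test-verify-e10s', params))  # False: keep the task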

View File

@@ -23,8 +23,6 @@ from taskgraph.transforms.base import TransformSequence
 from taskgraph.util.schema import resolve_keyed_by, OptimizationSchema
 from taskgraph.util.treeherder import split_symbol, join_symbol, add_suffix
 from taskgraph.util.platforms import platform_family
-from taskgraph import files_changed
-from mozpack.path import match as mozpackmatch
 from taskgraph.util.schema import (
     validate_schema,
     optionally_keyed_by,
@@ -33,6 +31,8 @@ from taskgraph.util.schema import (
 from taskgraph.util.taskcluster import get_artifact_path
 from mozbuild.schedules import INCLUSIVE_COMPONENTS
+from taskgraph.util.perfile import perfile_number_of_chunks
 from voluptuous import (
     Any,
     Optional,
@@ -42,7 +42,6 @@ from voluptuous import (
 import copy
 import logging
-import math
 
 # default worker types keyed by instance-size
 LINUX_WORKER_TYPES = {
@@ -379,7 +378,6 @@ test_description_schema = Schema({
         'test-platform',
         Any(basestring, None),
     ),
 }, required=True)
@@ -791,9 +789,13 @@ def split_chunks(config, tests):
     symbol."""
     for test in tests:
         if test['suite'].startswith('test-verify'):
-            test['chunks'] = perfile_number_of_chunks(config, test['test-name'])
-            if test['chunks'] == 0:
-                continue
+            env = config.params.get('try_task_config', {}) or {}
+            env = env.get('templates', {}).get('env', {})
+            test['chunks'] = perfile_number_of_chunks(env.get('MOZHARNESS_TEST_PATHS', ''),
+                                                      config.params.get('head_repository', ''),
+                                                      config.params.get('head_rev', ''),
+                                                      test['test-name'])
 
             # limit the number of chunks we run for test-verify mode, because
             # test-verify is comprehensive and takes a lot of time; if we have
             # >30 tests changed, this is probably an import of external tests,
@@ -802,7 +804,7 @@ def split_chunks(config, tests):
             if test['chunks'] > maximum_number_verify_chunks:
                 test['chunks'] = maximum_number_verify_chunks
 
-        if test['chunks'] == 1:
+        if test['chunks'] <= 1:
             test['this-chunk'] = 1
             yield test
             continue
@@ -819,51 +821,6 @@ def split_chunks(config, tests):
         yield chunked
 
 
-def perfile_number_of_chunks(config, type):
-    # A rough estimate of how many chunks we need based on simple rules
-    # for determining what a test file is.
-    # TODO: Make this flexible based on coverage vs verify || test type
-    tests_per_chunk = 10.0
-
-    if type.startswith('test-verify-wpt'):
-        file_patterns = ['testing/web-platform/tests/**']
-    elif type.startswith('test-verify-gpu'):
-        file_patterns = ['**/*webgl*/**/test_*',
-                         '**/dom/canvas/**/test_*',
-                         '**/gfx/tests/**/test_*',
-                         '**/devtools/canvasdebugger/**/browser_*',
-                         '**/reftest*/**']
-    elif type.startswith('test-verify'):
-        file_patterns = ['**/test_*',
-                         '**/browser_*',
-                         '**/crashtest*/**',
-                         'js/src/test/test/',
-                         'js/src/test/non262/',
-                         'js/src/test/test262/']
-
-    changed_files = files_changed.get_changed_files(config.params.get('head_repository'),
-                                                    config.params.get('head_rev'))
-
-    test_count = 0
-    for pattern in file_patterns:
-        for path in changed_files:
-            if mozpackmatch(path, pattern):
-                gpu = False
-                if type == 'test-verify-e10s':
-                    # file_patterns for test-verify will pick up some gpu tests, lets ignore
-                    # in the case of reftest, we will not have any in the regular case
-                    gpu_dirs = ['dom/canvas', 'gfx/tests', 'devtools/canvasdebugger', 'webgl']
-                    for gdir in gpu_dirs:
-                        if len(path.split(gdir)) > 1:
-                            gpu = True
-
-                if not gpu:
-                    test_count += 1
-
-    chunks = test_count/tests_per_chunk
-    return int(math.ceil(chunks))
 
 
 @transforms.add
 def allow_software_gl_layers(config, tests):
     """

View File

@@ -0,0 +1,81 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import logging
+import math
+
+from mozbuild.util import memoize
+from mozpack.path import match as mozpackmatch
+from mozversioncontrol import get_repository_object, InvalidRepoPath
+from taskgraph import files_changed
+from .. import GECKO
+
+logger = logging.getLogger(__name__)
+
+
+@memoize
+def perfile_number_of_chunks(try_task_config, head_repository, head_rev, type):
+    # TODO: Make this flexible based on coverage vs verify || test type
+    tests_per_chunk = 10.0
+
+    if type.startswith('test-verify-wpt'):
+        file_patterns = ['testing/web-platform/tests/**',
+                         'testing/web-platform/mozilla/tests/**']
+    elif type.startswith('test-verify-gpu'):
+        file_patterns = ['**/*webgl*/**/test_*',
+                         '**/dom/canvas/**/test_*',
+                         '**/gfx/tests/**/test_*',
+                         '**/devtools/canvasdebugger/**/browser_*',
+                         '**/reftest*/**']
+    elif type.startswith('test-verify'):
+        file_patterns = ['**/test_*',
+                         '**/browser_*',
+                         '**/crashtest*/**',
+                         'js/src/test/test/**',
+                         'js/src/test/non262/**',
+                         'js/src/test/test262/**']
+    else:
+        # Not a test-verify task; return a single chunk so the caller and the
+        # optimizer leave the task alone (returning 0 would mean no tests to run).
+        return 1
+
+    changed_files = set()
+    specified_files = []
+    if try_task_config:
+        specified_files = try_task_config.split(":")
+
+    try:
+        vcs = get_repository_object(GECKO)
+        changed_files.update(vcs.get_outgoing_files('AM'))
+    except InvalidRepoPath:
+        vcs = None
+
+    if not changed_files:
+        changed_files.update(files_changed.get_changed_files(head_repository,
+                                                             head_rev))
+
+    changed_files.update(specified_files)
+
+    test_count = 0
+    for pattern in file_patterns:
+        for path in changed_files:
+            # TODO: consider running tests if a manifest changes
+            if path.endswith('.list') or path.endswith('.ini'):
+                continue
+
+            if mozpackmatch(path, pattern):
+                gpu = False
+                if type == 'test-verify-e10s':
+                    # The test-verify file_patterns also pick up some gpu tests;
+                    # ignore them here. Reftests only appear in the gpu patterns,
+                    # so none should match in the regular case.
+                    gpu_dirs = ['dom/canvas', 'gfx/tests', 'devtools/canvasdebugger', 'webgl']
+                    for gdir in gpu_dirs:
+                        if len(path.split(gdir)) > 1:
+                            gpu = True
+
+                if not gpu:
+                    test_count += 1
+
+    chunks = test_count/tests_per_chunk
+    return int(math.ceil(chunks))
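
To see how the counting loop behaves, here is a trimmed, self-contained walk-through. The matches() helper is a crude stand-in for mozpack.path.match (which understands ** globs), and the file paths are made up:

import math

def matches(path, pattern):
    # Stand-in for mozpack.path.match; good enough for this example.
    return '/test_' in path

# MOZHARNESS_TEST_PATHS arrives as a colon-separated string of paths.
specified_files = 'dom/base/test/test_anchor.html'.split(':')

changed_files = {'layout/style/test/test_flex.html',
                 'layout/style/test/mochitest.ini'}  # manifest, skipped below
changed_files.update(specified_files)

test_count = 0
for path in changed_files:
    # Manifest edits do not (yet) trigger verification of the tests they list.
    if path.endswith('.list') or path.endswith('.ini'):
        continue
    if matches(path, '**/test_*'):
        test_count += 1

print(int(math.ceil(test_count / 10.0)))  # 2 matching files -> 1 chunk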

View File

@@ -63,61 +63,70 @@ TEST_SUITES = {
         'aliases': ('a11y', 'ally'),
         'mach_command': 'mochitest',
         'kwargs': {'flavor': 'a11y', 'test_paths': None},
-        'task_regex': 'mochitest-a11y(?:-1)?$',
+        'task_regex': ['mochitest-a11y(?:-1)?$',
+                       'test-verify(?:-e10s)?(?:-1)?$'],
     },
     'mochitest-browser': {
         'aliases': ('bc', 'browser-chrome'),
         'mach_command': 'mochitest',
         'kwargs': {'flavor': 'browser-chrome', 'test_paths': None},
-        'task_regex': 'mochitest-browser-chrome(?:-e10s)?(?:-1)?$',
+        'task_regex': ['mochitest-browser-chrome(?:-e10s)?(?:-1)?$',
+                       'test-verify(?:-e10s)?(?:-1)?$'],
     },
     'mochitest-chrome': {
         'aliases': ('mc',),
         'mach_command': 'mochitest',
         'kwargs': {'flavor': 'chrome', 'test_paths': None},
-        'task_regex': 'mochitest-chrome(?:-e10s)?(?:-1)?$',
+        'task_regex': ['mochitest-chrome(?:-e10s)?(?:-1)?$',
+                       'test-verify(?:-e10s)?(?:-1)?$'],
    },
    'mochitest-clipboard': {
        'aliases': ('cl', 'clipboard',),
        'mach_command': 'mochitest',
        'kwargs': {'subsuite': 'clipboard', 'test_paths': None},
-        'task_regex': 'mochitest-clipboard(?:-e10s)?(?:-1)?$',
+        'task_regex': ['mochitest-clipboard(?:-e10s)?(?:-1)?$',
+                       'test-verify(?:-e10s)?(?:-1)?$'],
     },
     'mochitest-devtools': {
         'aliases': ('dt', 'devtools-chrome'),
         'mach_command': 'mochitest',
         'kwargs': {'flavor': 'browser-chrome', 'subsuite': 'devtools', 'test_paths': None},
-        'task_regex': 'mochitest-devtools-chrome(?:-e10s)?(?:-1)?$',
+        'task_regex': ['mochitest-devtools-chrome(?:-e10s)?(?:-1)?$',
+                       'test-verify(?:-e10s)?(?:-1)?$'],
     },
     'mochitest-gpu': {
         'aliases': ('gpu',),
         'mach_command': 'mochitest',
         'kwargs': {'subsuite': 'gpu', 'test_paths': None},
-        'task_regex': 'mochitest-gpu(?:-e10s)?(?:-1)?$',
+        'task_regex': ['mochitest-gpu(?:-e10s)?(?:-1)?$',
+                       'test-verify(?:-gpu)?(?:-e10s)?(?:-1)?$'],
     },
     'mochitest-media': {
         'aliases': ('mpm', 'plain-media'),
         'mach_command': 'mochitest',
         'kwargs': {'flavor': 'plain', 'subsuite': 'media', 'test_paths': None},
-        'task_regex': 'mochitest-media(?:-e10s)?(?:-1)?$',
+        'task_regex': ['mochitest-media(?:-e10s)?(?:-1)?$',
+                       'test-verify(?:-e10s)?(?:-1)?$'],
     },
     'mochitest-plain': {
         'aliases': ('mp', 'plain',),
         'mach_command': 'mochitest',
         'kwargs': {'flavor': 'plain', 'test_paths': None},
-        'task_regex': 'mochitest(?:-e10s)?(?:-1)?$',
+        'task_regex': ['mochitest(?:-e10s)?(?:-1)?$',
+                       'test-verify(?:-e10s)?(?:-1)?$'],
     },
     'mochitest-screenshots': {
         'aliases': ('ss', 'screenshots-chrome'),
         'mach_command': 'mochitest',
         'kwargs': {'flavor': 'browser-chrome', 'subsuite': 'screenshots', 'test_paths': None},
-        'task_regex': 'browser-screenshots(?:-e10s)?(?:-1)?$',
+        'task_regex': ['browser-screenshots(?:-e10s)?(?:-1)?$'],
     },
     'mochitest-webgl': {
         'aliases': ('webgl',),
         'mach_command': 'mochitest',
         'kwargs': {'flavor': 'plain', 'subsuite': 'webgl', 'test_paths': None},
-        'task_regex': 'mochitest-webgl(?:-e10s)?(?:-1)?$',
+        'task_regex': ['mochitest-webgl(?:-e10s)?(?:-1)?$',
+                       'test-verify(?:-gpu)?(?:-e10s)?(?:-1)?$'],
     },
     'python': {
         'mach_command': 'python-test',
@@ -127,18 +136,20 @@ TEST_SUITES = {
         'aliases': ('rr',),
         'mach_command': 'reftest',
         'kwargs': {'tests': None},
-        'task_regex': '(opt|debug)-reftest(?:-no-accel|-gpu|-stylo)?(?:-e10s)?(?:-1)?$',
+        'task_regex': ['(opt|debug)-reftest(?:-no-accel|-gpu|-stylo)?(?:-e10s)?(?:-1)?$',
+                       'test-verify-gpu(?:-e10s)?(?:-1)?$'],
     },
     'robocop': {
         'mach_command': 'robocop',
         'kwargs': {'test_paths': None},
-        'task_regex': 'robocop(?:-e10s)?(?:-1)?$',
+        'task_regex': ['robocop(?:-e10s)?(?:-1)?$'],
     },
     'web-platform-tests': {
         'aliases': ('wpt',),
         'mach_command': 'web-platform-tests',
         'kwargs': {'include': []},
-        'task_regex': 'web-platform-tests(?:-reftests|-wdspec)?(?:-e10s)?(?:-1)?$',
+        'task_regex': ['web-platform-tests(?:-reftests|-wdspec)?(?:-e10s)?(?:-1)?$',
+                       'test-verify-wpt-e10s'],
     },
     'valgrind': {
         'aliases': ('v',),
@@ -149,7 +160,7 @@ TEST_SUITES = {
         'aliases': ('x',),
         'mach_command': 'xpcshell-test',
         'kwargs': {'test_file': 'all'},
-        'task_regex': 'xpcshell(?:-1)?$',
+        'task_regex': ['xpcshell(?:-1)?$', 'test-verify(?:-1)?$'],
     },
 }

View File

@@ -186,7 +186,7 @@ def filter_by_paths(tasks, paths):
                 flavor, " and subsuite '{}'".format(subsuite) if subsuite else ""))
             continue
 
-        task_regexes.add(suite['task_regex'])
+        task_regexes.update(suite['task_regex'])
 
     def match_task(task):
         return any(re.search(pattern, task) for pattern in task_regexes)
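
With task_regex now a list, a single suite contributes both its own task patterns and the test-verify patterns that cover it, which is why `add` becomes `update` above. A small self-contained illustration of the resulting matching (the task labels are invented; real labels come from the task graph):

from __future__ import print_function
import re

task_regexes = set()
task_regexes.update(['xpcshell(?:-1)?$', 'test-verify(?:-1)?$'])  # e.g. the xpcshell suite

def match_task(task):
    return any(re.search(pattern, task) for pattern in task_regexes)

for label in ('test-linux64/opt-xpcshell-1',
              'test-linux64/opt-test-verify-1',
              'test-linux64/opt-mochitest-e10s-2'):
    print(label, match_task(label))
# The xpcshell and test-verify labels match; the mochitest label does not.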