diff --git a/taskcluster/taskgraph/target_tasks.py b/taskcluster/taskgraph/target_tasks.py
index 4c04b9f8ae2d..4540f188b3fc 100644
--- a/taskcluster/taskgraph/target_tasks.py
+++ b/taskcluster/taskgraph/target_tasks.py
@@ -210,10 +210,32 @@ def filter_out_missing_signoffs(task, parameters):
     return True
 
 
+def filter_tests_without_manifests(task, parameters):
+    """Remove test tasks that have an empty 'test_manifests' attribute.
+
+    This situation can arise when the test loader (e.g bugbug) decided there
+    weren't any important manifests to run for the given push. We filter tasks
+    out here rather than in the transforms so that the full task graph is still
+    aware that the task exists (which is needed by the backfill action).
+    """
+    if (
+        task.kind == "test"
+        and "test_manifests" in task.attributes
+        and not task.attributes["test_manifests"]
+    ):
+        return False
+    return True
+
+
 def standard_filter(task, parameters):
     return all(
         filter_func(task, parameters) for filter_func in
-        (filter_out_cron, filter_for_project, filter_for_hg_branch)
+        (
+            filter_out_cron,
+            filter_for_project,
+            filter_for_hg_branch,
+            filter_tests_without_manifests,
+        )
     )
 
 
diff --git a/taskcluster/taskgraph/test/test_target_tasks.py b/taskcluster/taskgraph/test/test_target_tasks.py
index c808a6a8d664..56c13990068a 100644
--- a/taskcluster/taskgraph/test/test_target_tasks.py
+++ b/taskcluster/taskgraph/test/test_target_tasks.py
@@ -7,12 +7,14 @@ from __future__ import absolute_import, print_function, unicode_literals
 import contextlib
 import unittest
 
+import pytest
+from mozunit import main
+
 from taskgraph import target_tasks
 from taskgraph import try_option_syntax
 from taskgraph.graph import Graph
 from taskgraph.taskgraph import TaskGraph
 from taskgraph.task import Task
-from mozunit import main
 
 
 class FakeTryOptionSyntax(object):
@@ -163,5 +165,45 @@ class TestTargetTasks(unittest.TestCase):
         self.assertEqual(method(tg, params, {}), ['a'])
 
 
+# tests for specific filters
+
+@pytest.mark.parametrize(
+    "name,task,params,expected",
+    (
+        pytest.param(
+            "filter_tests_without_manifests",
+            Task(kind="test", label="a", attributes={}, task={}),
+            None,
+            True,
+            id="filter_tests_without_manifests_not_in_attributes",
+        ),
+        pytest.param(
+            "filter_tests_without_manifests",
+            Task(kind="test", label="a", attributes={'test_manifests': ['foo']}, task={}),
+            None,
+            True,
+            id="filter_tests_without_manifests_has_test_manifests",
+        ),
+        pytest.param(
+            "filter_tests_without_manifests",
+            Task(kind="build", label="a", attributes={'test_manifests': None}, task={}),
+            None,
+            True,
+            id="filter_tests_without_manifests_not_a_test",
+        ),
+        pytest.param(
+            "filter_tests_without_manifests",
+            Task(kind="test", label="a", attributes={'test_manifests': None}, task={}),
+            None,
+            False,
+            id="filter_tests_without_manifests_has_no_test_manifests",
+        ),
+    )
+)
+def test_filters(name, task, params, expected):
+    func = getattr(target_tasks, name)
+    assert func(task, params) is expected
+
+
 if __name__ == '__main__':
     main()
diff --git a/taskcluster/taskgraph/transforms/tests.py b/taskcluster/taskgraph/transforms/tests.py
index 29687f744f5b..cb7a895328e2 100644
--- a/taskcluster/taskgraph/transforms/tests.py
+++ b/taskcluster/taskgraph/transforms/tests.py
@@ -1531,11 +1531,6 @@ def set_test_manifests(config, tasks):
             frozenset(mozinfo.items()),
         )
 
-        # Skip the task if the loader doesn't return any manifests for the
-        # associated suite.
-        if not task['test-manifests']['active'] and not task['test-manifests']['skipped']:
-            continue
-
         # The default loader loads all manifests. If we use a non-default
         # loader, we'll only run some subset of manifests and the hardcoded
         # chunk numbers will no longer be valid. Dynamic chunking should yield
@@ -1613,9 +1608,12 @@ def split_chunks(config, tasks):
                 manifests['active'],
             )
 
-            # Add all skipped manifests to the first chunk so they still show up in the
-            # logs. They won't impact runtime much.
-            chunked_manifests[0].extend(manifests['skipped'])
+            # Add all skipped manifests to the first chunk of backstop pushes
+            # so they still show up in the logs. They won't impact runtime much
+            # and this way tools like ActiveData are still aware that they
+            # exist.
+            if config.params["backstop"] and manifests["active"]:
+                chunked_manifests[0].extend(manifests['skipped'])
 
         for i in range(task['chunks']):
             this_chunk = i + 1
@@ -1625,12 +1623,7 @@
             chunked['this-chunk'] = this_chunk
 
             if chunked_manifests is not None:
-                manifests = sorted(chunked_manifests[i])
-                if not manifests:
-                    raise Exception(
-                        'Chunking algorithm yielded no manifests for chunk {} of {} on {}'.format(
-                            this_chunk, task['test-name'], task['test-platform']))
-                chunked['test-manifests'] = manifests
+                chunked['test-manifests'] = sorted(chunked_manifests[i])
 
             group, symbol = split_symbol(chunked['treeherder-symbol'])
             if task['chunks'] > 1 or not symbol:
@@ -1849,10 +1842,12 @@ def make_job_description(config, tasks):
             'build_type': attr_build_type,
             'test_platform': task['test-platform'],
             'test_chunk': str(task['this-chunk']),
-            'test_manifests': task.get('test-manifests'),
             attr_try_name: try_name,
         })
 
+        if 'test-manifests' in task:
+            attributes['test_manifests'] = task['test-manifests']
+
         jobdesc = {}
         name = '{}-{}'.format(task['test-platform'], task['test-name'])
         jobdesc['name'] = name
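For reference, below is a minimal, self-contained sketch of the behaviour the new filter_tests_without_manifests filter is expected to have. FakeTask is a hypothetical stand-in used only for illustration; it is not the real taskgraph.task.Task, and the patch above remains the authoritative implementation.

# Sketch only: FakeTask is a stand-in with just the fields the filter inspects.
from collections import namedtuple

FakeTask = namedtuple("FakeTask", "kind attributes")


def filter_tests_without_manifests(task, parameters):
    # Drop only *test* tasks whose 'test_manifests' attribute is present but
    # empty (None or []); everything else passes through.
    if (
        task.kind == "test"
        and "test_manifests" in task.attributes
        and not task.attributes["test_manifests"]
    ):
        return False
    return True


# Mirrors the four cases covered by the parametrized test above.
assert filter_tests_without_manifests(FakeTask("test", {}), None) is True
assert filter_tests_without_manifests(FakeTask("test", {"test_manifests": ["foo"]}), None) is True
assert filter_tests_without_manifests(FakeTask("build", {"test_manifests": None}), None) is True
assert filter_tests_without_manifests(FakeTask("test", {"test_manifests": None}), None) is False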