Bug 1472792: give both retrigger actions the same name; r=bstack

This additionally reconsiders the order of all of the actions, spacing them 50
"units" apart and putting the more common actions first.

MozReview-Commit-ID: 98IOYKVMcGU

--HG--
extra : rebase_source : 1273a8b86625bd8e4dc3bddab80c6912241f88c8
extra : histedit_source : 16314284a2b4e0368da843b036e22aaedf485307
Dustin J. Mitchell 2018-07-06 20:28:23 +00:00
parent d5a201ece6
commit 5ab4495828
13 changed files with 139 additions and 151 deletions

View File

@@ -79,6 +79,9 @@ the entire task-group (result-set or push in Treeherder terminology). To create
an action that shows up in the context menu for a task we would specify the
``context`` parameter.
The ``order`` value is the sort key defining the order of actions in the
resulting ``actions.json`` file. If multiple actions have the same name and
match the same task, the action with the smallest ``order`` will be used.
Setting the Action Context
..........................
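To make the name/``order`` interplay described in the hunk above concrete, here is a minimal sketch. It is not part of this patch; the action and callback names are hypothetical, modelled on the ``register_callback_action`` arguments visible elsewhere in this commit. Two registrations share the user-facing name ``retrigger``; the one with the narrower ``context`` and smaller ``order`` is preferred for the tasks it matches, while the ``order=11`` registration serves as the fallback for everything else:

from .registry import register_callback_action


@register_callback_action(
    name='retrigger',                      # same user-facing name as the fallback below
    cb_name='retrigger-special',           # hypothetical distinct callback name
    title='Retrigger (special handling)',
    symbol='rt',
    kind='hook',
    generic=True,
    description='Retrigger a matching task with extra handling.',
    context=[{'test-type': 'mochitest'}],  # only offered for tasks matching this context
    order=10,                              # smallest order wins when both actions match a task
)
def retrigger_special(parameters, graph_config, input, task_group_id, task_id, task):
    pass  # specialised retrigger logic would go here


@register_callback_action(
    name='retrigger',                      # same name: appears as a single 'Retrigger' entry
    title='Retrigger',
    symbol='rt',
    kind='hook',
    generic=True,
    description='Create a clone of the task.',
    context=[{}],                          # matches every task
    order=11,                              # larger order: used only as the fallback
)
def retrigger_fallback(parameters, graph_config, input, task_group_id, task_id, task):
    pass  # generic retrigger logic would go here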

View File

@@ -18,7 +18,7 @@ from .util import (create_tasks, fetch_graph_and_labels)
generic=True,
symbol='add-new',
description="Add new jobs using task labels.",
order=10000,
order=100,
context=[],
schema={
'type': 'object',

View File

@@ -21,7 +21,7 @@ logger = logging.getLogger(__name__)
generic=True,
symbol='raT',
description="Add all Talos tasks to a push.",
order=100, # Useful for sheriffs, but not top of the list
order=150,
context=[],
schema={
'type': 'object',

View File

@@ -31,7 +31,7 @@ logger = logging.getLogger(__name__)
description=('Take the label of the current task, '
'and trigger the task with that label '
'on previous pushes in the same project.'),
order=0,
order=200,
context=[{}], # This will be available for all tasks
schema={
'type': 'object',

View File

@@ -19,7 +19,7 @@ from .registry import register_callback_action
description=(
'Cancel the given task'
),
order=100,
order=350,
context=[{}]
)
def cancel_action(parameters, graph_config, input, task_group_id, task_id, task):

View File

@@ -47,7 +47,7 @@ def list_group(task_group_id, session):
'Cancel all running and pending tasks created by the decision task '
'this action task is associated with.'
),
order=100,
order=400,
context=[]
)
def cancel_all_action(parameters, graph_config, input, task_group_id, task_id, task):

View File

@@ -34,7 +34,7 @@ task. You may need to wait for it to begin running.
description=(
'Create a copy of the task that you can interact with'
),
order=1,
order=50,
context=[{'worker-implementation': 'docker-worker'}],
schema={
'type': 'object',

View File

@@ -1,140 +0,0 @@
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import, print_function, unicode_literals
import json
import logging
from slugid import nice as slugid
from .util import (create_task_from_def, fetch_graph_and_labels)
from .registry import register_callback_action
from taskgraph.util.parameterization import resolve_task_references
TASKCLUSTER_QUEUE_URL = "https://queue.taskcluster.net/v1/task"
logger = logging.getLogger(__name__)
@register_callback_action(
name='retrigger-mochitest-reftest-with-options',
title='Mochitest/Reftest Retrigger',
kind='hook',
generic=True,
symbol='tr',
description="Retriggers the specified mochitest/reftest job with additional options",
context=[{'test-type': 'mochitest'},
{'test-type': 'reftest'}],
order=0,
schema={
'type': 'object',
'properties': {
'path': {
'type': 'string',
'maxLength': 255,
'default': '',
'title': 'Path name',
'description': 'Path of test to retrigger'
},
'logLevel': {
'type': 'string',
'enum': ['debug', 'info', 'warning', 'error', 'critical'],
'default': 'debug',
'title': 'Log level',
'description': 'Log level for output (default is DEBUG, which is highest)'
},
'runUntilFail': {
'type': 'boolean',
'default': True,
'title': 'Run until failure',
'description': ('Runs the specified set of tests repeatedly '
'until failure (or 30 times)')
},
'repeat': {
'type': 'integer',
'default': 30,
'minimum': 1,
'title': 'Run tests N times',
'description': ('Run tests repeatedly (usually used in '
'conjunction with runUntilFail)')
},
'environment': {
'type': 'object',
'default': {'MOZ_LOG': ''},
'title': 'Extra environment variables',
'description': 'Extra environment variables to use for this run',
'additionalProperties': {'type': 'string'}
},
'preferences': {
'type': 'object',
'default': {'mygeckopreferences.pref': 'myvalue2'},
'title': 'Extra gecko (about:config) preferences',
'description': 'Extra gecko (about:config) preferences to use for this run',
'additionalProperties': {'type': 'string'}
}
},
'additionalProperties': False,
'required': ['path']
}
)
def mochitest_retrigger_action(parameters, graph_config, input, task_group_id, task_id, task):
decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(
parameters, graph_config)
pre_task = full_task_graph.tasks[task['metadata']['name']]
# fix up the task's dependencies, similar to how optimization would
# have done in the decision
dependencies = {name: label_to_taskid[label]
for name, label in pre_task.dependencies.iteritems()}
new_task_definition = resolve_task_references(pre_task.label, pre_task.task, dependencies)
new_task_definition.setdefault('dependencies', []).extend(dependencies.itervalues())
# don't want to run mozharness tests, want a custom mach command instead
new_task_definition['payload']['command'] += ['--no-run-tests']
custom_mach_command = [task['tags']['test-type']]
# mochitests may specify a flavor
if new_task_definition['payload']['env'].get('MOCHITEST_FLAVOR'):
custom_mach_command += [
'--keep-open=false',
'-f',
new_task_definition['payload']['env']['MOCHITEST_FLAVOR']
]
enable_e10s = json.loads(new_task_definition['payload']['env'].get(
'ENABLE_E10S', 'true'))
if not enable_e10s:
custom_mach_command += ['--disable-e10s']
custom_mach_command += ['--log-tbpl=-',
'--log-tbpl-level={}'.format(input.get('logLevel', 'debug'))]
if input.get('runUntilFail'):
custom_mach_command += ['--run-until-failure']
if input.get('repeat'):
custom_mach_command += ['--repeat', str(input.get('repeat', 30))]
# add any custom gecko preferences
for (key, val) in input.get('preferences', {}).iteritems():
custom_mach_command += ['--setpref', '{}={}'.format(key, val)]
custom_mach_command += [input['path']]
new_task_definition['payload']['env']['CUSTOM_MACH_COMMAND'] = ' '.join(
custom_mach_command)
# update environment
new_task_definition['payload']['env'].update(input.get('environment', {}))
# tweak the treeherder symbol
new_task_definition['extra']['treeherder']['symbol'] += '-custom'
logging.info("New task definition: %s", new_task_definition)
# actually create the new task
new_task_id = slugid()
create_task_from_def(new_task_id, new_task_definition, parameters['level'])

View File

@@ -24,7 +24,7 @@ logger = logging.getLogger(__name__)
'Purge any caches associated with this task '
'across all workers of the same workertype as the task.'
),
order=100,
order=450,
context=[{'worker-implementation': 'docker-worker'}]
)
def purge_caches_action(parameters, graph_config, input, task_group_id, task_id, task):

View File

@@ -56,7 +56,7 @@ def get_flavors(graph_config, param):
title='Release Promotion',
symbol='${input.release_promotion_flavor}',
description="Promote a release.",
order=10000,
order=500,
context=[],
available=is_release_promotion_available,
schema=lambda graph_config: {

View File

@@ -32,7 +32,7 @@ RERUN_STATES = ('exception', 'failed')
'This only works on failed or exception tasks in the original taskgraph,'
' and is CoT friendly.'
),
order=1,
order=300,
context=[{}],
schema={
'type': 'object',

View File

@@ -6,18 +6,143 @@
from __future__ import absolute_import, print_function, unicode_literals
import json
import logging
from slugid import nice as slugid
from .util import (
combine_task_graph_files,
create_tasks,
create_task_from_def,
fetch_graph_and_labels
)
from ..util.parameterization import resolve_task_references
from .registry import register_callback_action
logger = logging.getLogger(__name__)
@register_callback_action(
name='retrigger',
cb_name='retrigger-mochitest',
title='Retrigger Mochitest/Reftest',
symbol='rt',
kind='hook',
generic=True,
description="Retriggers the specified mochitest/reftest job with additional options",
context=[{'test-type': 'mochitest'},
{'test-type': 'reftest'}],
order=10,
schema={
'type': 'object',
'properties': {
'path': {
'type': 'string',
'maxLength': 255,
'default': '',
'title': 'Path name',
'description': 'Path of test to retrigger'
},
'logLevel': {
'type': 'string',
'enum': ['debug', 'info', 'warning', 'error', 'critical'],
'default': 'debug',
'title': 'Log level',
'description': 'Log level for output (default is DEBUG, which is highest)'
},
'runUntilFail': {
'type': 'boolean',
'default': True,
'title': 'Run until failure',
'description': ('Runs the specified set of tests repeatedly '
'until failure (or 30 times)')
},
'repeat': {
'type': 'integer',
'default': 30,
'minimum': 1,
'title': 'Run tests N times',
'description': ('Run tests repeatedly (usually used in '
'conjunction with runUntilFail)')
},
'environment': {
'type': 'object',
'default': {'MOZ_LOG': ''},
'title': 'Extra environment variables',
'description': 'Extra environment variables to use for this run',
'additionalProperties': {'type': 'string'}
},
'preferences': {
'type': 'object',
'default': {'mygeckopreferences.pref': 'myvalue2'},
'title': 'Extra gecko (about:config) preferences',
'description': 'Extra gecko (about:config) preferences to use for this run',
'additionalProperties': {'type': 'string'}
}
},
'additionalProperties': False,
'required': ['path']
}
)
def mochitest_retrigger_action(parameters, graph_config, input, task_group_id, task_id, task):
decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(
parameters, graph_config)
pre_task = full_task_graph.tasks[task['metadata']['name']]
# fix up the task's dependencies, similar to how optimization would
# have done in the decision
dependencies = {name: label_to_taskid[label]
for name, label in pre_task.dependencies.iteritems()}
new_task_definition = resolve_task_references(pre_task.label, pre_task.task, dependencies)
new_task_definition.setdefault('dependencies', []).extend(dependencies.itervalues())
# don't want to run mozharness tests, want a custom mach command instead
new_task_definition['payload']['command'] += ['--no-run-tests']
custom_mach_command = [task['tags']['test-type']]
# mochitests may specify a flavor
if new_task_definition['payload']['env'].get('MOCHITEST_FLAVOR'):
custom_mach_command += [
'--keep-open=false',
'-f',
new_task_definition['payload']['env']['MOCHITEST_FLAVOR']
]
enable_e10s = json.loads(new_task_definition['payload']['env'].get(
'ENABLE_E10S', 'true'))
if not enable_e10s:
custom_mach_command += ['--disable-e10s']
custom_mach_command += ['--log-tbpl=-',
'--log-tbpl-level={}'.format(input.get('logLevel', 'debug'))]
if input.get('runUntilFail'):
custom_mach_command += ['--run-until-failure']
if input.get('repeat'):
custom_mach_command += ['--repeat', str(input.get('repeat', 30))]
# add any custom gecko preferences
for (key, val) in input.get('preferences', {}).iteritems():
custom_mach_command += ['--setpref', '{}={}'.format(key, val)]
custom_mach_command += [input['path']]
new_task_definition['payload']['env']['CUSTOM_MACH_COMMAND'] = ' '.join(
custom_mach_command)
# update environment
new_task_definition['payload']['env'].update(input.get('environment', {}))
# tweak the treeherder symbol
new_task_definition['extra']['treeherder']['symbol'] += '-custom'
logging.info("New task definition: %s", new_task_definition)
# actually create the new task
new_task_id = slugid()
create_task_from_def(new_task_id, new_task_definition, parameters['level'])
@register_callback_action(
title='Retrigger',
name='retrigger',
@@ -27,7 +152,7 @@ logger = logging.getLogger(__name__)
description=(
'Create a clone of the task.\n\n'
),
order=1,
order=11, # must be greater than other orders in this file, as this is the fallback version
context=[{}],
schema={
'type': 'object',

View File

@@ -27,7 +27,7 @@ logger = logging.getLogger(__name__)
"This action is for use on pushes that will be merged into another branch,"
"to check that optimization hasn't hidden any failures."
),
order=100, # Useful for sheriffs, but not top of the list
order=250,
context=[], # Applies to decision task
)
def run_missing_tests(parameters, graph_config, input, task_group_id, task_id, task):