Mirror of https://github.com/mozilla/gecko-dev.git, synced 2024-11-24 21:31:04 +00:00
Backed out changeset a470e4f70f80 (bug 1754496) as requested by ahal
parent 0634f5b81c
commit 5c198ed236
@@ -264,11 +264,12 @@ def artifact_toolchain(
     """Download, cache and install pre-built toolchains."""
     from mozbuild.artifacts import ArtifactCache
     from mozbuild.action.tooltool import FileRecord, open_manifest, unpack_file
-    from taskgraph.util.taskcluster import get_artifact_url
     import redo
     import requests
     import time
 
+    from gecko_taskgraph.util.taskcluster import get_artifact_url
+
     start = time.time()
     command_context._set_log_level(verbose)
     # Normally, we'd use command_context.log_manager.enable_unstructured(),
@@ -56,7 +56,11 @@ from contextlib import contextmanager
 from io import BufferedReader
 
 import pylru
-from taskgraph.util.taskcluster import find_task_id, get_artifact_url, list_artifacts
+from gecko_taskgraph.util.taskcluster import (
+    find_task_id,
+    get_artifact_url,
+    list_artifacts,
+)
 
 from mach.util import UserError
 
@@ -9,8 +9,8 @@ import re
 import sys
 from functools import partial
 
-from taskgraph.util.taskcluster import get_task_definition
-
+from gecko_taskgraph.util.taskcluster import get_task_definition
+
 from .registry import register_callback_action
 from .util import (
     combine_task_graph_files,
@@ -6,8 +6,7 @@
 import logging
 import requests
 
-from taskgraph.util.taskcluster import cancel_task
-
+from gecko_taskgraph.util.taskcluster import cancel_task
 from .registry import register_callback_action
 
 logger = logging.getLogger(__name__)
@@ -6,11 +6,13 @@
 import concurrent.futures as futures
 import logging
 import os
 
 import requests
-from taskgraph.util.taskcluster import CONCURRENCY, cancel_task
-
-from gecko_taskgraph.util.taskcluster import list_task_group_incomplete_task_ids
+from gecko_taskgraph.util.taskcluster import (
+    list_task_group_incomplete_task_ids,
+    cancel_task,
+    CONCURRENCY,
+)
 from .registry import register_callback_action
 
 logger = logging.getLogger(__name__)
@@ -6,12 +6,15 @@
 import logging
 import os
 import re
 
 import taskcluster_urls
-from taskgraph.util.taskcluster import get_task_definition, get_root_url, send_email
-
-from gecko_taskgraph.actions.registry import register_callback_action
-from gecko_taskgraph.actions.util import create_tasks, fetch_graph_and_labels
+from .util import create_tasks, fetch_graph_and_labels
+from gecko_taskgraph.util.taskcluster import (
+    send_email,
+    get_root_url,
+)
+from .registry import register_callback_action
+from gecko_taskgraph.util import taskcluster
 
 
 logger = logging.getLogger(__name__)
@@ -110,7 +113,7 @@ def create_interactive_action(parameters, graph_config, input, task_group_id, task_id):
     decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(
         parameters, graph_config
     )
-    task = get_task_definition(task_id)
+    task = taskcluster.get_task_definition(task_id)
     label = task["metadata"]["name"]
 
     def edit(task):
@@ -7,12 +7,13 @@ import logging
 
 import requests
 from requests.exceptions import HTTPError
-from taskgraph.util.taskcluster import get_artifact_from_index, get_task_definition
 
 from .registry import register_callback_action
 from .util import create_tasks, combine_task_graph_files
+from gecko_taskgraph.util.taskcluster import get_artifact_from_index
 from gecko_taskgraph.util.taskgraph import find_decision_task
 from gecko_taskgraph.taskgraph import TaskGraph
+from gecko_taskgraph.util import taskcluster
 
 PUSHLOG_TMPL = "{}/json-pushes?version=2&startID={}&endID={}"
 INDEX_TMPL = "gecko.v2.{}.pushlog-id.{}.decision"
@@ -36,7 +37,7 @@ logger = logging.getLogger(__name__)
     available=lambda parameters: True,
 )
 def geckoprofile_action(parameters, graph_config, input, task_group_id, task_id):
-    task = get_task_definition(task_id)
+    task = taskcluster.get_task_definition(task_id)
     label = task["metadata"]["name"]
     pushes = []
     depth = 2
@@ -9,8 +9,11 @@ import logging
 import os
 import re
 
-from taskgraph.util.taskcluster import get_artifact, get_task_definition, list_artifacts
-
+from gecko_taskgraph.util.taskcluster import (
+    list_artifacts,
+    get_artifact,
+    get_task_definition,
+)
 from ..util.parameterization import resolve_task_references
 from .registry import register_callback_action
 from .util import create_task_from_def, fetch_graph_and_labels, add_args_to_command
@@ -5,9 +5,9 @@
 
 import logging
 
-from taskgraph.util.taskcluster import get_task_definition, purge_cache
-
+from gecko_taskgraph.util.taskcluster import purge_cache
 from .registry import register_callback_action
+from gecko_taskgraph.util import taskcluster
 
 logger = logging.getLogger(__name__)
 
@@ -24,7 +24,7 @@ logger = logging.getLogger(__name__)
     context=[{"worker-implementation": "docker-worker"}],
 )
 def purge_caches_action(parameters, graph_config, input, task_group_id, task_id):
-    task = get_task_definition(task_id)
+    task = taskcluster.get_task_definition(task_id)
     if task["payload"].get("cache"):
         for cache in task["payload"]["cache"]:
             purge_cache(
@@ -10,11 +10,11 @@ from collections import namedtuple
 
 from mozbuild.util import memoize
 from taskgraph.parameters import Parameters
-from taskgraph.util import taskcluster, yaml
+from taskgraph.util import yaml
 
 from gecko_taskgraph import create
 from gecko_taskgraph.config import load_graph_config
-from gecko_taskgraph.util import hash
+from gecko_taskgraph.util import taskcluster, hash
 from gecko_taskgraph.util.python_path import import_sibling_modules
 
 
@@ -7,9 +7,9 @@ import json
 import os
 
 from taskgraph.parameters import Parameters
-from taskgraph.util.taskcluster import get_artifact
 
 from gecko_taskgraph.actions.registry import register_callback_action
+from gecko_taskgraph.util.taskcluster import get_artifact
 from gecko_taskgraph.util.taskgraph import (
     find_decision_task,
     find_existing_tasks_from_previous_kinds,
@@ -3,11 +3,10 @@
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 
-import logging
-import textwrap
 import sys
 
-from taskgraph.util.taskcluster import get_task_definition, rerun_task
+import logging
+import textwrap
 
 from .util import (
     combine_task_graph_files,
@@ -20,7 +19,7 @@ from .util import (
     rename_browsertime_vismet_task,
 )
 from .registry import register_callback_action
-from gecko_taskgraph.util.taskcluster import state_task
+from gecko_taskgraph.util import taskcluster
 
 logger = logging.getLogger(__name__)
 
@@ -68,7 +67,7 @@ def retrigger_decision_action(parameters, graph_config, input, task_group_id, task_id):
 
     # make all of the timestamps relative; they will then be turned back into
     # absolute timestamps relative to the current time.
-    task = get_task_definition(task_id)
+    task = taskcluster.get_task_definition(task_id)
     task = relativize_datestamps(task)
     create_task_from_def(
         task, parameters["level"], action_tag="retrigger-decision-task"
@@ -150,7 +149,7 @@ def retrigger_action(parameters, graph_config, input, task_group_id, task_id):
         parameters, graph_config
     )
 
-    task = get_task_definition(task_id)
+    task = taskcluster.get_task_definition(task_id)
     label = task["metadata"]["name"]
 
     is_browsertime = "browsertime" in label
@@ -207,7 +206,7 @@ def retrigger_action(parameters, graph_config, input, task_group_id, task_id):
     schema={"type": "object", "properties": {}},
 )
 def rerun_action(parameters, graph_config, input, task_group_id, task_id):
-    task = get_task_definition(task_id)
+    task = taskcluster.get_task_definition(task_id)
     parameters = dict(parameters)
     decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(
         parameters, graph_config
@@ -224,7 +223,7 @@ def rerun_action(parameters, graph_config, input, task_group_id, task_id):
 
 
 def _rerun_task(task_id, label):
-    state = state_task(task_id)
+    state = taskcluster.state_task(task_id)
     if state not in RERUN_STATES:
         logger.warning(
             "No need to rerun {}: state '{}' not in {}!".format(
@@ -232,7 +231,7 @@ def _rerun_task(task_id, label):
             )
         )
         return
-    rerun_task(task_id)
+    taskcluster.rerun_task(task_id)
     logger.info(f"Reran {label}")
 
 
@@ -6,8 +6,8 @@
 import json
 import logging
 
-from taskgraph.util.taskcluster import get_task_definition
 
+from ..util import taskcluster
 from ..util.parameterization import resolve_task_references
 from .registry import register_callback_action
 from .util import create_task_from_def, fetch_graph_and_labels
@@ -118,7 +118,7 @@ def basic_custom_retrigger_action_basic(
 
 
 def handle_custom_retrigger(parameters, graph_config, input, task_group_id, task_id):
-    task = get_task_definition(task_id)
+    task = taskcluster.get_task_definition(task_id)
     decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(
         parameters, graph_config
     )
@@ -5,10 +5,9 @@
 
 import logging
 
-from taskgraph.util.taskcluster import get_artifact
-
 from .registry import register_callback_action
 from .util import create_tasks, fetch_graph_and_labels
+from gecko_taskgraph.util.taskcluster import get_artifact
 
 logger = logging.getLogger(__name__)
 
@@ -14,21 +14,21 @@ import jsone
 import requests
 from requests.exceptions import HTTPError
 from slugid import nice as slugid
-from taskgraph.util.taskcluster import (
-    find_task_id,
-    get_artifact,
-    get_session,
-    get_task_definition,
-    list_tasks,
-    parse_time,
-    CONCURRENCY,
-)
 
 from gecko_taskgraph import create
 from gecko_taskgraph.decision import read_artifact, write_artifact, rename_artifact
 from gecko_taskgraph.taskgraph import TaskGraph
 from gecko_taskgraph.optimize import optimize_task_graph
-from gecko_taskgraph.util.taskcluster import trigger_hook
+from gecko_taskgraph.util.taskcluster import (
+    find_task_id,
+    get_artifact,
+    get_task_definition,
+    get_session,
+    list_tasks,
+    parse_time,
+    trigger_hook,
+    CONCURRENCY,
+)
 from gecko_taskgraph.util.taskgraph import find_decision_task
 
 logger = logging.getLogger(__name__)
@@ -10,10 +10,9 @@ import logging
 
 
 from slugid import nice as slugid
-from taskgraph.util.taskcluster import get_session, CONCURRENCY
-
 from gecko_taskgraph.util.parameterization import resolve_timestamps
 from gecko_taskgraph.util.time import current_json_time
+from gecko_taskgraph.util.taskcluster import get_session, CONCURRENCY
 
 logger = logging.getLogger(__name__)
 
@@ -13,7 +13,6 @@ from collections import defaultdict
 import yaml
 from redo import retry
 from taskgraph.parameters import Parameters
-from taskgraph.util.taskcluster import get_artifact
 from taskgraph.util.yaml import load_yaml
 from voluptuous import Required, Optional, Any
 
@@ -31,7 +30,7 @@ from .util.hg import get_hg_revision_branch, get_hg_commit_message
 from .util.partials import populate_release_history
 from .util.python_path import find_object
 from .util.schema import validate_schema, Schema
-from .util.taskcluster import insert_index
+from .util.taskcluster import get_artifact, insert_index
 from .util.taskgraph import find_decision_task, find_existing_tasks_from_previous_kinds
 
 
@@ -9,11 +9,14 @@ import tarfile
 from io import BytesIO
 
 from taskgraph.parameters import Parameters
-from taskgraph.util.taskcluster import get_session, get_artifact_url
 
 from gecko_taskgraph.generator import load_tasks_for_kind
 from gecko_taskgraph.optimize.strategies import IndexSearch
 from gecko_taskgraph.util import docker
+from gecko_taskgraph.util.taskcluster import (
+    get_artifact_url,
+    get_session,
+)
 from . import GECKO
 
 
@@ -9,11 +9,10 @@ from datetime import datetime
 import mozpack.path as mozpath
 from mozbuild.base import MozbuildObject
 from mozbuild.util import memoize
-from taskgraph.util.taskcluster import find_task_id
 
 from gecko_taskgraph import files_changed
 from gecko_taskgraph.optimize import register_strategy, OptimizationStrategy
-from gecko_taskgraph.util.taskcluster import status_task
+from gecko_taskgraph.util.taskcluster import find_task_id, status_task
 
 logger = logging.getLogger(__name__)
 
@@ -9,7 +9,6 @@ import re
 
 from redo import retry
 from taskgraph.parameters import Parameters
-from taskgraph.util.taskcluster import find_task_id
 
 from gecko_taskgraph import try_option_syntax
 from gecko_taskgraph.util.attributes import (
@@ -17,6 +16,7 @@ from gecko_taskgraph.util.attributes import (
     match_run_on_hg_branches,
 )
 from gecko_taskgraph.util.platforms import platform_family
+from gecko_taskgraph.util.taskcluster import find_task_id
 
 _target_task_methods = {}
 
@@ -28,6 +28,7 @@ subsuite = taskgraph
 [test_util_python_path.py]
 [test_util_runnable_jobs.py]
 [test_util_schema.py]
+[test_util_taskcluster.py]
 [test_util_templates.py]
 [test_util_time.py]
 [test_util_treeherder.py]
@@ -6,11 +6,10 @@
 import unittest
 import json
 from pprint import pprint
-from unittest.mock import patch
 
 import pytest
+from unittest.mock import patch
 from mozunit import main, MockedOpen
-from taskgraph.util import taskcluster
 
 from gecko_taskgraph import actions, create
 from gecko_taskgraph.decision import read_artifact
@@ -18,6 +17,7 @@ from gecko_taskgraph.actions.util import (
     combine_task_graph_files,
     relativize_datestamps,
 )
+from gecko_taskgraph.util import taskcluster
 
 TASK_DEF = {
     "created": "2017-10-10T18:33:03.460Z",
@@ -9,7 +9,6 @@ from time import mktime
 
 import pytest
 from mozunit import main
-from taskgraph.util.taskcluster import get_artifact_url, get_index_url, get_task_url
 
 from gecko_taskgraph.util.backstop import (
     is_backstop,
@@ -17,6 +16,11 @@ from gecko_taskgraph.util.backstop import (
     BACKSTOP_PUSH_INTERVAL,
     BACKSTOP_TIME_INTERVAL,
 )
+from gecko_taskgraph.util.taskcluster import (
+    get_artifact_url,
+    get_index_url,
+    get_task_url,
+)
 
 LAST_BACKSTOP_ID = 0
 LAST_BACKSTOP_PUSHDATE = mktime(datetime.now().timetuple())
taskcluster/gecko_taskgraph/test/test_util_taskcluster.py (new file, 20 lines)
@@ -0,0 +1,20 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import datetime
+import unittest
+
+import mozunit
+from gecko_taskgraph.util.taskcluster import parse_time
+
+
+class TestTCUtils(unittest.TestCase):
+    def test_parse_time(self):
+        exp = datetime.datetime(2018, 10, 10, 18, 33, 3, 463000)
+        assert parse_time("2018-10-10T18:33:03.463Z") == exp
+
+
+if __name__ == "__main__":
+    mozunit.main()
@@ -9,14 +9,14 @@ Support for running spidermonkey jobs via dedicated scripts
 import os
 import re
 
-from taskgraph.util.taskcluster import get_root_url
-from gecko_taskgraph.util.schema import Schema
 from voluptuous import Any, Optional, Required
 
 from gecko_taskgraph.transforms.job import run_job_using
 from gecko_taskgraph.transforms.job.common import add_artifacts
 
 from gecko_taskgraph.util.hash import hash_path
+from gecko_taskgraph.util.schema import Schema
+from gecko_taskgraph.util.taskcluster import get_root_url
 from gecko_taskgraph import GECKO
 import gecko_taskgraph
 
@@ -7,9 +7,9 @@ import json
 import os
 import re
 
-from taskgraph.util.taskcluster import get_artifact_url
 from voluptuous import Extra, Optional, Required
 
+from gecko_taskgraph.util.taskcluster import get_artifact_url
 from gecko_taskgraph.transforms.job import (
     configure_taskdesc_for_run,
     run_job_using,
@@ -5,10 +5,10 @@
 Transform the mac notarization poller task into an actual task description.
 """
 
-from taskgraph.util.taskcluster import get_artifact_url
 
 from gecko_taskgraph.transforms.base import TransformSequence
 from gecko_taskgraph.util.attributes import copy_attributes_from_dependent_job
+from gecko_taskgraph.util.taskcluster import get_artifact_url
 from gecko_taskgraph.util.treeherder import add_suffix, join_symbol
 
 
@@ -8,7 +8,6 @@ import re
 
 from mozbuild.schedules import INCLUSIVE_COMPONENTS
 from mozbuild.util import ReadOnlyDict
-from taskgraph.util.taskcluster import get_index_url
 from voluptuous import (
     Any,
     Optional,
@@ -19,7 +18,10 @@ from gecko_taskgraph.transforms.base import TransformSequence
 from gecko_taskgraph.transforms.test.variant import TEST_VARIANTS
 from gecko_taskgraph.util.attributes import keymatch
 from gecko_taskgraph.util.keyed_by import evaluate_keyed_by
-from gecko_taskgraph.util.taskcluster import get_artifact_path
+from gecko_taskgraph.util.taskcluster import (
+    get_artifact_path,
+    get_index_url,
+)
 from gecko_taskgraph.util.platforms import platform_family
 from gecko_taskgraph.util.schema import (
     resolve_keyed_by,
@@ -5,10 +5,12 @@
 
 from requests import HTTPError
 
-from taskgraph.util.taskcluster import find_task_id, get_artifact
-
 from gecko_taskgraph.util.attributes import INTEGRATION_PROJECTS, TRY_PROJECTS
-from gecko_taskgraph.util.taskcluster import state_task
+from gecko_taskgraph.util.taskcluster import (
+    find_task_id,
+    get_artifact,
+    state_task,
+)
 
 
 BACKSTOP_PUSH_INTERVAL = 20
@@ -10,8 +10,8 @@ import time
 
 import requests
 from mozbuild.util import memoize
-from taskgraph.util.taskcluster import requests_retry_session
 
+from gecko_taskgraph.util.taskcluster import requests_retry_session
 from gecko_taskgraph import create
 
 try:
@@ -5,9 +5,9 @@
 
 import re
 
-from taskgraph.util.taskcluster import get_artifact_url
 
 from gecko_taskgraph.util.time import json_time_from_now
+from gecko_taskgraph.util.taskcluster import get_artifact_url
 
 TASK_REFERENCE_PATTERN = re.compile("<([^>]+)>")
 ARTIFACT_REFERENCE_PATTERN = re.compile("<([^/]+)/([^>]+)>")
@@ -4,24 +4,165 @@
 
 
 import os
+import datetime
+import functools
+import requests
 import logging
 
 import taskcluster_urls as liburls
 from mozbuild.util import memoize
+from requests.packages.urllib3.util.retry import Retry
 from taskcluster import Hooks
-from taskgraph.util import taskcluster as tc_util
-from taskgraph.util.taskcluster import (
-    _do_request,
-    get_index_url,
-    get_root_url,
-    get_task_definition,
-    get_task_url,
-)
+from taskgraph.util import yaml
 
 from gecko_taskgraph.task import Task
 
 
 logger = logging.getLogger(__name__)
 
+# this is set to true for `mach taskgraph action-callback --test`
+testing = False
+
+# Default rootUrl to use if none is given in the environment; this should point
+# to the production Taskcluster deployment used for CI.
+PRODUCTION_TASKCLUSTER_ROOT_URL = "https://firefox-ci-tc.services.mozilla.com"
+
+# the maximum number of parallel Taskcluster API calls to make
+CONCURRENCY = 50
+
+
+@memoize
+def get_root_url(use_proxy):
+    """Get the current TASKCLUSTER_ROOT_URL. When running in a task, this must
+    come from $TASKCLUSTER_ROOT_URL; when run on the command line, we apply a
+    default that points to the production deployment of Taskcluster. If use_proxy
+    is set, this attempts to get TASKCLUSTER_PROXY_URL instead, failing if it
+    is not set."""
+    if use_proxy:
+        try:
+            return liburls.normalize_root_url(os.environ["TASKCLUSTER_PROXY_URL"])
+        except KeyError:
+            if "TASK_ID" not in os.environ:
+                raise RuntimeError(
+                    "taskcluster-proxy is not available when not executing in a task"
+                )
+            else:
+                raise RuntimeError("taskcluster-proxy is not enabled for this task")
+
+    if "TASKCLUSTER_ROOT_URL" not in os.environ:
+        if "TASK_ID" in os.environ:
+            raise RuntimeError(
+                "$TASKCLUSTER_ROOT_URL must be set when running in a task"
+            )
+        else:
+            logger.debug("Using default TASKCLUSTER_ROOT_URL (Firefox CI production)")
+            return liburls.normalize_root_url(PRODUCTION_TASKCLUSTER_ROOT_URL)
+    logger.debug(
+        "Running in Taskcluster instance {}{}".format(
+            os.environ["TASKCLUSTER_ROOT_URL"],
+            " with taskcluster-proxy" if "TASKCLUSTER_PROXY_URL" in os.environ else "",
+        )
+    )
+    return liburls.normalize_root_url(os.environ["TASKCLUSTER_ROOT_URL"])
+
+
+def requests_retry_session(
+    retries,
+    backoff_factor=0.1,
+    status_forcelist=(500, 502, 504),
+    concurrency=CONCURRENCY,
+    session=None,
+):
+    session = session or requests.Session()
+    retry = Retry(
+        total=retries,
+        read=retries,
+        connect=retries,
+        backoff_factor=backoff_factor,
+        status_forcelist=status_forcelist,
+    )
+
+    # Default HTTPAdapter uses 10 connections. Mount custom adapter to increase
+    # that limit. Connections are established as needed, so using a large value
+    # should not negatively impact performance.
+    http_adapter = requests.adapters.HTTPAdapter(
+        pool_connections=concurrency,
+        pool_maxsize=concurrency,
+        max_retries=retry,
+    )
+    session.mount("http://", http_adapter)
+    session.mount("https://", http_adapter)
+
+    return session
+
+
+@memoize
+def get_session():
+    return requests_retry_session(retries=5)
+
+
+def _do_request(url, method=None, **kwargs):
+    if method is None:
+        method = "post" if kwargs else "get"
+
+    session = get_session()
+    if method == "get":
+        kwargs["stream"] = True
+    response = getattr(session, method)(url, **kwargs)
+
+    if response.status_code >= 400:
+        # Consume content before raise_for_status, so that the connection can be
+        # reused.
+        response.content
+        response.raise_for_status()
+    return response
+
+
+def _handle_artifact(path, response):
+    if path.endswith(".json"):
+        return response.json()
+    if path.endswith(".yml"):
+        return yaml.load_stream(response.text)
+    response.raw.read = functools.partial(response.raw.read, decode_content=True)
+    return response.raw
+
+
+def get_artifact_url(task_id, path, use_proxy=False):
+    artifact_tmpl = liburls.api(
+        get_root_url(False), "queue", "v1", "task/{}/artifacts/{}"
+    )
+    data = artifact_tmpl.format(task_id, path)
+    if use_proxy:
+        # Until Bug 1405889 is deployed, we can't download directly
+        # from the taskcluster-proxy. Work around by using the /bewit
+        # endpoint instead.
+        # The bewit URL is the body of a 303 redirect, which we don't
+        # want to follow (which fetches a potentially large resource).
+        response = _do_request(
+            os.environ["TASKCLUSTER_PROXY_URL"] + "/bewit",
+            data=data,
+            allow_redirects=False,
+        )
+        return response.text
+    return data
+
+
+def get_artifact(task_id, path, use_proxy=False):
+    """
+    Returns the artifact with the given path for the given task id.
+
+    If the path ends with ".json" or ".yml", the content is deserialized as,
+    respectively, json or yaml, and the corresponding python data (usually
+    dict) is returned.
+    For other types of content, a file-like object is returned.
+    """
+    response = _do_request(get_artifact_url(task_id, path, use_proxy))
+    return _handle_artifact(path, response)
+
+
+def list_artifacts(task_id, use_proxy=False):
+    response = _do_request(get_artifact_url(task_id, "", use_proxy).rstrip("/"))
+    return response.json()["artifacts"]
+
+
 def get_artifact_prefix(task):
     prefix = None
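The restored get_root_url above resolves which Taskcluster deployment to talk to: the proxy URL when use_proxy is set (only meaningful inside a task), then an explicit $TASKCLUSTER_ROOT_URL, then the production default. A minimal standalone sketch of the same resolution order, using only the standard library (resolve_root_url is a hypothetical stand-in, not part of the module):

import os

# Mirrors the PRODUCTION_TASKCLUSTER_ROOT_URL constant restored above.
PRODUCTION = "https://firefox-ci-tc.services.mozilla.com"


def resolve_root_url(use_proxy=False):
    # Hypothetical sketch of get_root_url's decision order.
    if use_proxy:
        # Only available inside a task running with taskcluster-proxy.
        if "TASKCLUSTER_PROXY_URL" in os.environ:
            return os.environ["TASKCLUSTER_PROXY_URL"].rstrip("/")
        raise RuntimeError("taskcluster-proxy is not available")
    if "TASKCLUSTER_ROOT_URL" in os.environ:
        return os.environ["TASKCLUSTER_ROOT_URL"].rstrip("/")
    if "TASK_ID" in os.environ:
        # Tasks must be told their deployment explicitly.
        raise RuntimeError("$TASKCLUSTER_ROOT_URL must be set when running in a task")
    # Command-line use falls back to the Firefox CI production deployment.
    return PRODUCTION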
@@ -38,6 +179,53 @@ def get_artifact_path(task, path):
     return f"{get_artifact_prefix(task)}/{path}"
 
 
+def get_index_url(index_path, use_proxy=False, multiple=False):
+    index_tmpl = liburls.api(get_root_url(use_proxy), "index", "v1", "task{}/{}")
+    return index_tmpl.format("s" if multiple else "", index_path)
+
+
+def find_task_id(index_path):
+    try:
+        response = _do_request(get_index_url(index_path))
+    except requests.exceptions.HTTPError as e:
+        if e.response.status_code == 404:
+            raise KeyError(f"index path {index_path} not found")
+        raise
+    return response.json()["taskId"]
+
+
+def get_artifact_from_index(index_path, artifact_path, use_proxy=False):
+    full_path = index_path + "/artifacts/" + artifact_path
+    response = _do_request(get_index_url(full_path, use_proxy))
+    return _handle_artifact(full_path, response)
+
+
+def list_tasks(index_path, use_proxy=False):
+    """
+    Returns a list of task_ids where each task_id is indexed under a path
+    in the index. Results are sorted by expiration date from oldest to newest.
+    """
+    results = []
+    data = {}
+    while True:
+        response = _do_request(
+            get_index_url(index_path, use_proxy, multiple=True), json=data
+        )
+        response = response.json()
+        results += response["tasks"]
+        if response.get("continuationToken"):
+            data = {"continuationToken": response.get("continuationToken")}
+        else:
+            break
+
+    # We can sort on expires because in the general case
+    # all of these tasks should be created with the same expires time so they end up in
+    # order from earliest to latest action. If more correctness is needed, consider
+    # fetching each task and sorting on the created date.
+    results.sort(key=lambda t: parse_time(t["expires"]))
+    return [t["taskId"] for t in results]
+
+
 def insert_index(index_path, task_id, data=None, use_proxy=False):
     index_url = get_index_url(index_path, use_proxy=use_proxy)
 
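list_tasks above pages through the index service, re-posting the continuationToken it gets back until none is returned. A self-contained sketch of that loop, with fetch_page as a hypothetical stand-in for the _do_request call:

def collect_task_ids(fetch_page):
    # fetch_page(payload) returns a dict with "tasks" and, while more pages
    # remain, a "continuationToken" to echo back in the next request.
    results = []
    payload = {}
    while True:
        response = fetch_page(payload)
        results += response["tasks"]
        token = response.get("continuationToken")
        if not token:
            break
        payload = {"continuationToken": token}
    return [t["taskId"] for t in results]


# Exercising the loop with two canned pages:
pages = iter(
    [
        {"tasks": [{"taskId": "a"}], "continuationToken": "next"},
        {"tasks": [{"taskId": "b"}]},
    ]
)
assert collect_task_ids(lambda payload: next(pages)) == ["a", "b"]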
@@ -57,6 +245,30 @@ def insert_index(index_path, task_id, data=None, use_proxy=False):
     return response
 
 
+def parse_time(timestamp):
+    """Turn a "JSON timestamp" as used in TC APIs into a datetime"""
+    return datetime.datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%S.%fZ")
+
+
+def get_task_url(task_id, use_proxy=False):
+    task_tmpl = liburls.api(get_root_url(use_proxy), "queue", "v1", "task/{}")
+    return task_tmpl.format(task_id)
+
+
+def get_task_definition(task_id, use_proxy=False):
+    response = _do_request(get_task_url(task_id, use_proxy))
+    return response.json()
+
+
+def cancel_task(task_id, use_proxy=False):
+    """Cancels a task given a task_id. In testing mode, just logs that it would
+    have cancelled."""
+    if testing:
+        logger.info(f"Would have cancelled {task_id}.")
+    else:
+        _do_request(get_task_url(task_id, use_proxy) + "/cancel", json={})
+
+
 def status_task(task_id, use_proxy=False):
     """Gets the status of a task given a task_id.
 
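parse_time above matches the queue's JSON timestamps with a fixed strptime pattern; the new test file earlier in this commit checks the same conversion. A quick standalone check of that format string:

import datetime

# Same pattern as parse_time above; the trailing "Z" is matched literally,
# so the resulting datetime is naive rather than timezone-aware.
fmt = "%Y-%m-%dT%H:%M:%S.%fZ"
parsed = datetime.datetime.strptime("2018-10-10T18:33:03.463Z", fmt)
assert parsed == datetime.datetime(2018, 10, 10, 18, 33, 3, 463000)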
@@ -70,7 +282,7 @@ def status_task(task_id, use_proxy=False):
         dict: A dictionary object as defined here:
         https://docs.taskcluster.net/docs/reference/platform/queue/api#status
     """
-    if tc_util.testing:
+    if testing:
         logger.info(f"Would have gotten status for {task_id}.")
     else:
         resp = _do_request(get_task_url(task_id, use_proxy) + "/status")
@@ -92,13 +304,22 @@ def state_task(task_id, use_proxy=False):
         str: The state of the task, one of
         ``pending, running, completed, failed, exception, unknown``.
     """
-    if tc_util.testing:
+    if testing:
         logger.info(f"Would have gotten state for {task_id}.")
     else:
         status = status_task(task_id, use_proxy=use_proxy).get("state") or "unknown"
     return status
 
 
+def rerun_task(task_id):
+    """Reruns a task given a task_id. In testing mode, just logs that it would
+    have rerun."""
+    if testing:
+        logger.info(f"Would have rerun {task_id}.")
+    else:
+        _do_request(get_task_url(task_id, use_proxy=True) + "/rerun", json={})
+
+
 def trigger_hook(hook_group_id, hook_id, hook_payload):
     hooks = Hooks({"rootUrl": get_root_url(True)})
     response = hooks.triggerHook(hook_group_id, hook_id, hook_payload)
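rerun_task, cancel_task, status_task and state_task all consult the module-level testing flag restored above, so `mach taskgraph action-callback --test` can log side effects instead of performing them. A minimal sketch of that dry-run guard, with do_request as a hypothetical stand-in for _do_request:

import logging

logger = logging.getLogger(__name__)

# Module-level switch; flipped to True for `mach taskgraph action-callback --test`.
testing = False


def rerun_sketch(task_id, do_request):
    # Same shape as rerun_task above: log in testing mode, POST otherwise.
    if testing:
        logger.info(f"Would have rerun {task_id}.")
    else:
        do_request(f"/task/{task_id}/rerun", json={})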
@@ -111,6 +332,50 @@ def trigger_hook(hook_group_id, hook_id, hook_payload):
     )
 
 
+def get_current_scopes():
+    """Get the current scopes. This only makes sense in a task with the Taskcluster
+    proxy enabled, where it returns the actual scopes accorded to the task."""
+    auth_url = liburls.api(get_root_url(True), "auth", "v1", "scopes/current")
+    resp = _do_request(auth_url)
+    return resp.json().get("scopes", [])
+
+
+def get_purge_cache_url(provisioner_id, worker_type, use_proxy=False):
+    url_tmpl = liburls.api(
+        get_root_url(use_proxy), "purge-cache", "v1", "purge-cache/{}/{}"
+    )
+    return url_tmpl.format(provisioner_id, worker_type)
+
+
+def purge_cache(provisioner_id, worker_type, cache_name, use_proxy=False):
+    """Requests a cache purge from the purge-caches service."""
+    if testing:
+        logger.info(
+            "Would have purged {}/{}/{}.".format(
+                provisioner_id, worker_type, cache_name
+            )
+        )
+    else:
+        logger.info(f"Purging {provisioner_id}/{worker_type}/{cache_name}.")
+        purge_cache_url = get_purge_cache_url(provisioner_id, worker_type, use_proxy)
+        _do_request(purge_cache_url, json={"cacheName": cache_name})
+
+
+def send_email(address, subject, content, link, use_proxy=False):
+    """Sends an email using the notify service"""
+    logger.info(f"Sending email to {address}.")
+    url = liburls.api(get_root_url(use_proxy), "notify", "v1", "email")
+    _do_request(
+        url,
+        json={
+            "address": address,
+            "subject": subject,
+            "content": content,
+            "link": link,
+        },
+    )
+
+
 def list_task_group_tasks(task_group_id):
     """Generate the tasks in a task group"""
     params = {}
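send_email above posts a four-field JSON body to the notify service's email endpoint. A sketch of the equivalent request made with requests directly; the root URL and payload values are placeholders, and the shape of the link object is an assumption rather than something this commit specifies:

import requests

root_url = "https://firefox-ci-tc.services.mozilla.com"  # placeholder deployment
payload = {
    "address": "someone@example.com",  # placeholder recipient
    "subject": "example subject",
    "content": "Plain-text body of the notification.",
    "link": {"text": "details", "href": "https://example.com"},  # assumed shape
}
# liburls.api(root, "notify", "v1", "email") expands to this URL form.
response = requests.post(f"{root_url}/api/notify/v1/email", json=payload)
response.raise_for_status()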
@@ -6,7 +6,11 @@
 Tools for interacting with existing taskgraphs.
 """
 
-from taskgraph.util.taskcluster import find_task_id, get_artifact
+
+from gecko_taskgraph.util.taskcluster import (
+    find_task_id,
+    get_artifact,
+)
 
 
 def find_decision_task(parameters, graph_config):
@@ -65,7 +65,7 @@ def run_perfdocs(config, logger=None, paths=None, generate=True):
 
     decision_task_id = os.environ.get("DECISION_TASK_ID", None)
    if decision_task_id:
-        from taskgraph.util.taskcluster import get_artifact
+        from gecko_taskgraph.util.taskcluster import get_artifact
 
         task_graph = get_artifact(decision_task_id, "public/full-task-graph.json")
     else:
@@ -7,8 +7,8 @@ import sys
 
 import requests
 from taskgraph.parameters import Parameters
-from taskgraph.util.taskcluster import find_task_id, get_artifact, get_session
 
+from gecko_taskgraph.util.taskcluster import find_task_id, get_artifact, get_session
 from gecko_taskgraph.util.taskgraph import find_existing_tasks
 
 from ..cli import BaseTryParser