Bug 1492128: Vendor taskcluster==4.0.1; r=firefox-build-system-reviewers,gps

We can't use taskcluster 5.0.0 yet, because taskcluster-proxy does not
support new-style URLs.
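
For background, old-style URLs put each Taskcluster service on its own
hostname, while new-style URLs hang every service off one root URL. A rough
sketch using the taskcluster-urls package vendored alongside (illustrative
only; https://tc.example.com is a made-up root URL):

    import taskcluster_urls

    # Old-style: per-service hostnames under taskcluster.net.
    taskcluster_urls.api('https://taskcluster.net', 'queue', 'v1', 'ping')
    # -> 'https://queue.taskcluster.net/v1/ping'

    # New-style: a single root URL, with services addressed by path.
    taskcluster_urls.api('https://tc.example.com', 'queue', 'v1', 'ping')
    # -> 'https://tc.example.com/api/queue/v1/ping'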

Differential Revision: https://phabricator.services.mozilla.com/D10146

--HG--
extra : moz-landing-system : lando
Tom Prince 2018-10-30 17:50:49 +00:00
parent de09f5861b
commit a83d0fd435
74 changed files with 20851 additions and 642 deletions


@@ -18,6 +18,7 @@ mozilla.pth:third_party/python/enum34
mozilla.pth:third_party/python/fluent
mozilla.pth:third_party/python/funcsigs
mozilla.pth:third_party/python/futures
mozilla.pth:third_party/python/mohawk
mozilla.pth:third_party/python/more-itertools
mozilla.pth:third_party/python/mozilla-version
mozilla.pth:third_party/python/pathlib2
@@ -36,6 +37,8 @@ mozilla.pth:third_party/python/requests
mozilla.pth:third_party/python/requests-unixsocket
mozilla.pth:third_party/python/scandir
mozilla.pth:third_party/python/slugid
mozilla.pth:third_party/python/taskcluster
mozilla.pth:third_party/python/taskcluster-urls
mozilla.pth:third_party/python/py
mozilla.pth:third_party/python/pytest/src
mozilla.pth:third_party/python/pytoml

third_party/python/mohawk/PKG-INFO vendored Normal file

@@ -0,0 +1,19 @@
Metadata-Version: 1.1
Name: mohawk
Version: 0.3.4
Summary: Library for Hawk HTTP authorization
Home-page: https://github.com/kumar303/mohawk
Author: Kumar McMillan, Austin King
Author-email: kumar.mcmillan@gmail.com
License: MPL 2.0 (Mozilla Public License)
Description: UNKNOWN
Platform: UNKNOWN
Classifier: Intended Audience :: Developers
Classifier: Natural Language :: English
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 2.6
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3.3
Classifier: Topic :: Internet :: WWW/HTTP

third_party/python/mohawk/README.rst vendored Normal file

@@ -0,0 +1,25 @@
======
Mohawk
======

.. image:: https://img.shields.io/pypi/v/mohawk.svg
    :target: https://pypi.python.org/pypi/mohawk
    :alt: Latest PyPI release

.. image:: https://img.shields.io/pypi/dm/mohawk.svg
    :target: https://pypi.python.org/pypi/mohawk
    :alt: PyPI monthly download stats

.. image:: https://travis-ci.org/kumar303/mohawk.svg?branch=master
    :target: https://travis-ci.org/kumar303/mohawk
    :alt: Travis master branch status

.. image:: https://readthedocs.org/projects/mohawk/badge/?version=latest
    :target: https://mohawk.readthedocs.io/en/latest/?badge=latest
    :alt: Documentation status

Mohawk is an alternate Python implementation of the
`Hawk HTTP authorization scheme`_.

Full documentation: https://mohawk.readthedocs.io/

.. _`Hawk HTTP authorization scheme`: https://github.com/hueniverse/hawk
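
A minimal signing example, adapted from the test suite vendored below
(credentials and URL are placeholders):

    from mohawk import Sender

    credentials = {'id': 'some-id', 'key': 'some secret', 'algorithm': 'sha256'}
    sender = Sender(credentials, 'https://my-site.com/', 'POST',
                    content='foo=bar',
                    content_type='application/x-www-form-urlencoded')

    # The value to send as the request's Authorization header:
    print(sender.request_header)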

third_party/python/mohawk/mohawk/__init__.py vendored Normal file

@@ -0,0 +1,2 @@
from .sender import *
from .receiver import *

third_party/python/mohawk/mohawk/base.py vendored Normal file

@@ -0,0 +1,230 @@
import logging
import math
import pprint
import six
from six.moves.urllib.parse import urlparse
from .exc import (AlreadyProcessed,
MacMismatch,
MisComputedContentHash,
TokenExpired)
from .util import (calculate_mac,
calculate_payload_hash,
calculate_ts_mac,
prepare_header_val,
random_string,
strings_match,
utc_now)
default_ts_skew_in_seconds = 60
log = logging.getLogger(__name__)
class HawkAuthority:
def _authorize(self, mac_type, parsed_header, resource,
their_timestamp=None,
timestamp_skew_in_seconds=default_ts_skew_in_seconds,
localtime_offset_in_seconds=0,
accept_untrusted_content=False):
now = utc_now(offset_in_seconds=localtime_offset_in_seconds)
their_hash = parsed_header.get('hash', '')
their_mac = parsed_header.get('mac', '')
mac = calculate_mac(mac_type, resource, their_hash)
if not strings_match(mac, their_mac):
raise MacMismatch('MACs do not match; ours: {ours}; '
'theirs: {theirs}'
.format(ours=mac, theirs=their_mac))
if 'hash' not in parsed_header and accept_untrusted_content:
# The request did not hash its content.
log.debug('NOT calculating/verifying payload hash '
'(no hash in header)')
check_hash = False
content_hash = None
else:
check_hash = True
content_hash = resource.gen_content_hash()
if check_hash and not their_hash:
log.info('request unexpectedly did not hash its content')
if check_hash:
if not strings_match(content_hash, their_hash):
# The hash declared in the header is incorrect.
# Content could have been tampered with.
log.debug('mismatched content: {content}'
.format(content=repr(resource.content)))
log.debug('mismatched content-type: {typ}'
.format(typ=repr(resource.content_type)))
raise MisComputedContentHash(
'Our hash {ours} ({algo}) did not '
'match theirs {theirs}'
.format(ours=content_hash,
theirs=their_hash,
algo=resource.credentials['algorithm']))
if resource.seen_nonce:
if resource.seen_nonce(resource.credentials['id'],
parsed_header['nonce'],
parsed_header['ts']):
raise AlreadyProcessed('Nonce {nonce} with timestamp {ts} '
'has already been processed for {id}'
.format(nonce=parsed_header['nonce'],
ts=parsed_header['ts'],
id=resource.credentials['id']))
else:
log.warn('seen_nonce was None; not checking nonce. '
'You may be vulnerable to replay attacks')
their_ts = int(their_timestamp or parsed_header['ts'])
if math.fabs(their_ts - now) > timestamp_skew_in_seconds:
message = ('token with UTC timestamp {ts} has expired; '
'it was compared to {now}'
.format(ts=their_ts, now=now))
tsm = calculate_ts_mac(now, resource.credentials)
if isinstance(tsm, six.binary_type):
tsm = tsm.decode('ascii')
www_authenticate = ('Hawk ts="{ts}", tsm="{tsm}", error="{error}"'
.format(ts=now, tsm=tsm, error=message))
raise TokenExpired(message,
localtime_in_seconds=now,
www_authenticate=www_authenticate)
log.debug('authorized OK')
def _make_header(self, resource, mac, additional_keys=None):
keys = additional_keys
if not keys:
# These are the default header keys that you'd send with a
# request header. Response headers are odd because they
# exclude a bunch of keys.
keys = ('id', 'ts', 'nonce', 'ext', 'app', 'dlg')
header = u'Hawk mac="{mac}"'.format(mac=prepare_header_val(mac))
if resource.content_hash:
header = u'{header}, hash="{hash}"'.format(
header=header,
hash=prepare_header_val(resource.content_hash))
if 'id' in keys:
header = u'{header}, id="{id}"'.format(
header=header,
id=prepare_header_val(resource.credentials['id']))
if 'ts' in keys:
header = u'{header}, ts="{ts}"'.format(
header=header, ts=prepare_header_val(resource.timestamp))
if 'nonce' in keys:
header = u'{header}, nonce="{nonce}"'.format(
header=header, nonce=prepare_header_val(resource.nonce))
# These are optional so we need to check if they have values first.
if 'ext' in keys and resource.ext:
header = u'{header}, ext="{ext}"'.format(
header=header, ext=prepare_header_val(resource.ext))
if 'app' in keys and resource.app:
header = u'{header}, app="{app}"'.format(
header=header, app=prepare_header_val(resource.app))
if 'dlg' in keys and resource.dlg:
header = u'{header}, dlg="{dlg}"'.format(
header=header, dlg=prepare_header_val(resource.dlg))
log.debug('Hawk header for URL={url} method={method}: {header}'
.format(url=resource.url, method=resource.method,
header=header))
return header
class Resource:
"""
Normalized request/response resource.
"""
def __init__(self, **kw):
self.credentials = kw.pop('credentials')
self.method = kw.pop('method').upper()
self.content = kw.pop('content', None)
self.content_type = kw.pop('content_type', None)
self.always_hash_content = kw.pop('always_hash_content', True)
self.ext = kw.pop('ext', None)
self.app = kw.pop('app', None)
self.dlg = kw.pop('dlg', None)
self.timestamp = str(kw.pop('timestamp', None) or utc_now())
self.nonce = kw.pop('nonce', None)
if self.nonce is None:
self.nonce = random_string(6)
# This is a lookup function for checking nonces.
self.seen_nonce = kw.pop('seen_nonce', None)
self.url = kw.pop('url')
if not self.url:
raise ValueError('url was empty')
url_parts = self.parse_url(self.url)
log.debug('parsed URL parts: \n{parts}'
.format(parts=pprint.pformat(url_parts)))
self.name = url_parts['resource'] or ''
self.host = url_parts['hostname'] or ''
self.port = str(url_parts['port'])
if kw.keys():
raise TypeError('Unknown keyword argument(s): {0}'
.format(kw.keys()))
@property
def content_hash(self):
if not hasattr(self, '_content_hash'):
raise AttributeError(
'Cannot access content_hash because it has not been generated')
return self._content_hash
def gen_content_hash(self):
if self.content is None or self.content_type is None:
if self.always_hash_content:
# Be really strict about allowing developers to skip content
# hashing. If they get this far they may be unintentionally
# skipping it.
raise ValueError(
'payload content and/or content_type cannot be '
'empty without an explicit allowance')
log.debug('NOT hashing content')
self._content_hash = None
else:
self._content_hash = calculate_payload_hash(
self.content, self.credentials['algorithm'],
self.content_type)
return self.content_hash
def parse_url(self, url):
url_parts = urlparse(url)
url_dict = {
'scheme': url_parts.scheme,
'hostname': url_parts.hostname,
'port': url_parts.port,
'path': url_parts.path,
'resource': url_parts.path,
'query': url_parts.query,
}
if len(url_dict['query']) > 0:
url_dict['resource'] = '%s?%s' % (url_dict['resource'],
url_dict['query'])
if url_parts.port is None:
if url_parts.scheme == 'http':
url_dict['port'] = 80
elif url_parts.scheme == 'https':
url_dict['port'] = 443
return url_dict
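
A quick sketch of what this normalization yields; the MAC covers the
"resource" (path plus query string), the hostname, and a port defaulted from
the scheme (placeholder credentials):

    from mohawk.base import Resource

    res = Resource(url='https://site.com/foo?bar=1', method='GET',
                   credentials={'id': 'i', 'key': 'k', 'algorithm': 'sha256'})
    assert res.name == '/foo?bar=1'  # path + query string
    assert res.host == 'site.com'
    assert res.port == '443'         # defaulted from the https scheme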

third_party/python/mohawk/mohawk/bewit.py vendored Normal file

@@ -0,0 +1,167 @@
from base64 import urlsafe_b64encode, b64decode
from collections import namedtuple
import logging
import re
import six
from .base import Resource
from .util import (calculate_mac,
utc_now)
from .exc import (CredentialsLookupError,
InvalidBewit,
MacMismatch,
TokenExpired)
log = logging.getLogger(__name__)
def get_bewit(resource):
"""
Returns a bewit identifier for the resource as a string.
:param resource:
Resource to generate a bewit for
:type resource: `mohawk.base.Resource`
"""
if resource.method != 'GET':
raise ValueError('bewits can only be generated for GET requests')
if resource.nonce != '':
raise ValueError('bewits must use an empty nonce')
mac = calculate_mac(
'bewit',
resource,
None,
)
if isinstance(mac, six.binary_type):
mac = mac.decode('ascii')
if resource.ext is None:
ext = ''
else:
ext = resource.ext
# Strip out \ from the client id
# since that can break parsing the response
# NB that the canonical implementation does not do this as of
# Oct 28, 2015, so this could break compat.
# We can leave \ in ext since validators can limit how many \ they split
# on (although again, the canonical implementation does not do this)
client_id = six.text_type(resource.credentials['id'])
if "\\" in client_id:
log.warn("Stripping backslash character(s) '\\' from client_id")
client_id = client_id.replace("\\", "")
# b64encode works only with bytes in python3, but all of our parameters are
# in unicode, so we need to encode them. The cleanest way to do this that
# works in both python 2 and 3 is to use string formatting to get a
# unicode string, and then explicitly encode it to bytes.
inner_bewit = u"{id}\\{exp}\\{mac}\\{ext}".format(
id=client_id,
exp=resource.timestamp,
mac=mac,
ext=ext,
)
inner_bewit_bytes = inner_bewit.encode('ascii')
bewit_bytes = urlsafe_b64encode(inner_bewit_bytes)
# Now decode the resulting bytes back to a unicode string
return bewit_bytes.decode('ascii')
bewittuple = namedtuple('bewittuple', 'id expiration mac ext')
def parse_bewit(bewit):
"""
Returns a `bewittuple` representing the parts of an encoded bewit string.
This has the following named attributes:
(id, expiration, mac, ext)
:param bewit:
A base64 encoded bewit string
:type bewit: str
"""
decoded_bewit = b64decode(bewit).decode('ascii')
bewit_parts = decoded_bewit.split("\\", 3)
if len(bewit_parts) != 4:
raise InvalidBewit('Expected 4 parts to bewit: %s' % decoded_bewit)
return bewittuple(*decoded_bewit.split("\\", 3))
def strip_bewit(url):
"""
Strips the bewit parameter out of a url.
Returns (encoded_bewit, stripped_url)
Raises InvalidBewit if no bewit found.
:param url:
The url containing a bewit parameter
:type url: str
"""
m = re.search('[?&]bewit=([^&]+)', url)
if not m:
raise InvalidBewit('no bewit data found')
bewit = m.group(1)
stripped_url = url[:m.start()] + url[m.end():]
return bewit, stripped_url
def check_bewit(url, credential_lookup, now=None):
"""
Validates the given bewit.
Returns True if the resource has a valid bewit parameter attached,
or raises a subclass of HawkFail otherwise.
:param credential_lookup:
Callable to look up the credentials dict by sender ID.
The credentials dict must have the keys:
``id``, ``key``, and ``algorithm``.
See :ref:`receiving-request` for an example.
:type credential_lookup: callable
:param now=None:
Unix epoch time for the current time to determine if bewit has expired.
If None, then the current time as given by utc_now() is used.
:type now=None: integer
"""
raw_bewit, stripped_url = strip_bewit(url)
bewit = parse_bewit(raw_bewit)
try:
credentials = credential_lookup(bewit.id)
except LookupError:
raise CredentialsLookupError('Could not find credentials for ID {0}'
.format(bewit.id))
res = Resource(url=stripped_url,
method='GET',
credentials=credentials,
timestamp=bewit.expiration,
nonce='',
ext=bewit.ext,
)
mac = calculate_mac('bewit', res, None)
mac = mac.decode('ascii')
if mac != bewit.mac:
raise MacMismatch('bewit with mac {bewit_mac} did not match expected mac {expected_mac}'
.format(bewit_mac=bewit.mac,
expected_mac=mac))
# Check that the timestamp isn't expired
if now is None:
# TODO: Add offset/skew
now = utc_now()
if int(bewit.expiration) < now:
# TODO: Refactor TokenExpired to handle this better
raise TokenExpired('bewit with UTC timestamp {ts} has expired; '
'it was compared to {now}'
.format(ts=bewit.expiration, now=now),
localtime_in_seconds=now,
www_authenticate=''
)
return True
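
A short usage sketch mirroring the vendored tests (the fixed timestamp and
credentials are test vectors, not real secrets):

    from mohawk.base import Resource
    from mohawk.bewit import check_bewit, get_bewit

    credentials = {'id': '123456', 'key': '2983d45yun89q', 'algorithm': 'sha256'}
    res = Resource(url='https://example.com/somewhere/over/the/rainbow',
                   method='GET', credentials=credentials,
                   timestamp=1356420407 + 300, nonce='')
    bewit = get_bewit(res)

    # Append the bewit as a query parameter; the receiving side validates it.
    url = 'https://example.com/somewhere/over/the/rainbow?bewit=' + bewit
    assert check_bewit(url, credential_lookup=lambda cid: credentials,
                       now=1356420407 + 10)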

third_party/python/mohawk/mohawk/exc.py vendored Normal file

@@ -0,0 +1,98 @@
"""
If you want to catch any exception that might be raised,
catch :class:`mohawk.exc.HawkFail`.
"""
class HawkFail(Exception):
"""
All Mohawk exceptions derive from this base.
"""
class MissingAuthorization(HawkFail):
"""
No authorization header was sent by the client.
"""
class InvalidCredentials(HawkFail):
"""
The specified Hawk credentials are invalid.
For example, the dict could be formatted incorrectly.
"""
class CredentialsLookupError(HawkFail):
"""
A :class:`mohawk.Receiver` could not look up the
credentials for an incoming request.
"""
class BadHeaderValue(HawkFail):
"""
There was an error with an attribute or value when parsing
or creating a Hawk header.
"""
class MacMismatch(HawkFail):
"""
The locally calculated MAC did not match the MAC that was sent.
"""
class MisComputedContentHash(HawkFail):
"""
The signature of the content did not match the actual content.
"""
class TokenExpired(HawkFail):
"""
The timestamp on a message received has expired.
You may also receive this message if your server clock is out of sync.
Consider synchronizing it with something like `TLSdate`_.
If you are unable to synchronize your clock universally,
the `Hawk`_ spec mentions how you can `adjust`_
your sender's time to match that of the receiver in the case
of unexpected expiration.
The ``www_authenticate`` attribute of this exception is a header
that can be returned to the client. If the value is not None, it
will include a timestamp HMAC'd with the sender's credentials.
This will allow the client
to verify the value and safely apply an offset.
.. _`Hawk`: https://github.com/hueniverse/hawk
.. _`adjust`: https://github.com/hueniverse/hawk#future-time-manipulation
.. _`TLSdate`: http://linux-audit.com/tlsdate-the-secure-alternative-for-ntpd-ntpdate-and-rdate/
"""
#: Current local time in seconds that was used to compare timestamps.
localtime_in_seconds = None
#: A header containing an HMAC'd server timestamp that the sender can verify.
www_authenticate = None
def __init__(self, *args, **kw):
self.localtime_in_seconds = kw.pop('localtime_in_seconds')
self.www_authenticate = kw.pop('www_authenticate')
super(HawkFail, self).__init__(*args, **kw)
class AlreadyProcessed(HawkFail):
"""
The message has already been processed and cannot be re-processed.
See :ref:`nonce` for details.
"""
class InvalidBewit(HawkFail):
"""
The bewit is invalid; e.g. it doesn't contain the right number of
parameters.
"""

third_party/python/mohawk/mohawk/receiver.py vendored Normal file

@@ -0,0 +1,170 @@
import logging
import sys
from .base import default_ts_skew_in_seconds, HawkAuthority, Resource
from .exc import CredentialsLookupError, MissingAuthorization
from .util import (calculate_mac,
parse_authorization_header,
validate_credentials)
__all__ = ['Receiver']
log = logging.getLogger(__name__)
class Receiver(HawkAuthority):
"""
A Hawk authority that will receive and respond to requests.
:param credentials_map:
Callable to look up the credentials dict by sender ID.
The credentials dict must have the keys:
``id``, ``key``, and ``algorithm``.
See :ref:`receiving-request` for an example.
:type credentials_map: callable
:param request_header:
A `Hawk`_ ``Authorization`` header
such as one created by :class:`mohawk.Sender`.
:type request_header: str
:param url: Absolute URL of the request.
:type url: str
:param method: Method of the request, e.g. POST or GET.
:type method: str
:param content=None: Byte string of request body.
:type content=None: str
:param content_type=None: content-type header value for request.
:type content_type=None: str
:param accept_untrusted_content=False:
When True, allow requests that do not hash their content or
allow None type ``content`` and ``content_type``
arguments. Read :ref:`skipping-content-checks`
to learn more.
:type accept_untrusted_content=False: bool
:param localtime_offset_in_seconds=0:
Seconds to add to local time in case it's out of sync.
:type localtime_offset_in_seconds=0: float
:param timestamp_skew_in_seconds=60:
Max seconds until a message expires. Upon expiry,
:class:`mohawk.exc.TokenExpired` is raised.
:type timestamp_skew_in_seconds=60: float
.. _`Hawk`: https://github.com/hueniverse/hawk
"""
#: Value suitable for a ``Server-Authorization`` header.
response_header = None
def __init__(self,
credentials_map,
request_header,
url,
method,
content=None,
content_type=None,
seen_nonce=None,
localtime_offset_in_seconds=0,
accept_untrusted_content=False,
timestamp_skew_in_seconds=default_ts_skew_in_seconds,
**auth_kw):
self.response_header = None # make into property that can raise exc?
self.credentials_map = credentials_map
self.seen_nonce = seen_nonce
log.debug('accepting request {header}'.format(header=request_header))
if not request_header:
raise MissingAuthorization()
parsed_header = parse_authorization_header(request_header)
try:
credentials = self.credentials_map(parsed_header['id'])
except LookupError:
etype, val, tb = sys.exc_info()
log.debug('Catching {etype}: {val}'.format(etype=etype, val=val))
raise CredentialsLookupError(
'Could not find credentials for ID {0}'
.format(parsed_header['id']))
validate_credentials(credentials)
resource = Resource(url=url,
method=method,
ext=parsed_header.get('ext', None),
app=parsed_header.get('app', None),
dlg=parsed_header.get('dlg', None),
credentials=credentials,
nonce=parsed_header['nonce'],
seen_nonce=self.seen_nonce,
content=content,
timestamp=parsed_header['ts'],
content_type=content_type)
self._authorize(
'header', parsed_header, resource,
timestamp_skew_in_seconds=timestamp_skew_in_seconds,
localtime_offset_in_seconds=localtime_offset_in_seconds,
accept_untrusted_content=accept_untrusted_content,
**auth_kw)
# Now that we verified an incoming request, we can re-use some of its
# properties to build our response header.
self.parsed_header = parsed_header
self.resource = resource
def respond(self,
content=None,
content_type=None,
always_hash_content=True,
ext=None):
"""
Respond to the request.
This generates the :attr:`mohawk.Receiver.response_header`
attribute.
:param content=None: Byte string of response body that will be sent.
:type content=None: str
:param content_type=None: content-type header value for response.
:type content_type=None: str
:param always_hash_content=True:
When True, ``content`` and ``content_type`` cannot be None.
Read :ref:`skipping-content-checks` to learn more.
:type always_hash_content=True: bool
:param ext=None:
An external `Hawk`_ string. If not None, this value will be
signed so that the sender can trust it.
:type ext=None: str
.. _`Hawk`: https://github.com/hueniverse/hawk
"""
log.debug('generating response header')
resource = Resource(url=self.resource.url,
credentials=self.resource.credentials,
ext=ext,
app=self.parsed_header.get('app', None),
dlg=self.parsed_header.get('dlg', None),
method=self.resource.method,
content=content,
content_type=content_type,
always_hash_content=always_hash_content,
nonce=self.parsed_header['nonce'],
timestamp=self.parsed_header['ts'])
mac = calculate_mac('response', resource, resource.gen_content_hash())
self.response_header = self._make_header(resource, mac,
additional_keys=['ext'])
return self.response_header

third_party/python/mohawk/mohawk/sender.py vendored Normal file

@@ -0,0 +1,178 @@
import logging
from .base import default_ts_skew_in_seconds, HawkAuthority, Resource
from .util import (calculate_mac,
parse_authorization_header,
validate_credentials)
__all__ = ['Sender']
log = logging.getLogger(__name__)
class Sender(HawkAuthority):
"""
A Hawk authority that will emit requests and verify responses.
:param credentials: Dict of credentials with keys ``id``, ``key``,
and ``algorithm``. See :ref:`usage` for an example.
:type credentials: dict
:param url: Absolute URL of the request.
:type url: str
:param method: Method of the request, e.g. POST or GET.
:type method: str
:param content=None: Byte string of request body.
:type content=None: str
:param content_type=None: content-type header value for request.
:type content_type=None: str
:param always_hash_content=True:
When True, ``content`` and ``content_type`` cannot be None.
Read :ref:`skipping-content-checks` to learn more.
:type always_hash_content=True: bool
:param nonce=None:
A string that when coupled with the timestamp will
uniquely identify this request to prevent replays.
If None, a nonce will be generated for you.
:type nonce=None: str
:param ext=None:
An external `Hawk`_ string. If not None, this value will be signed
so that the receiver can trust it.
:type ext=None: str
:param app=None:
A `Hawk`_ application string. If not None, this value will be signed
so that the receiver can trust it.
:type app=None: str
:param dlg=None:
A `Hawk`_ delegation string. If not None, this value will be signed
so that the receiver can trust it.
:type dlg=None: str
:param seen_nonce=None:
A callable that returns True if a nonce has been seen.
See :ref:`nonce` for details.
:type seen_nonce=None: callable
.. _`Hawk`: https://github.com/hueniverse/hawk
"""
#: Value suitable for an ``Authorization`` header.
request_header = None
def __init__(self, credentials,
url,
method,
content=None,
content_type=None,
always_hash_content=True,
nonce=None,
ext=None,
app=None,
dlg=None,
seen_nonce=None,
# For easier testing:
_timestamp=None):
self.reconfigure(credentials)
self.request_header = None
self.seen_nonce = seen_nonce
log.debug('generating request header')
self.req_resource = Resource(url=url,
credentials=self.credentials,
ext=ext,
app=app,
dlg=dlg,
nonce=nonce,
method=method,
content=content,
always_hash_content=always_hash_content,
timestamp=_timestamp,
content_type=content_type)
mac = calculate_mac('header', self.req_resource,
self.req_resource.gen_content_hash())
self.request_header = self._make_header(self.req_resource, mac)
def accept_response(self,
response_header,
content=None,
content_type=None,
accept_untrusted_content=False,
localtime_offset_in_seconds=0,
timestamp_skew_in_seconds=default_ts_skew_in_seconds,
**auth_kw):
"""
Accept a response to this request.
:param response_header:
A `Hawk`_ ``Server-Authorization`` header
such as one created by :class:`mohawk.Receiver`.
:type response_header: str
:param content=None: Byte string of the response body received.
:type content=None: str
:param content_type=None:
Content-Type header value of the response received.
:type content_type=None: str
:param accept_untrusted_content=False:
When True, allow responses that do not hash their content or
allow None type ``content`` and ``content_type``
arguments. Read :ref:`skipping-content-checks`
to learn more.
:type accept_untrusted_content=False: bool
:param localtime_offset_in_seconds=0:
Seconds to add to local time in case it's out of sync.
:type localtime_offset_in_seconds=0: float
:param timestamp_skew_in_seconds=60:
Max seconds until a message expires. Upon expiry,
:class:`mohawk.exc.TokenExpired` is raised.
:type timestamp_skew_in_seconds=60: float
.. _`Hawk`: https://github.com/hueniverse/hawk
"""
log.debug('accepting response {header}'
.format(header=response_header))
parsed_header = parse_authorization_header(response_header)
resource = Resource(ext=parsed_header.get('ext', None),
content=content,
content_type=content_type,
# The following response attributes are
# in reference to the original request,
# not to the response header:
timestamp=self.req_resource.timestamp,
nonce=self.req_resource.nonce,
url=self.req_resource.url,
method=self.req_resource.method,
app=self.req_resource.app,
dlg=self.req_resource.dlg,
credentials=self.credentials,
seen_nonce=self.seen_nonce)
self._authorize(
'response', parsed_header, resource,
# Per Node lib, a responder macs the *sender's* timestamp.
# It does not create its own timestamp.
# I suppose a slow response could time out here. Maybe only check
# mac failures, not timeouts?
their_timestamp=resource.timestamp,
timestamp_skew_in_seconds=timestamp_skew_in_seconds,
localtime_offset_in_seconds=localtime_offset_in_seconds,
accept_untrusted_content=accept_untrusted_content,
**auth_kw)
def reconfigure(self, credentials):
validate_credentials(credentials)
self.credentials = credentials
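
The full handshake, condensed from TestSendAndReceive in the vendored tests:

    from mohawk import Receiver, Sender

    credentials = {'id': 'some-id', 'key': 'some secret', 'algorithm': 'sha256'}
    url = 'https://my-site.com/'

    sender = Sender(credentials, url, 'POST',
                    content='foo=bar&baz=nooz',
                    content_type='application/x-www-form-urlencoded')
    receiver = Receiver(lambda sender_id: credentials, sender.request_header,
                        url, 'POST',
                        content='foo=bar&baz=nooz',
                        content_type='application/x-www-form-urlencoded')

    # The server signs its response against the request's nonce/timestamp...
    receiver.respond(content='we are friends', content_type='text/plain')
    # ...and the client verifies the resulting Server-Authorization header.
    sender.accept_response(receiver.response_header,
                           content='we are friends', content_type='text/plain')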

third_party/python/mohawk/mohawk/tests.py vendored Normal file

@@ -0,0 +1,823 @@
import sys
from unittest import TestCase
from base64 import b64decode, urlsafe_b64encode
import mock
from nose.tools import eq_, raises
import six
from . import Receiver, Sender
from .base import Resource
from .exc import (AlreadyProcessed,
BadHeaderValue,
CredentialsLookupError,
InvalidCredentials,
MacMismatch,
MisComputedContentHash,
MissingAuthorization,
TokenExpired,
InvalidBewit)
from .util import (parse_authorization_header,
utc_now,
calculate_ts_mac,
validate_credentials)
from .bewit import (get_bewit,
check_bewit,
strip_bewit,
parse_bewit)
class Base(TestCase):
def setUp(self):
self.credentials = {
'id': 'my-hawk-id',
'key': 'my hAwK sekret',
'algorithm': 'sha256',
}
# This callable might be replaced by tests.
def seen_nonce(id, nonce, ts):
return False
self.seen_nonce = seen_nonce
def credentials_map(self, id):
# Pretend this is doing something more interesting like looking up
# credentials by ID in a database.
if self.credentials['id'] != id:
raise LookupError('No credentials for Hawk ID {id}'
.format(id=id))
return self.credentials
class TestConfig(Base):
@raises(InvalidCredentials)
def test_no_id(self):
c = self.credentials.copy()
del c['id']
validate_credentials(c)
@raises(InvalidCredentials)
def test_no_key(self):
c = self.credentials.copy()
del c['key']
validate_credentials(c)
@raises(InvalidCredentials)
def test_no_algo(self):
c = self.credentials.copy()
del c['algorithm']
validate_credentials(c)
@raises(InvalidCredentials)
def test_no_credentials(self):
validate_credentials(None)
def test_non_dict_credentials(self):
class WeirdThing(object):
def __getitem__(self, key):
return 'whatever'
validate_credentials(WeirdThing())
class TestSender(Base):
def setUp(self):
super(TestSender, self).setUp()
self.url = 'http://site.com/foo?bar=1'
def Sender(self, method='GET', **kw):
credentials = kw.pop('credentials', self.credentials)
kw.setdefault('content', '')
kw.setdefault('content_type', '')
sender = Sender(credentials, self.url, method, **kw)
return sender
def receive(self, request_header, url=None, method='GET', **kw):
credentials_map = kw.pop('credentials_map', self.credentials_map)
kw.setdefault('content', '')
kw.setdefault('content_type', '')
kw.setdefault('seen_nonce', self.seen_nonce)
return Receiver(credentials_map, request_header,
url or self.url, method, **kw)
def test_get_ok(self):
method = 'GET'
sn = self.Sender(method=method)
self.receive(sn.request_header, method=method)
def test_post_ok(self):
method = 'POST'
sn = self.Sender(method=method)
self.receive(sn.request_header, method=method)
def test_post_content_ok(self):
method = 'POST'
content = 'foo=bar&baz=2'
sn = self.Sender(method=method, content=content)
self.receive(sn.request_header, method=method, content=content)
def test_post_content_type_ok(self):
method = 'POST'
content = '{"bar": "foobs"}'
content_type = 'application/json'
sn = self.Sender(method=method, content=content,
content_type=content_type)
self.receive(sn.request_header, method=method, content=content,
content_type=content_type)
def test_post_content_type_with_trailing_charset(self):
method = 'POST'
content = '{"bar": "foobs"}'
content_type = 'application/json; charset=utf8'
sn = self.Sender(method=method, content=content,
content_type=content_type)
self.receive(sn.request_header, method=method, content=content,
content_type='application/json; charset=other')
@raises(ValueError)
def test_missing_payload_details(self):
self.Sender(method='POST', content=None, content_type=None)
def test_skip_payload_hashing(self):
method = 'POST'
content = '{"bar": "foobs"}'
content_type = 'application/json'
sn = self.Sender(method=method, content=None, content_type=None,
always_hash_content=False)
self.receive(sn.request_header, method=method, content=content,
content_type=content_type,
accept_untrusted_content=True)
@raises(ValueError)
def test_cannot_skip_content_only(self):
self.Sender(method='POST', content=None,
content_type='application/json')
@raises(ValueError)
def test_cannot_skip_content_type_only(self):
self.Sender(method='POST', content='{"foo": "bar"}',
content_type=None)
@raises(MacMismatch)
def test_tamper_with_host(self):
sn = self.Sender()
self.receive(sn.request_header, url='http://TAMPERED-WITH.com')
@raises(MacMismatch)
def test_tamper_with_method(self):
sn = self.Sender(method='GET')
self.receive(sn.request_header, method='POST')
@raises(MacMismatch)
def test_tamper_with_path(self):
sn = self.Sender()
self.receive(sn.request_header,
url='http://site.com/TAMPERED?bar=1')
@raises(MacMismatch)
def test_tamper_with_query(self):
sn = self.Sender()
self.receive(sn.request_header,
url='http://site.com/foo?bar=TAMPERED')
@raises(MacMismatch)
def test_tamper_with_scheme(self):
sn = self.Sender()
self.receive(sn.request_header, url='https://site.com/foo?bar=1')
@raises(MacMismatch)
def test_tamper_with_port(self):
sn = self.Sender()
self.receive(sn.request_header,
url='http://site.com:8000/foo?bar=1')
@raises(MisComputedContentHash)
def test_tamper_with_content(self):
sn = self.Sender()
self.receive(sn.request_header, content='stuff=nope')
def test_non_ascii_content(self):
content = u'Ivan Kristi\u0107'
sn = self.Sender(content=content)
self.receive(sn.request_header, content=content)
@raises(MacMismatch)
def test_tamper_with_content_type(self):
sn = self.Sender(method='POST')
self.receive(sn.request_header, content_type='application/json')
@raises(AlreadyProcessed)
def test_nonce_fail(self):
def seen_nonce(id, nonce, ts):
return True
sn = self.Sender()
self.receive(sn.request_header, seen_nonce=seen_nonce)
def test_nonce_ok(self):
def seen_nonce(id, nonce, ts):
return False
sn = self.Sender(seen_nonce=seen_nonce)
self.receive(sn.request_header)
@raises(TokenExpired)
def test_expired_ts(self):
now = utc_now() - 120
sn = self.Sender(_timestamp=now)
self.receive(sn.request_header)
def test_expired_exception_reports_localtime(self):
now = utc_now()
ts = now - 120
sn = self.Sender(_timestamp=ts) # force expiry
exc = None
with mock.patch('mohawk.base.utc_now') as fake_now:
fake_now.return_value = now
try:
self.receive(sn.request_header)
except:
etype, exc, tb = sys.exc_info()
eq_(type(exc), TokenExpired)
eq_(exc.localtime_in_seconds, now)
def test_localtime_offset(self):
now = utc_now() - 120
sn = self.Sender(_timestamp=now)
# Without an offset this will raise an expired exception.
self.receive(sn.request_header, localtime_offset_in_seconds=-120)
def test_localtime_skew(self):
now = utc_now() - 120
sn = self.Sender(_timestamp=now)
# Without a wider skew this will raise an expired exception.
self.receive(sn.request_header, timestamp_skew_in_seconds=120)
@raises(MacMismatch)
def test_hash_tampering(self):
sn = self.Sender()
header = sn.request_header.replace('hash="', 'hash="nope')
self.receive(header)
@raises(MacMismatch)
def test_bad_secret(self):
cfg = {
'id': 'my-hawk-id',
'key': 'INCORRECT; YOU FAIL',
'algorithm': 'sha256',
}
sn = self.Sender(credentials=cfg)
self.receive(sn.request_header)
@raises(MacMismatch)
def test_unexpected_algorithm(self):
cr = self.credentials.copy()
cr['algorithm'] = 'sha512'
sn = self.Sender(credentials=cr)
# Validate with mismatched credentials (sha256).
self.receive(sn.request_header)
@raises(InvalidCredentials)
def test_invalid_credentials(self):
cfg = self.credentials.copy()
# Create invalid credentials.
del cfg['algorithm']
self.Sender(credentials=cfg)
@raises(CredentialsLookupError)
def test_unknown_id(self):
cr = self.credentials.copy()
cr['id'] = 'someone-else'
sn = self.Sender(credentials=cr)
self.receive(sn.request_header)
@raises(MacMismatch)
def test_bad_ext(self):
sn = self.Sender(ext='my external data')
header = sn.request_header.replace('my external data', 'TAMPERED')
self.receive(header)
def test_ext_with_quotes(self):
sn = self.Sender(ext='quotes=""')
self.receive(sn.request_header)
parsed = parse_authorization_header(sn.request_header)
eq_(parsed['ext'], 'quotes=""')
def test_ext_with_new_line(self):
sn = self.Sender(ext="new line \n in the middle")
self.receive(sn.request_header)
parsed = parse_authorization_header(sn.request_header)
eq_(parsed['ext'], "new line \n in the middle")
def test_ext_with_equality_sign(self):
sn = self.Sender(ext="foo=bar&foo2=bar2;foo3=bar3")
self.receive(sn.request_header)
parsed = parse_authorization_header(sn.request_header)
eq_(parsed['ext'], "foo=bar&foo2=bar2;foo3=bar3")
@raises(BadHeaderValue)
def test_ext_with_illegal_chars(self):
self.Sender(ext="something like \t is illegal")
@raises(BadHeaderValue)
def test_ext_with_illegal_unicode(self):
self.Sender(ext=u'Ivan Kristi\u0107')
@raises(BadHeaderValue)
def test_ext_with_illegal_utf8(self):
# This isn't allowed because the escaped byte chars are out of
# range. It's a little odd but this is what the Node lib does
# implicitly with its regex.
self.Sender(ext=u'Ivan Kristi\u0107'.encode('utf8'))
def test_app_ok(self):
app = 'custom-app'
sn = self.Sender(app=app)
self.receive(sn.request_header)
parsed = parse_authorization_header(sn.request_header)
eq_(parsed['app'], app)
@raises(MacMismatch)
def test_tampered_app(self):
app = 'custom-app'
sn = self.Sender(app=app)
header = sn.request_header.replace(app, 'TAMPERED-WITH')
self.receive(header)
def test_dlg_ok(self):
dlg = 'custom-dlg'
sn = self.Sender(dlg=dlg)
self.receive(sn.request_header)
parsed = parse_authorization_header(sn.request_header)
eq_(parsed['dlg'], dlg)
@raises(MacMismatch)
def test_tampered_dlg(self):
dlg = 'custom-dlg'
sn = self.Sender(dlg=dlg, app='some-app')
header = sn.request_header.replace(dlg, 'TAMPERED-WITH')
self.receive(header)
class TestReceiver(Base):
def setUp(self):
super(TestReceiver, self).setUp()
self.url = 'http://site.com/'
self.sender = None
self.receiver = None
def receive(self, method='GET', **kw):
url = kw.pop('url', self.url)
sender = kw.pop('sender', None)
sender_kw = kw.pop('sender_kw', {})
sender_kw.setdefault('content', '')
sender_kw.setdefault('content_type', '')
sender_url = kw.pop('sender_url', url)
credentials_map = kw.pop('credentials_map',
lambda id: self.credentials)
if sender:
self.sender = sender
else:
self.sender = Sender(self.credentials, sender_url, method,
**sender_kw)
kw.setdefault('content', '')
kw.setdefault('content_type', '')
self.receiver = Receiver(credentials_map,
self.sender.request_header, url, method,
**kw)
def respond(self, **kw):
accept_kw = kw.pop('accept_kw', {})
accept_kw.setdefault('content', '')
accept_kw.setdefault('content_type', '')
receiver = kw.pop('receiver', self.receiver)
kw.setdefault('content', '')
kw.setdefault('content_type', '')
receiver.respond(**kw)
self.sender.accept_response(receiver.response_header, **accept_kw)
return receiver.response_header
@raises(InvalidCredentials)
def test_invalid_credentials_lookup(self):
# Return invalid credentials.
self.receive(credentials_map=lambda *a: {})
def test_get_ok(self):
method = 'GET'
self.receive(method=method)
self.respond()
def test_post_ok(self):
method = 'POST'
self.receive(method=method)
self.respond()
@raises(MisComputedContentHash)
def test_respond_with_wrong_content(self):
self.receive()
self.respond(content='real content',
accept_kw=dict(content='TAMPERED WITH'))
@raises(MisComputedContentHash)
def test_respond_with_wrong_content_type(self):
self.receive()
self.respond(content_type='text/html',
accept_kw=dict(content_type='application/json'))
@raises(MissingAuthorization)
def test_missing_authorization(self):
Receiver(lambda id: self.credentials, None, '/', 'GET')
@raises(MacMismatch)
def test_respond_with_wrong_url(self):
self.receive(url='http://fakesite.com')
wrong_receiver = self.receiver
self.receive(url='http://realsite.com')
self.respond(receiver=wrong_receiver)
@raises(MacMismatch)
def test_respond_with_wrong_method(self):
self.receive(method='GET')
wrong_receiver = self.receiver
self.receive(method='POST')
self.respond(receiver=wrong_receiver)
@raises(MacMismatch)
def test_respond_with_wrong_nonce(self):
self.receive(sender_kw=dict(nonce='another-nonce'))
wrong_receiver = self.receiver
self.receive()
# The nonce must match the one sent in the original request.
self.respond(receiver=wrong_receiver)
def test_respond_with_unhashed_content(self):
self.receive()
self.respond(always_hash_content=False, content=None,
content_type=None,
accept_kw=dict(accept_untrusted_content=True))
@raises(TokenExpired)
def test_respond_with_expired_ts(self):
self.receive()
hdr = self.receiver.respond(content='', content_type='')
with mock.patch('mohawk.base.utc_now') as fn:
fn.return_value = 0 # force an expiry
try:
self.sender.accept_response(hdr, content='', content_type='')
except TokenExpired:
etype, exc, tb = sys.exc_info()
hdr = parse_authorization_header(exc.www_authenticate)
calculated = calculate_ts_mac(fn(), self.credentials)
if isinstance(calculated, six.binary_type):
calculated = calculated.decode('ascii')
eq_(hdr['tsm'], calculated)
raise
def test_respond_with_bad_ts_skew_ok(self):
now = utc_now() - 120
self.receive()
hdr = self.receiver.respond(content='', content_type='')
with mock.patch('mohawk.base.utc_now') as fn:
fn.return_value = now
# Without a wider skew this will raise an expired exception.
self.sender.accept_response(hdr, content='', content_type='',
timestamp_skew_in_seconds=120)
def test_respond_with_ext(self):
self.receive()
ext = 'custom-ext'
self.respond(ext=ext)
header = parse_authorization_header(self.receiver.response_header)
eq_(header['ext'], ext)
@raises(MacMismatch)
def test_respond_with_wrong_app(self):
self.receive(sender_kw=dict(app='TAMPERED-WITH', dlg='delegation'))
self.receiver.respond(content='', content_type='')
wrong_receiver = self.receiver
self.receive(sender_kw=dict(app='real-app', dlg='delegation'))
self.sender.accept_response(wrong_receiver.response_header,
content='', content_type='')
@raises(MacMismatch)
def test_respond_with_wrong_dlg(self):
self.receive(sender_kw=dict(app='app', dlg='TAMPERED-WITH'))
self.receiver.respond(content='', content_type='')
wrong_receiver = self.receiver
self.receive(sender_kw=dict(app='app', dlg='real-dlg'))
self.sender.accept_response(wrong_receiver.response_header,
content='', content_type='')
@raises(MacMismatch)
def test_receive_wrong_method(self):
self.receive(method='GET')
wrong_sender = self.sender
self.receive(method='POST', sender=wrong_sender)
@raises(MacMismatch)
def test_receive_wrong_url(self):
self.receive(url='http://fakesite.com/')
wrong_sender = self.sender
self.receive(url='http://realsite.com/', sender=wrong_sender)
@raises(MisComputedContentHash)
def test_receive_wrong_content(self):
self.receive(sender_kw=dict(content='real request'),
content='real request')
wrong_sender = self.sender
self.receive(content='TAMPERED WITH', sender=wrong_sender)
@raises(MisComputedContentHash)
def test_unexpected_unhashed_content(self):
self.receive(sender_kw=dict(content=None, content_type=None,
always_hash_content=False))
@raises(ValueError)
def test_cannot_receive_empty_content_only(self):
content_type = 'text/plain'
self.receive(sender_kw=dict(content='<content>',
content_type=content_type),
content=None, content_type=content_type)
@raises(ValueError)
def test_cannot_receive_empty_content_type_only(self):
content = '<content>'
self.receive(sender_kw=dict(content=content,
content_type='text/plain'),
content=content, content_type=None)
@raises(MisComputedContentHash)
def test_receive_wrong_content_type(self):
self.receive(sender_kw=dict(content_type='text/html'),
content_type='text/html')
wrong_sender = self.sender
self.receive(content_type='application/json',
sender=wrong_sender)
class TestSendAndReceive(Base):
def test(self):
credentials = {
'id': 'some-id',
'key': 'some secret',
'algorithm': 'sha256'
}
url = 'https://my-site.com/'
method = 'POST'
# The client sends a request with a Hawk header.
content = 'foo=bar&baz=nooz'
content_type = 'application/x-www-form-urlencoded'
sender = Sender(credentials,
url, method,
content=content,
content_type=content_type)
# The server receives a request and authorizes access.
receiver = Receiver(lambda id: credentials,
sender.request_header,
url, method,
content=content,
content_type=content_type)
# The server responds with a similar Hawk header.
content = 'we are friends'
content_type = 'text/plain'
receiver.respond(content=content,
content_type=content_type)
# The client receives a response and authorizes access.
sender.accept_response(receiver.response_header,
content=content,
content_type=content_type)
class TestBewit(Base):
# Test cases copied from
# https://github.com/hueniverse/hawk/blob/492632da51ecedd5f59ce96f081860ad24ce6532/test/uri.js
def setUp(self):
self.credentials = {
'id': '123456',
'key': '2983d45yun89q',
'algorithm': 'sha256',
}
def make_credential_lookup(self, credentials_map):
# Helper function to make a lookup function given a dictionary of
# credentials
def lookup(client_id):
# Will raise a KeyError if missing, which is a subclass of
# LookupError
return credentials_map[client_id]
return lookup
def test_bewit(self):
res = Resource(url='https://example.com/somewhere/over/the/rainbow',
method='GET', credentials=self.credentials,
timestamp=1356420407 + 300,
nonce='',
)
bewit = get_bewit(res)
expected = '123456\\1356420707\\IGYmLgIqLrCe8CxvKPs4JlWIA+UjWJJouwgARiVhCAg=\\'
eq_(b64decode(bewit).decode('ascii'), expected)
def test_bewit_with_binary_id(self):
# Check for exceptions in get_bewit call with binary id
binary_credentials = self.credentials.copy()
binary_credentials['id'] = binary_credentials['id'].encode('ascii')
res = Resource(url='https://example.com/somewhere/over/the/rainbow',
method='GET', credentials=binary_credentials,
timestamp=1356420407 + 300,
nonce='',
)
get_bewit(res)
def test_bewit_with_ext(self):
res = Resource(url='https://example.com/somewhere/over/the/rainbow',
method='GET', credentials=self.credentials,
timestamp=1356420407 + 300,
nonce='',
ext='xandyandz'
)
bewit = get_bewit(res)
expected = '123456\\1356420707\\kscxwNR2tJpP1T1zDLNPbB5UiKIU9tOSJXTUdG7X9h8=\\xandyandz'
eq_(b64decode(bewit).decode('ascii'), expected)
def test_bewit_with_ext_and_backslashes(self):
credentials = self.credentials
credentials['id'] = '123\\456'
res = Resource(url='https://example.com/somewhere/over/the/rainbow',
method='GET', credentials=self.credentials,
timestamp=1356420407 + 300,
nonce='',
ext='xand\\yandz'
)
bewit = get_bewit(res)
expected = '123456\\1356420707\\b82LLIxG5UDkaChLU953mC+SMrbniV1sb8KiZi9cSsc=\\xand\\yandz'
eq_(b64decode(bewit).decode('ascii'), expected)
def test_bewit_with_port(self):
res = Resource(url='https://example.com:8080/somewhere/over/the/rainbow',
method='GET', credentials=self.credentials,
timestamp=1356420407 + 300, nonce='', ext='xandyandz')
bewit = get_bewit(res)
expected = '123456\\1356420707\\hZbJ3P2cKEo4ky0C8jkZAkRyCZueg4WSNbxV7vq3xHU=\\xandyandz'
eq_(b64decode(bewit).decode('ascii'), expected)
@raises(ValueError)
def test_bewit_with_nonce(self):
res = Resource(url='https://example.com/somewhere/over/the/rainbow',
method='GET', credentials=self.credentials,
timestamp=1356420407 + 300,
nonce='n1')
get_bewit(res)
@raises(ValueError)
def test_bewit_invalid_method(self):
res = Resource(url='https://example.com:8080/somewhere/over/the/rainbow',
method='POST', credentials=self.credentials,
timestamp=1356420407 + 300, nonce='')
get_bewit(res)
def test_strip_bewit(self):
bewit = b'123456\\1356420707\\IGYmLgIqLrCe8CxvKPs4JlWIA+UjWJJouwgARiVhCAg=\\'
bewit = urlsafe_b64encode(bewit).decode('ascii')
url = "https://example.com/somewhere/over/the/rainbow?bewit={bewit}".format(bewit=bewit)
raw_bewit, stripped_url = strip_bewit(url)
self.assertEquals(raw_bewit, bewit)
self.assertEquals(stripped_url, "https://example.com/somewhere/over/the/rainbow")
@raises(InvalidBewit)
def test_strip_url_without_bewit(self):
url = "https://example.com/somewhere/over/the/rainbow"
strip_bewit(url)
def test_parse_bewit(self):
bewit = b'123456\\1356420707\\IGYmLgIqLrCe8CxvKPs4JlWIA+UjWJJouwgARiVhCAg=\\'
bewit = urlsafe_b64encode(bewit).decode('ascii')
bewit = parse_bewit(bewit)
self.assertEquals(bewit.id, '123456')
self.assertEquals(bewit.expiration, '1356420707')
self.assertEquals(bewit.mac, 'IGYmLgIqLrCe8CxvKPs4JlWIA+UjWJJouwgARiVhCAg=')
self.assertEquals(bewit.ext, '')
def test_parse_bewit_with_ext(self):
bewit = b'123456\\1356420707\\IGYmLgIqLrCe8CxvKPs4JlWIA+UjWJJouwgARiVhCAg=\\xandyandz'
bewit = urlsafe_b64encode(bewit).decode('ascii')
bewit = parse_bewit(bewit)
self.assertEquals(bewit.id, '123456')
self.assertEquals(bewit.expiration, '1356420707')
self.assertEquals(bewit.mac, 'IGYmLgIqLrCe8CxvKPs4JlWIA+UjWJJouwgARiVhCAg=')
self.assertEquals(bewit.ext, 'xandyandz')
def test_parse_bewit_with_ext_and_backslashes(self):
bewit = b'123456\\1356420707\\IGYmLgIqLrCe8CxvKPs4JlWIA+UjWJJouwgARiVhCAg=\\xand\\yandz'
bewit = urlsafe_b64encode(bewit).decode('ascii')
bewit = parse_bewit(bewit)
self.assertEquals(bewit.id, '123456')
self.assertEquals(bewit.expiration, '1356420707')
self.assertEquals(bewit.mac, 'IGYmLgIqLrCe8CxvKPs4JlWIA+UjWJJouwgARiVhCAg=')
self.assertEquals(bewit.ext, 'xand\\yandz')
@raises(InvalidBewit)
def test_parse_invalid_bewit_with_only_one_part(self):
bewit = b'12345'
bewit = urlsafe_b64encode(bewit).decode('ascii')
bewit = parse_bewit(bewit)
@raises(InvalidBewit)
def test_parse_invalid_bewit_with_only_two_parts(self):
bewit = b'1\\2'
bewit = urlsafe_b64encode(bewit).decode('ascii')
bewit = parse_bewit(bewit)
def test_validate_bewit(self):
bewit = b'123456\\1356420707\\IGYmLgIqLrCe8CxvKPs4JlWIA+UjWJJouwgARiVhCAg=\\'
bewit = urlsafe_b64encode(bewit).decode('ascii')
url = "https://example.com/somewhere/over/the/rainbow?bewit={bewit}".format(bewit=bewit)
credential_lookup = self.make_credential_lookup({
self.credentials['id']: self.credentials,
})
self.assertTrue(check_bewit(url, credential_lookup=credential_lookup, now=1356420407 + 10))
def test_validate_bewit_with_ext(self):
bewit = b'123456\\1356420707\\kscxwNR2tJpP1T1zDLNPbB5UiKIU9tOSJXTUdG7X9h8=\\xandyandz'
bewit = urlsafe_b64encode(bewit).decode('ascii')
url = "https://example.com/somewhere/over/the/rainbow?bewit={bewit}".format(bewit=bewit)
credential_lookup = self.make_credential_lookup({
self.credentials['id']: self.credentials,
})
self.assertTrue(check_bewit(url, credential_lookup=credential_lookup, now=1356420407 + 10))
def test_validate_bewit_with_ext_and_backslashes(self):
bewit = b'123456\\1356420707\\b82LLIxG5UDkaChLU953mC+SMrbniV1sb8KiZi9cSsc=\\xand\\yandz'
bewit = urlsafe_b64encode(bewit).decode('ascii')
url = "https://example.com/somewhere/over/the/rainbow?bewit={bewit}".format(bewit=bewit)
credential_lookup = self.make_credential_lookup({
self.credentials['id']: self.credentials,
})
self.assertTrue(check_bewit(url, credential_lookup=credential_lookup, now=1356420407 + 10))
@raises(TokenExpired)
def test_validate_expired_bewit(self):
bewit = b'123456\\1356420707\\IGYmLgIqLrCe8CxvKPs4JlWIA+UjWJJouwgARiVhCAg=\\'
bewit = urlsafe_b64encode(bewit).decode('ascii')
url = "https://example.com/somewhere/over/the/rainbow?bewit={bewit}".format(bewit=bewit)
credential_lookup = self.make_credential_lookup({
self.credentials['id']: self.credentials,
})
check_bewit(url, credential_lookup=credential_lookup, now=1356420407 + 1000)
@raises(CredentialsLookupError)
def test_validate_bewit_with_unknown_credentials(self):
bewit = b'123456\\1356420707\\IGYmLgIqLrCe8CxvKPs4JlWIA+UjWJJouwgARiVhCAg=\\'
bewit = urlsafe_b64encode(bewit).decode('ascii')
url = "https://example.com/somewhere/over/the/rainbow?bewit={bewit}".format(bewit=bewit)
credential_lookup = self.make_credential_lookup({
'other_id': self.credentials,
})
check_bewit(url, credential_lookup=credential_lookup, now=1356420407 + 10)

third_party/python/mohawk/mohawk/util.py vendored Normal file

@@ -0,0 +1,267 @@
from base64 import b64encode, urlsafe_b64encode
import calendar
import hashlib
import hmac
import logging
import math
import os
import pprint
import re
import sys
import time
import six
from .exc import (
BadHeaderValue,
HawkFail,
InvalidCredentials)
HAWK_VER = 1
log = logging.getLogger(__name__)
allowable_header_keys = set(['id', 'ts', 'tsm', 'nonce', 'hash',
'error', 'ext', 'mac', 'app', 'dlg'])
def validate_credentials(creds):
if not hasattr(creds, '__getitem__'):
raise InvalidCredentials('credentials must be a dict-like object')
try:
creds['id']
creds['key']
creds['algorithm']
except KeyError:
etype, val, tb = sys.exc_info()
raise InvalidCredentials('{etype}: {val}'
.format(etype=etype, val=val))
def random_string(length):
"""Generates a random string for a given length."""
# this conservatively gets 8*length bits and then returns 6*length of
# them. Grabbing (6/8)*length bits could lose some entropy off the ends.
return urlsafe_b64encode(os.urandom(length))[:length]
def calculate_payload_hash(payload, algorithm, content_type):
"""Calculates a hash for a given payload."""
p_hash = hashlib.new(algorithm)
parts = []
parts.append('hawk.' + str(HAWK_VER) + '.payload\n')
parts.append(parse_content_type(content_type) + '\n')
parts.append(payload or '')
parts.append('\n')
for i, p in enumerate(parts):
# Make sure we are about to hash binary strings.
if not isinstance(p, six.binary_type):
p = p.encode('utf8')
p_hash.update(p)
parts[i] = p
log.debug('calculating payload hash from:\n{parts}'
.format(parts=pprint.pformat(parts)))
return b64encode(p_hash.digest())
def calculate_mac(mac_type, resource, content_hash):
"""Calculates a message authorization code (MAC)."""
normalized = normalize_string(mac_type, resource, content_hash)
log.debug(u'normalized resource for mac calc: {norm}'
.format(norm=normalized))
digestmod = getattr(hashlib, resource.credentials['algorithm'])
# Make sure we are about to hash binary strings.
if not isinstance(normalized, six.binary_type):
normalized = normalized.encode('utf8')
key = resource.credentials['key']
if not isinstance(key, six.binary_type):
key = key.encode('ascii')
result = hmac.new(key, normalized, digestmod)
return b64encode(result.digest())
def calculate_ts_mac(ts, credentials):
"""Calculates a message authorization code (MAC) for a timestamp."""
normalized = ('hawk.{hawk_ver}.ts\n{ts}\n'
.format(hawk_ver=HAWK_VER, ts=ts))
log.debug(u'normalized resource for ts mac calc: {norm}'
.format(norm=normalized))
digestmod = getattr(hashlib, credentials['algorithm'])
if not isinstance(normalized, six.binary_type):
normalized = normalized.encode('utf8')
key = credentials['key']
if not isinstance(key, six.binary_type):
key = key.encode('ascii')
result = hmac.new(key, normalized, digestmod)
return b64encode(result.digest())
def normalize_string(mac_type, resource, content_hash):
"""Serializes mac_type and resource into a HAWK string."""
normalized = [
'hawk.' + str(HAWK_VER) + '.' + mac_type,
normalize_header_attr(resource.timestamp),
normalize_header_attr(resource.nonce),
normalize_header_attr(resource.method or ''),
normalize_header_attr(resource.name or ''),
normalize_header_attr(resource.host),
normalize_header_attr(resource.port),
normalize_header_attr(content_hash or '')
]
# The blank lines are important. They follow what the Node Hawk lib does.
normalized.append(normalize_header_attr(resource.ext or ''))
if resource.app:
normalized.append(normalize_header_attr(resource.app))
normalized.append(normalize_header_attr(resource.dlg or ''))
# Add trailing new line.
normalized.append('')
normalized = '\n'.join(normalized)
return normalized
def parse_content_type(content_type):
"""Cleans up content_type."""
if content_type:
return content_type.split(';')[0].strip().lower()
else:
return ''
def parse_authorization_header(auth_header):
"""
Example Authorization header:
'Hawk id="dh37fgj492je", ts="1367076201", nonce="NPHgnG", ext="and
welcome!", mac="CeWHy4d9kbLGhDlkyw2Nh3PJ7SDOdZDa267KH4ZaNMY="'
"""
attributes = {}
# Make sure we have a unicode object for consistency.
if isinstance(auth_header, six.binary_type):
auth_header = auth_header.decode('utf8')
parts = auth_header.split(',')
auth_scheme_parts = parts[0].split(' ')
if 'hawk' != auth_scheme_parts[0].lower():
raise HawkFail("Unknown scheme '{scheme}' when parsing header"
.format(scheme=auth_scheme_parts[0].lower()))
# Replace 'Hawk key: value' with 'key: value'
# which matches the rest of parts
parts[0] = auth_scheme_parts[1]
for part in parts:
attr_parts = part.split('=')
key = attr_parts[0].strip()
if key not in allowable_header_keys:
raise HawkFail("Unknown Hawk key '{key}' when parsing header"
.format(key=key))
if len(attr_parts) > 2:
attr_parts[1] = '='.join(attr_parts[1:])
# Chop off quotation marks
value = attr_parts[1]
if attr_parts[1].find('"') == 0:
value = attr_parts[1][1:]
if value.find('"') > -1:
value = value[0:-1]
validate_header_attr(value, name=key)
value = unescape_header_attr(value)
attributes[key] = value
log.debug('parsed Hawk header: {header} into: \n{parsed}'
.format(header=auth_header, parsed=pprint.pformat(attributes)))
return attributes
def strings_match(a, b):
# Constant time string comparison, mitigates side channel attacks.
if len(a) != len(b):
return False
result = 0
def byte_ints(buf):
for ch in buf:
# In Python 3, if we have a bytes object, iterating it will
# already get the integer value. In older pythons, we need
# to use ord().
if not isinstance(ch, int):
ch = ord(ch)
yield ch
for x, y in zip(byte_ints(a), byte_ints(b)):
result |= x ^ y
return result == 0
def utc_now(offset_in_seconds=0.0):
# TODO: add support for SNTP server? See ntplib module.
return int(math.floor(calendar.timegm(time.gmtime()) +
float(offset_in_seconds)))
# Allowed value characters:
# !#$%&'()*+,-./:;<=>?@[]^_`{|}~ and space, a-z, A-Z, 0-9, \, "
_header_attribute_chars = re.compile(
r"^[ a-zA-Z0-9_\!#\$%&'\(\)\*\+,\-\./\:;<\=>\?@\[\]\^`\{\|\}~\"\\]*$")
def validate_header_attr(val, name=None):
if not _header_attribute_chars.match(val):
raise BadHeaderValue('header value name={name} value={val} '
'contained an illegal character'
.format(name=name or '?', val=repr(val)))
def escape_header_attr(val):
# Ensure we are working with Unicode for consistency.
if isinstance(val, six.binary_type):
val = val.decode('utf8')
# Escape quotes and slash like the hawk reference code.
val = val.replace('\\', '\\\\')
val = val.replace('"', '\\"')
val = val.replace('\n', '\\n')
return val
def unescape_header_attr(val):
# Un-do the hawk escaping.
val = val.replace('\\n', '\n')
val = val.replace('\\\\', '\\').replace('\\"', '"')
return val
def prepare_header_val(val):
val = escape_header_attr(val)
validate_header_attr(val)
return val
def normalize_header_attr(val):
if not val:
val = ''
# Normalize like the hawk reference code.
val = escape_header_attr(val)
return val

third_party/python/mohawk/setup.cfg vendored Normal file

@@ -0,0 +1,5 @@
[egg_info]
tag_build =
tag_date = 0
tag_svn_revision = 0

third_party/python/mohawk/setup.py vendored Normal file

@@ -0,0 +1,25 @@
from setuptools import setup, find_packages
setup(name='mohawk',
version='0.3.4',
description="Library for Hawk HTTP authorization",
long_description='',
author='Kumar McMillan, Austin King',
author_email='kumar.mcmillan@gmail.com',
license='MPL 2.0 (Mozilla Public License)',
url='https://github.com/kumar303/mohawk',
include_package_data=True,
classifiers=[
'Intended Audience :: Developers',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Topic :: Internet :: WWW/HTTP',
],
packages=find_packages(exclude=['tests']),
install_requires=['six'])


@ -44,6 +44,9 @@ with Files('lldbutils/**'):
with Files('mock-1.0.0/**'):
BUG_COMPONENT = ('Firefox Build System', 'General')
with Files('mohawk/**'):
BUG_COMPONENT = ('Taskcluster', 'Platform Libraries')
with Files('mozilla-version/**'):
BUG_COMPONENT = ('Release Engineering', 'General Automation')
@ -89,6 +92,9 @@ with Files('rsa/**'):
with Files('slugid/**'):
BUG_COMPONENT = ('Taskcluster', 'Platform Libraries')
with Files('taskcluster/**'):
BUG_COMPONENT = ('Taskcluster', 'Platform Libraries')
with Files('virtualenv/**'):
BUG_COMPONENT = ('Firefox Build System', 'General')


@ -11,5 +11,6 @@ pytest==3.6.2
python-hglib==2.4
requests==2.9.1
six==1.10.0
taskcluster==4.0.1
virtualenv==15.2.0
voluptuous==0.11.5


@ -31,6 +31,10 @@ jsmin==2.1.0 \
--hash=sha256:5d07bf0251a4128e5e8e8eef603849b6b5741c337bff087731a248f9cc774f56
json-e==2.7.0 \
--hash=sha256:d8c1ec3f5bbc7728c3a504ebe58829f283c64eca230871e4eefe974b4cdaae4a
mohawk==0.3.4 \
--hash=sha256:b3f85ffa93a5c7d2f9cc591246ef9f8ac4a9fa716bfd5bae0377699a2d89d78c \
--hash=sha256:e98b331d9fa9ece7b8be26094cbe2d57613ae882133cc755167268a984bc0ab3 \
# via taskcluster
more-itertools==4.3.0 \
--hash=sha256:c187a73da93e7a8acc0001572aebc7e3c69daf7bf6881a2cea10650bd4420092 \
--hash=sha256:c476b5d3a34e12d40130bc2f935028b5f636df8f372dc2c1c01dc19681b2039e \
@ -90,6 +94,13 @@ scandir==1.9.0 \
six==1.10.0 \
--hash=sha256:0ff78c403d9bccf5a425a6d31a12aa6b47f1c21ca4dc2573a7e2f32a97335eb1 \
--hash=sha256:105f8d68616f8248e24bf0e9372ef04d3cc10104f1980f54d57b2ce73a5ad56a
slugid==1.0.7 \
--hash=sha256:6dab3c7eef0bb423fb54cb7752e0f466ddd0ee495b78b763be60e8a27f69e779 \
# via taskcluster
taskcluster==4.0.1 \
--hash=sha256:27256511044346ac71a495d3c636f2add95c102b9b09f90d6fb1ea3e9949d311 \
--hash=sha256:99dd90bc1c566968868c8b07ede32f8e031cbccd52c7195a61e802679d461447 \
--hash=sha256:d0360063c1a3fcaaa514bb31c03954ba573d2b671df40a2ecfdfd9339cc8e93e
virtualenv-clone==0.3.0 \
--hash=sha256:4507071d81013fd03ea9930ec26bc8648b997927a11fa80e8ee81198b57e0ac7 \
--hash=sha256:b5cfe535d14dc68dfc1d1bb4ac1209ea28235b91156e2bba8e250d291c3fb4f8 \


@ -1,57 +0,0 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
# C extensions
*.so
# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*,cover
# Translations
*.mo
*.pot
# Django stuff:
*.log
# Sphinx documentation
docs/_build/
# PyBuilder
target/


@ -1,27 +0,0 @@
language: python
python:
- 2.7
install:
- pip install -r requirements.txt
script:
- tox
after_script:
- tox -e coveralls
# currently cannot customise per user fork, see:
# https://github.com/travis-ci/travis-ci/issues/1094
# please comment out this section in your personal fork!
notifications:
irc:
channels:
- "irc.mozilla.org#taskcluster-bots"
on_success: always
on_failure: always
template:
- "\x02%{repository}\x0314#%{build_number}\x03\x02 (%{branch} - %{commit} : %{author}): \x02\x0312%{message}\x02\x03"
- "\x02Change view\x02 : \x0314%{compare_url}\x03"
- "\x02Build details\x02 : \x0314%{build_url}\x03"
- "\x02Commit message\x02 : \x0314%{commit_message}\x03"


@ -1,363 +0,0 @@
Mozilla Public License, version 2.0
1. Definitions
1.1. "Contributor"
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. "Incompatible With Secondary Licenses"
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the terms of
a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in a
separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible, whether
at the time of the initial grant or subsequently, any and all of the
rights conveyed by this License.
1.10. "Modifications"
means any of the following:
a. any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the License,
by the making, using, selling, offering for sale, having made, import,
or transfer of either its Contributions or its Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, "control" means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights to
grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter the
recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty, or
limitations of liability) contained within the Source Code Form of the
Covered Software, except that You may alter any license notices to the
extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute,
judicial order, or regulation then You must: (a) comply with the terms of
this License to the maximum extent possible; and (b) describe the
limitations and the code they affect. Such description must be placed in a
text file included with all distributions of the Covered Software under
this License. Except to the extent prohibited by statute or regulation,
such description must be sufficiently detailed for a recipient of ordinary
skill to be able to understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing
basis, if such Contributor fails to notify You of the non-compliance by
some reasonable means prior to 60 days after You have come back into
compliance. Moreover, Your grants from a particular Contributor are
reinstated on an ongoing basis if such Contributor notifies You of the
non-compliance by some reasonable means, this is the first time You have
received notice of non-compliance with this License from such
Contributor, and You become compliant prior to 30 days after Your receipt
of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an "as is" basis,
without warranty of any kind, either expressed, implied, or statutory,
including, without limitation, warranties that the Covered Software is free
of defects, merchantable, fit for a particular purpose or non-infringing.
The entire risk as to the quality and performance of the Covered Software
is with You. Should any Covered Software prove defective in any respect,
You (not any Contributor) assume the cost of any necessary servicing,
repair, or correction. This disclaimer of warranty constitutes an essential
part of this License. No use of any Covered Software is authorized under
this License except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from
such party's negligence to the extent applicable law prohibits such
limitation. Some jurisdictions do not allow the exclusion or limitation of
incidental or consequential damages, so this exclusion and limitation may
not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts
of a jurisdiction where the defendant maintains its principal place of
business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions. Nothing
in this Section shall prevent a party's ability to bring cross-claims or
counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides that
the language of a contract shall be construed against the drafter shall not
be used to construe this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses If You choose to distribute Source Code Form that is
Incompatible With Secondary Licenses under the terms of this version of
the License, the notice described in Exhibit B of this License must be
attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file,
then You may include the notice in a location (such as a LICENSE file in a
relevant directory) where a recipient would be likely to look for such a
notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
This Source Code Form is "Incompatible
With Secondary Licenses", as defined by
the Mozilla Public License, v. 2.0.


@ -1,2 +0,0 @@
tox
twine


@ -1,167 +0,0 @@
# Licensed under the Mozilla Public Licence 2.0.
# https://www.mozilla.org/en-US/MPL/2.0
import uuid
import slugid
def testEncode():
""" Test that we can correctly encode a "non-nice" uuid (with first bit
set) to its known slug. The specific uuid was chosen since it has a slug
which contains both `-` and `_` characters."""
# 10000000010011110011111111001000110111111100101101001011000001101000100111111011101011101111101011010101111000011000011101010100....
# <8 ><0 ><4 ><f ><3 ><f ><c ><8 ><d ><f ><c ><b ><4 ><b ><0 ><6 ><8 ><9 ><f ><b ><a ><e ><f ><a ><d ><5 ><e ><1 ><8 ><7 ><5 ><4 >
# < g >< E >< 8 >< _ >< y >< N >< _ >< L >< S >< w >< a >< J >< - >< 6 >< 7 >< 6 >< 1 >< e >< G >< H >< V >< A >
uuid_ = uuid.UUID('{804f3fc8-dfcb-4b06-89fb-aefad5e18754}')
expectedSlug = 'gE8_yN_LSwaJ-6761eGHVA'
actualSlug = slugid.encode(uuid_)
assert expectedSlug == actualSlug, "UUID not correctly encoded into slug: '" + expectedSlug + "' != '" + actualSlug + "'"
def testDecode():
""" Test that we can decode a "non-nice" slug (first bit of uuid is set)
that begins with `-`"""
# 11111011111011111011111011111011111011111011111001000011111011111011111111111111111111111111111111111111111111111111111111111101....
# <f ><b ><e ><f ><b ><e ><f ><b ><e ><f ><b ><e ><4 ><3 ><e ><f ><b ><f ><f ><f ><f ><f ><f ><f ><f ><f ><f ><f ><f ><f ><f ><d >
# < - >< - >< - >< - >< - >< - >< - >< - >< Q >< - >< - >< - >< _ >< _ >< _ >< _ >< _ >< _ >< _ >< _ >< _ >< Q >
slug = '--------Q--__________Q'
expectedUuid = uuid.UUID('{fbefbefb-efbe-43ef-bfff-fffffffffffd}')
actualUuid = slugid.decode(slug)
assert expectedUuid == actualUuid, "Slug not correctly decoded into uuid: '" + str(expectedUuid) + "' != '" + str(actualUuid) + "'"
def testUuidEncodeDecode():
""" Test that 10000 v4 uuids are unchanged after encoding and then decoding them"""
for i in range(0, 10000):
uuid1 = uuid.uuid4()
slug = slugid.encode(uuid1)
uuid2 = slugid.decode(slug)
assert uuid1 == uuid2, "Encode and decode isn't identity: '" + str(uuid1) + "' != '" + str(uuid2) + "'"
def testSlugDecodeEncode():
""" Test that 10000 v4 slugs are unchanged after decoding and then encoding them."""
for i in range(0, 10000):
slug1 = slugid.v4()
uuid_ = slugid.decode(slug1)
slug2 = slugid.encode(uuid_)
assert slug1 == slug2, "Decode and encode isn't identity"
def testSpreadNice():
""" Make sure that all allowed characters can appear in all allowed
positions within the "nice" slug. In this test we generate over a thousand
slugids, and make sure that every possible allowed character per position
appears at least once in the sample of all slugids generated. We also make
sure that no other characters appear in positions in which they are not
allowed.
base 64 encoding char -> value:
ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_
0 1 2 3 4 5 6
0123456789012345678901234567890123456789012345678901234567890123
e.g. from this we can see 'j' represents 35 in base64
The following comments show the 128 bits of the v4 uuid in binary, hex and
base 64 encodings. The 6 fixed bits (`0`/`1`) according to RFC 4122, plus
the first (most significant) fixed bit (`0`) are shown among the 121
arbitrary value bits (`.`/`x`). The `x` means the same as `.` but just
highlights which bits are grouped together for the respective encoding.
schema:
<..........time_low............><...time_mid...><time_hi_+_vers><clk_hi><clk_lo><.....................node.....................>
bin: 0xxx............................................0100............10xx............................................................
hex: $A <01><02><03><04><05><06><07><08><09><10><11> 4 <13><14><15> $B <17><18><19><20><21><22><23><24><25><26><27><28><29><30><31>
=> $A in {0, 1, 2, 3, 4, 5, 6, 7} (0b0xxx)
=> $B in {8, 9, A, B} (0b10xx)
bin: 0xxxxx..........................................0100xx......xxxx10............................................................xx0000
b64: $C < 01 >< 02 >< 03 >< 04 >< 05 >< 06 >< 07 > $D < 09 > $E < 11 >< 12 >< 13 >< 14 >< 15 >< 16 >< 17 >< 18 >< 19 >< 20 > $F
=> $C in {A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, X, Y, Z, a, b, c, d, e, f} (0b0xxxxx)
=> $D in {Q, R, S, T} (0b0100xx)
=> $E in {C, G, K, O, S, W, a, e, i, m, q, u, y, 2, 6, -} (0bxxxx10)
=> $F in {A, Q, g, w} (0bxx0000)"""
charsAll = ''.join(sorted('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_'))
# 0 - 31: 0b0xxxxx
charsC = ''.join(sorted('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef'))
# 16, 17, 18, 19: 0b0100xx
charsD = ''.join(sorted('QRST'))
# 2, 6, 10, 14, 18, 22, 26, 30, 34, 38, 42, 46, 50, 54, 58, 62: 0bxxxx10
charsE = ''.join(sorted('CGKOSWaeimquy26-'))
# 0, 16, 32, 48: 0bxx0000
charsF = ''.join(sorted('AQgw'))
expected = [charsC, charsAll, charsAll, charsAll, charsAll, charsAll, charsAll, charsAll, charsD, charsAll, charsE, charsAll, charsAll, charsAll, charsAll, charsAll, charsAll, charsAll, charsAll, charsAll, charsAll, charsF]
spreadTest(slugid.nice, expected)
def testSpreadV4():
""" This test is the same as niceSpreadTest but for slugid.v4() rather than
slugid.nice(). The only difference is that a v4() slug can start with any of
the base64 characters since the first six bits of the uuid are random."""
charsAll = ''.join(sorted('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_'))
# 16, 17, 18, 19: 0b0100xx
charsD = ''.join(sorted('QRST'))
# 2, 6, 10, 14, 18, 22, 26, 30, 34, 38, 42, 46, 50, 54, 58, 62: 0bxxxx10
charsE = ''.join(sorted('CGKOSWaeimquy26-'))
# 0, 16, 32, 48: 0bxx0000
charsF = ''.join(sorted('AQgw'))
expected = [charsAll, charsAll, charsAll, charsAll, charsAll, charsAll, charsAll, charsAll, charsD, charsAll, charsE, charsAll, charsAll, charsAll, charsAll, charsAll, charsAll, charsAll, charsAll, charsAll, charsAll, charsF]
spreadTest(slugid.v4, expected)
def spreadTest(generator, expected):
""" `spreadTest` runs a test against the `generator` function, to check that
when calling it 64*40 times, the range of characters per string position it
returns matches the array `expected`, where each entry in `expected` is a
string of all possible characters that should appear in that position in the
string, at least once in the sample of 64*40 responses from the `generator`
function"""
# k is an array which stores which characters were found at which
# positions. It has one entry per slugid character, therefore 22 entries.
# Each entry is a dict with a key for each character found, and its value
# as the number of times that character appeared at that position in the
# slugid in the large sample of slugids generated in this test.
k = [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}]
# Generate a large sample of slugids, and record what characters appeared
# where... A monte-carlo test has demonstrated that with 64 * 20
# iterations, no failure occurred in 1000 simulations, so 64 * 40 should be
# suitably large to rule out false positives.
for i in range(0, 64 * 40):
slug = generator()
assert len(slug) == 22
for j in range(0, 22):
if slug[j] in k[j]:
k[j][slug[j]] = k[j][slug[j]] + 1
else:
k[j][slug[j]] = 1
# Compose results into an array `actual`, for comparison with `expected`
actual = []
for j in range(0, len(k)):
actual.append('')
for a in k[j].keys():
if k[j][a] > 0:
actual[j] += a
# sort for easy comparison
actual[j] = ''.join(sorted(actual[j]))
assert arraysEqual(expected, actual), "In a large sample of generated slugids, the range of characters found per character position in the sample did not match expected results.\n\nExpected: " + str(expected) + "\n\nActual: " + str(actual)
def arraysEqual(a, b):
""" returns True if arrays a and b are equal"""
return cmp(a, b) == 0


@ -1,26 +0,0 @@
[tox]
envlist = py27
[base]
deps =
coverage
nose
rednose
commands =
coverage run --source slugid --branch {envbindir}/nosetests -v --with-xunit --rednose --force-color
[testenv:py27]
deps=
{[base]deps}
basepython = python2.7
commands =
{[base]commands}
[testenv:coveralls]
deps=
python-coveralls
commands=
coveralls

third_party/python/taskcluster/PKG-INFO vendored Normal file

@ -0,0 +1,13 @@
Metadata-Version: 1.1
Name: taskcluster
Version: 4.0.1
Summary: Python client for Taskcluster
Home-page: https://github.com/taskcluster/taskcluster-client.py
Author: John Ford
Author-email: jhford@mozilla.com
License: UNKNOWN
Description: UNKNOWN
Platform: UNKNOWN
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6

third_party/python/taskcluster/README.md vendored Normal file

(File diff suppressed because it is too large.)


@ -0,0 +1,8 @@
[nosetests]
verbosity = 1
detailed-errors = 1
[egg_info]
tag_build =
tag_date = 0

third_party/python/taskcluster/setup.py vendored Normal file

@ -0,0 +1,88 @@
#!/usr/bin/env python
from setuptools import setup
from setuptools.command.test import test as TestCommand
import sys
# The VERSION variable is automagically changed
# by release.sh. Make sure you understand how
# that script works if you want to change this
VERSION = '4.0.1'
tests_require = [
'nose==1.3.7',
'nose-exclude==0.5.0',
'httmock==1.2.6',
'rednose==1.2.1',
'mock==1.0.1',
'setuptools-lint==0.3',
'flake8==2.5.0',
'psutil==2.1.3',
'hypothesis==3.6.1',
'tox==2.3.2',
'coverage==4.1b2',
'python-dateutil==2.6.0',
]
# requests has a policy of not breaking apis between major versions
# http://docs.python-requests.org/en/latest/community/release-process/
install_requires = [
'requests>=2.4.3,<3',
'mohawk>=0.3.4,<0.4',
'slugid>=1.0.7,<2',
'six>=1.10.0,<2',
]
# from http://testrun.org/tox/latest/example/basic.html
class Tox(TestCommand):
user_options = [('tox-args=', 'a', "Arguments to pass to tox")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.tox_args = None
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# import here, because outside the eggs aren't loaded
import tox
import shlex
args = self.tox_args
if args:
args = shlex.split(self.tox_args)
errno = tox.cmdline(args=args)
sys.exit(errno)
if sys.version_info.major == 2:
tests_require.extend([
'subprocess32==3.2.6',
])
elif sys.version_info[:2] < (3, 5):
raise Exception('This library does not support Python 3 versions below 3.5')
elif sys.version_info[:2] >= (3, 5):
install_requires.extend([
'aiohttp>=2.0.0,<4',
'async_timeout>=2.0.0,<4',
])
if __name__ == '__main__':
setup(
name='taskcluster',
version=VERSION,
description='Python client for Taskcluster',
author='John Ford',
author_email='jhford@mozilla.com',
url='https://github.com/taskcluster/taskcluster-client.py',
packages=['taskcluster', 'taskcluster.aio'],
install_requires=install_requires,
test_suite="nose.collector",
tests_require=tests_require,
cmdclass={'test': Tox},
zip_safe=False,
classifiers=['Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6'],
)


@ -0,0 +1,17 @@
""" Python client for Taskcluster """
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import os
from .client import createSession # NOQA
from taskcluster.utils import * # NOQA
from taskcluster.exceptions import * # NOQA
from taskcluster._client_importer import * # NOQA
log = logging.getLogger(__name__)
if os.environ.get('DEBUG_TASKCLUSTER_CLIENT'):
log.setLevel(logging.DEBUG)
if len(log.handlers) == 0:
log.addHandler(logging.StreamHandler())
log.addHandler(logging.NullHandler())


@ -0,0 +1,17 @@
from .auth import Auth # NOQA
from .authevents import AuthEvents # NOQA
from .awsprovisioner import AwsProvisioner # NOQA
from .awsprovisionerevents import AwsProvisionerEvents # NOQA
from .ec2manager import EC2Manager # NOQA
from .github import Github # NOQA
from .githubevents import GithubEvents # NOQA
from .hooks import Hooks # NOQA
from .index import Index # NOQA
from .login import Login # NOQA
from .notify import Notify # NOQA
from .purgecache import PurgeCache # NOQA
from .purgecacheevents import PurgeCacheEvents # NOQA
from .queue import Queue # NOQA
from .queueevents import QueueEvents # NOQA
from .secrets import Secrets # NOQA
from .treeherderevents import TreeherderEvents # NOQA


@ -0,0 +1,16 @@
""" Python client for Taskcluster """
import logging
import os
from .asyncclient import createSession # NOQA
from taskcluster.utils import * # NOQA
from taskcluster.exceptions import * # NOQA
from ._client_importer import * # NOQA
log = logging.getLogger(__name__)
if os.environ.get('DEBUG_TASKCLUSTER_CLIENT'):
log.setLevel(logging.DEBUG)
if len(log.handlers) == 0:
log.addHandler(logging.StreamHandler())
log.addHandler(logging.NullHandler())


@ -0,0 +1,17 @@
from .auth import Auth # NOQA
from .authevents import AuthEvents # NOQA
from .awsprovisioner import AwsProvisioner # NOQA
from .awsprovisionerevents import AwsProvisionerEvents # NOQA
from .ec2manager import EC2Manager # NOQA
from .github import Github # NOQA
from .githubevents import GithubEvents # NOQA
from .hooks import Hooks # NOQA
from .index import Index # NOQA
from .login import Login # NOQA
from .notify import Notify # NOQA
from .purgecache import PurgeCache # NOQA
from .purgecacheevents import PurgeCacheEvents # NOQA
from .queue import Queue # NOQA
from .queueevents import QueueEvents # NOQA
from .secrets import Secrets # NOQA
from .treeherderevents import TreeherderEvents # NOQA


@ -0,0 +1,388 @@
"""This module is used to interact with taskcluster rest apis"""
from __future__ import absolute_import, division, print_function
import os
import logging
import hashlib
import hmac
import datetime
import calendar
import six
from six.moves import urllib
import mohawk
import mohawk.bewit
import aiohttp
import asyncio
from .. import exceptions
from .. import utils
from ..client import BaseClient
from . import asyncutils
log = logging.getLogger(__name__)
# Default configuration
_defaultConfig = config = {
'credentials': {
'clientId': os.environ.get('TASKCLUSTER_CLIENT_ID'),
'accessToken': os.environ.get('TASKCLUSTER_ACCESS_TOKEN'),
'certificate': os.environ.get('TASKCLUSTER_CERTIFICATE'),
},
'maxRetries': 5,
'signedUrlExpiration': 15 * 60,
}
def createSession(*args, **kwargs):
""" Create a new aiohttp session. This passes through all positional and
keyword arguments to the asyncutils.createSession() constructor.
It's preferred to do something like
async with createSession(...) as session:
queue = Queue(session=session)
await queue.ping()
or
async with createSession(...) as session:
async with Queue(session=session) as queue:
await queue.ping()
in the client code.
"""
return asyncutils.createSession(*args, **kwargs)
class AsyncBaseClient(BaseClient):
""" Base Class for API Client Classes. Each individual Client class
needs to set up its own methods for REST endpoints and Topic Exchange
routing key patterns. The _makeApiCall() and _topicExchange() methods
help with this.
"""
def __init__(self, *args, **kwargs):
super(AsyncBaseClient, self).__init__(*args, **kwargs)
self._implicitSession = False
if self.session is None:
self._implicitSession = True
def _createSession(self):
""" If self.session isn't set, don't create an implicit.
To avoid `session.close()` warnings at the end of tasks, and
various strongly-worded aiohttp warnings about using `async with`,
let's set `self.session` to `None` if no session is passed in to
`__init__`. The `asyncutils` functions will create a new session
per call in that case.
"""
return None
async def _makeApiCall(self, entry, *args, **kwargs):
""" This function is used to dispatch calls to other functions
for a given API Reference entry"""
x = self._processArgs(entry, *args, **kwargs)
routeParams, payload, query, paginationHandler, paginationLimit = x
route = self._subArgsInRoute(entry, routeParams)
# TODO: Check for limit being in the Query of the api ref
if paginationLimit and 'limit' in entry.get('query', []):
query['limit'] = paginationLimit
if query:
_route = route + '?' + urllib.parse.urlencode(query)
else:
_route = route
response = await self._makeHttpRequest(entry['method'], _route, payload)
if paginationHandler:
paginationHandler(response)
while response.get('continuationToken'):
query['continuationToken'] = response['continuationToken']
_route = route + '?' + urllib.parse.urlencode(query)
response = await self._makeHttpRequest(entry['method'], _route, payload)
paginationHandler(response)
else:
return response
async def _makeHttpRequest(self, method, route, payload):
""" Make an HTTP Request for the API endpoint. This method wraps
the logic about doing failure retry and passes off the actual work
of doing an HTTP request to another method."""
url = self._joinBaseUrlAndRoute(route)
log.debug('Full URL used is: %s', url)
hawkExt = self.makeHawkExt()
# Serialize payload if given
if payload is not None:
payload = utils.dumpJson(payload)
# Do a loop of retries
retry = -1  # we increment at the top of the loop, so attempt 1 is retry 0
retries = self.options['maxRetries']
while retry < retries:
retry += 1
# if this isn't the first retry then we sleep
if retry > 0:
snooze = utils.calculateSleepTime(retry)
log.info('Sleeping %0.2f seconds for exponential backoff', snooze)
await asyncio.sleep(snooze)
# Construct header
if self._hasCredentials():
sender = mohawk.Sender(
credentials={
'id': self.options['credentials']['clientId'],
'key': self.options['credentials']['accessToken'],
'algorithm': 'sha256',
},
ext=hawkExt if hawkExt else {},
url=url,
content=payload if payload else '',
content_type='application/json' if payload else '',
method=method,
)
headers = {'Authorization': sender.request_header}
else:
log.debug('Not using hawk!')
headers = {}
if payload:
# Set header for JSON if payload is given, note that we serialize
# outside this loop.
headers['Content-Type'] = 'application/json'
log.debug('Making attempt %d', retry)
try:
response = await asyncutils.makeSingleHttpRequest(
method, url, payload, headers, session=self.session
)
except aiohttp.ClientError as rerr:
if retry < retries:
log.warn('Retrying because of: %s' % rerr)
continue
# raise a connection exception
raise exceptions.TaskclusterConnectionError(
"Failed to establish connection",
superExc=rerr
)
status = response.status
if status == 204:
return None
# Catch retryable errors and go to the beginning of the loop
# to do the retry
if 500 <= status and status < 600 and retry < retries:
log.warn('Retrying because of a %s status code' % status)
continue
# Throw errors for non-retryable errors
if status < 200 or status >= 300:
# Parse messages from errors
data = {}
try:
data = await response.json()
except Exception:
pass # Ignore JSON errors in error messages
# Find error message
message = "Unknown Server Error"
if isinstance(data, dict):
message = data.get('message')
else:
if status == 401:
message = "Authentication Error"
elif status == 500:
message = "Internal Server Error"
else:
message = "Unknown Server Error %s\n%s" % (str(status), str(data)[:1024])
# Raise TaskclusterAuthFailure if this is an auth issue
if status == 401:
raise exceptions.TaskclusterAuthFailure(
message,
status_code=status,
body=data,
superExc=None
)
# Raise TaskclusterRestFailure for all other issues
raise exceptions.TaskclusterRestFailure(
message,
status_code=status,
body=data,
superExc=None
)
# Try to load JSON
try:
await response.release()
return await response.json()
except ValueError:
return {"response": response}
# This code-path should be unreachable
assert False, "Error from last retry should have been raised!"
async def __aenter__(self):
if self._implicitSession and not self.session:
self.session = createSession()
return self
async def __aexit__(self, *args):
if self._implicitSession and self.session:
await self.session.close()
self.session = None
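For reference, a standalone sketch of the Hawk signing that _makeHttpRequest performs above; the clientId and accessToken are placeholders, not real credentials:

import mohawk

sender = mohawk.Sender(
    credentials={'id': 'example-client-id',      # placeholder clientId
                 'key': 'example-access-token',  # placeholder accessToken
                 'algorithm': 'sha256'},
    url='https://auth.taskcluster.net/v1/ping',
    method='GET',
    content='',
    content_type='',
    ext={},
)
headers = {'Authorization': sender.request_header}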
def createApiClient(name, api):
attributes = dict(
name=name,
__doc__=api.get('description'),
classOptions={},
funcinfo={},
)
copiedOptions = ('baseUrl', 'exchangePrefix')
for opt in copiedOptions:
if opt in api['reference']:
attributes['classOptions'][opt] = api['reference'][opt]
for entry in api['reference']['entries']:
if entry['type'] == 'function':
def addApiCall(e):
async def apiCall(self, *args, **kwargs):
return await self._makeApiCall(e, *args, **kwargs)
return apiCall
f = addApiCall(entry)
docStr = "Call the %s api's %s method. " % (name, entry['name'])
if entry['args'] and len(entry['args']) > 0:
docStr += "This method takes:\n\n"
docStr += '\n'.join(['- ``%s``' % x for x in entry['args']])
docStr += '\n\n'
else:
docStr += "This method takes no arguments. "
if 'input' in entry:
docStr += "This method takes input ``%s``. " % entry['input']
if 'output' in entry:
docStr += "This method gives output ``%s``" % entry['output']
docStr += '\n\nThis method does a ``%s`` to ``%s``.' % (
entry['method'].upper(), entry['route'])
f.__doc__ = docStr
attributes['funcinfo'][entry['name']] = entry
elif entry['type'] == 'topic-exchange':
def addTopicExchange(e):
def topicExchange(self, *args, **kwargs):
return self._makeTopicExchange(e, *args, **kwargs)
return topicExchange
f = addTopicExchange(entry)
docStr = 'Generate a routing key pattern for the %s exchange. ' % entry['exchange']
docStr += 'This method takes a given routing key as a string or a '
docStr += 'dictionary. For each given dictionary key, the corresponding '
docStr += 'routing key token takes its value. For routing key tokens '
docStr += 'which are not specified by the dictionary, the * or # character '
docStr += 'is used depending on whether or not the key allows multiple words.\n\n'
docStr += 'This exchange takes the following keys:\n\n'
docStr += '\n'.join(['- ``%s``' % x['name'] for x in entry['routingKey']])
f.__doc__ = docStr
# Add whichever function we created
f.__name__ = str(entry['name'])
attributes[entry['name']] = f
return type(utils.toStr(name), (BaseClient,), attributes)
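A minimal sketch of createApiClient with a tiny hypothetical API reference (the real references ship with the client as generated JSON):

api = {
    'reference': {
        'baseUrl': 'https://ping.example.net/v1',  # hypothetical service
        'entries': [{
            'type': 'function',
            'name': 'ping',
            'method': 'get',
            'route': '/ping',
            'args': [],
        }],
    },
}
Ping = createApiClient('Ping', api)  # a BaseClient subclass with a ping() method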
def createTemporaryCredentials(clientId, accessToken, start, expiry, scopes, name=None):
""" Create a set of temporary credentials
Callers should not apply any clock skew; clock drift is accounted for by
auth service.
clientId: the issuing clientId
accessToken: the issuer's accessToken
start: start time of credentials, as a UTC datetime
expiry: expiration time of credentials, as a UTC datetime
scopes: list of scopes granted
name: credential name (optional)
Returns a dictionary in the form:
{ 'clientId': str, 'accessToken: str, 'certificate': str}
"""
now = datetime.datetime.utcnow()
now = now - datetime.timedelta(minutes=10) # Subtract 10 minutes for clock drift
for scope in scopes:
if not isinstance(scope, six.string_types):
raise exceptions.TaskclusterFailure('Scope must be string')
# Credentials can only be valid for 31 days. I hope that
# this is validated on the server somehow...
if expiry - start > datetime.timedelta(days=31):
raise exceptions.TaskclusterFailure('Only 31 days allowed')
# We multiply times by 1000 because the auth service is JS and as a result
# uses milliseconds instead of seconds
cert = dict(
version=1,
scopes=scopes,
start=calendar.timegm(start.utctimetuple()) * 1000,
expiry=calendar.timegm(expiry.utctimetuple()) * 1000,
seed=utils.slugId() + utils.slugId(),
)
# if this is a named temporary credential, include the issuer in the certificate
if name:
cert['issuer'] = utils.toStr(clientId)
sig = ['version:' + utils.toStr(cert['version'])]
if name:
sig.extend([
'clientId:' + utils.toStr(name),
'issuer:' + utils.toStr(clientId),
])
sig.extend([
'seed:' + utils.toStr(cert['seed']),
'start:' + utils.toStr(cert['start']),
'expiry:' + utils.toStr(cert['expiry']),
'scopes:'
] + scopes)
sigStr = '\n'.join(sig).encode()
if isinstance(accessToken, six.text_type):
accessToken = accessToken.encode()
sig = hmac.new(accessToken, sigStr, hashlib.sha256).digest()
cert['signature'] = utils.encodeStringForB64Header(sig)
newToken = hmac.new(accessToken, cert['seed'], hashlib.sha256).digest()
newToken = utils.makeB64UrlSafe(utils.encodeStringForB64Header(newToken)).replace(b'=', b'')
return {
'clientId': name or clientId,
'accessToken': newToken,
'certificate': utils.dumpJson(cert),
}
__all__ = [
'createTemporaryCredentials',
'config',
'BaseClient',
'createApiClient',
]
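A hedged usage sketch for createTemporaryCredentials, assuming this module lands at taskcluster.aio.asyncclient; the clientId, accessToken, and scope are placeholders:

import datetime
from taskcluster.aio.asyncclient import createTemporaryCredentials

start = datetime.datetime.utcnow()
expiry = start + datetime.timedelta(hours=1)  # well under the 31-day cap
creds = createTemporaryCredentials(
    'example-issuer',        # placeholder issuing clientId
    'example-access-token',  # placeholder issuer accessToken
    start,
    expiry,
    ['queue:get-artifact:public/*'],  # example scope
)
# creds carries 'clientId', 'accessToken', and a JSON 'certificate'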


@ -0,0 +1,116 @@
from __future__ import absolute_import, division, print_function
import aiohttp
import aiohttp.hdrs
import asyncio
import async_timeout
import logging
import os
import six
import taskcluster.utils as utils
import taskcluster.exceptions as exceptions
log = logging.getLogger(__name__)
def createSession(*args, **kwargs):
return aiohttp.ClientSession(*args, **kwargs)
# Useful information: https://www.blog.pythonlibrary.org/2016/07/26/python-3-an-intro-to-asyncio/
async def makeHttpRequest(method, url, payload, headers, retries=utils.MAX_RETRIES, session=None):
""" Make an HTTP request and retry it until success, return request """
retry = -1
response = None
implicit = False
if session is None:
implicit = True
session = aiohttp.ClientSession()
def cleanup():
if implicit:
loop = asyncio.get_event_loop()
loop.run_until_complete(session.close())
try:
while True:
retry += 1
# if this isn't the first retry then we sleep
if retry > 0:
snooze = float(retry * retry) / 10.0
log.info('Sleeping %0.2f seconds for exponential backoff', snooze)
await asyncio.sleep(snooze)
# Seek payload to start, if it is a file
if hasattr(payload, 'seek'):
payload.seek(0)
log.debug('Making attempt %d', retry)
try:
with async_timeout.timeout(60):
response = await makeSingleHttpRequest(method, url, payload, headers, session)
except aiohttp.ClientError as rerr:
if retry < retries:
log.warn('Retrying because of: %s' % rerr)
continue
# raise a connection exception
raise rerr
except ValueError as rerr:
log.warn('ValueError from aiohttp: redirect to non-http or https')
raise rerr
except RuntimeError as rerr:
log.warn('RuntimeError from aiohttp: session closed')
raise rerr
# Handle non 2xx status code and retry if possible
status = response.status
if 500 <= status and status < 600:
if retry < retries:
log.warn('Retrying because of: %d status' % status)
continue
else:
raise exceptions.TaskclusterRestFailure("Unknown Server Error", superExc=None)
return response
finally:
cleanup()
# This code-path should be unreachable
assert False, "Error from last retry should have been raised!"
async def makeSingleHttpRequest(method, url, payload, headers, session=None):
method = method.upper()
log.debug('Making a %s request to %s', method, url)
log.debug('HTTP Headers: %s' % str(headers))
log.debug('HTTP Payload: %s (limit 100 char)' % str(payload)[:100])
implicit = False
if session is None:
implicit = True
session = aiohttp.ClientSession()
skip_auto_headers = [aiohttp.hdrs.CONTENT_TYPE]
try:
# https://docs.aiohttp.org/en/stable/client_quickstart.html#passing-parameters-in-urls
# we must avoid aiohttp's helpful "requoting" functionality, as it breaks Hawk signatures
url = aiohttp.client.URL(url, encoded=True)
async with session.request(
method, url, data=payload, headers=headers,
skip_auto_headers=skip_auto_headers, compress=False
) as resp:
response_text = await resp.text()
log.debug('Received HTTP Status: %s' % resp.status)
log.debug('Received HTTP Headers: %s' % str(resp.headers))
log.debug('Received HTTP Payload: %s (limit 1024 char)' %
six.text_type(response_text)[:1024])
return resp
finally:
if implicit:
await session.close()
async def putFile(filename, url, contentType, session=None):
with open(filename, 'rb') as f:
contentLength = os.fstat(f.fileno()).st_size
return await makeHttpRequest('put', url, f, headers={
'Content-Length': contentLength,
'Content-Type': contentType,
}, session=session)
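A sketch of driving putFile with an explicit session (passing one avoids the implicit-session cleanup path above); the filename and signed upload URL are hypothetical, and the module path is assumed to be taskcluster.aio.asyncutils:

import asyncio
from taskcluster.aio import asyncutils

async def upload():
    async with asyncutils.createSession() as session:
        resp = await asyncutils.putFile(
            'build.tar.gz',                    # hypothetical local file
            'https://uploads.example.net/a1',  # hypothetical signed PUT URL
            'application/gzip',
            session=session,
        )
        print(resp.status)

asyncio.get_event_loop().run_until_complete(upload())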


@ -0,0 +1,866 @@
# coding=utf-8
#####################################################
# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
#####################################################
# noqa: E128,E201
from .asyncclient import AsyncBaseClient
from .asyncclient import createApiClient
from .asyncclient import config
from .asyncclient import createTemporaryCredentials
from .asyncclient import createSession
_defaultConfig = config
class Auth(AsyncBaseClient):
"""
Authentication related API end-points for Taskcluster and related
services. These API end-points are of interest if you wish to:
* Authorize a request signed with Taskcluster credentials,
* Manage clients and roles,
* Inspect or audit clients and roles,
* Gain access to various services guarded by this API.
Note that in this service "authentication" refers to validating the
correctness of the supplied credentials (that the caller possesses the
appropriate access token). This service does not provide any kind of user
authentication (identifying a particular person).
### Clients
The authentication service manages _clients_, at a high-level each client
consists of a `clientId`, an `accessToken`, scopes, and some metadata.
The `clientId` and `accessToken` can be used for authentication when
calling Taskcluster APIs.
The client's scopes control the client's access to Taskcluster resources.
The scopes are *expanded* by substituting roles, as defined below.
### Roles
A _role_ consists of a `roleId`, a set of scopes and a description.
Each role constitutes a simple _expansion rule_ that says if you have
the scope: `assume:<roleId>` you get the set of scopes the role has.
Think of the `assume:<roleId>` as a scope that allows a client to assume
a role.
As with scopes, the `*` Kleene star also has special meaning if it is
located at the end of a `roleId`. If you have a role with the following
`roleId`: `my-prefix*`, then any client which has a scope starting with
`assume:my-prefix` will be allowed to assume the role.
### Guarded Services
The authentication service also has API end-points for delegating access
to some guarded service such as AWS S3, or Azure Table Storage.
Generally, we add API end-points to this server when we wish to use
Taskcluster credentials to grant access to a third-party service used
by many Taskcluster components.
"""
classOptions = {
"baseUrl": "https://auth.taskcluster.net/v1/"
}
async def ping(self, *args, **kwargs):
"""
Ping Server
Respond without doing anything.
This endpoint is used to check that the service is up.
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
async def listClients(self, *args, **kwargs):
"""
List Clients
Get a list of all clients. With `prefix`, only clients for which
it is a prefix of the clientId are returned.
By default this end-point will try to return up to 1000 clients in one
request. But it **may return fewer, even none**.
It may also return a `continuationToken` even though there are no more
results. However, you can only be sure to have seen all results if you
keep calling `listClients` with the last `continuationToken` until you
get a result without a `continuationToken`.
This method gives output: ``v1/list-clients-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["listClients"], *args, **kwargs)
async def client(self, *args, **kwargs):
"""
Get Client
Get information about a single client.
This method gives output: ``v1/get-client-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["client"], *args, **kwargs)
async def createClient(self, *args, **kwargs):
"""
Create Client
Create a new client and get the `accessToken` for this client.
You should store the `accessToken` from this API call as there is no
other way to retrieve it.
If you lose the `accessToken` you can call `resetAccessToken` to reset
it, and a new `accessToken` will be returned, but you cannot retrieve the
current `accessToken`.
If a client with the same `clientId` already exists this operation will
fail. Use `updateClient` if you wish to update an existing client.
The caller's scopes must satisfy `scopes`.
This method takes input: ``v1/create-client-request.json#``
This method gives output: ``v1/create-client-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["createClient"], *args, **kwargs)
async def resetAccessToken(self, *args, **kwargs):
"""
Reset `accessToken`
Reset a client's `accessToken`; this will revoke the existing
`accessToken`, generate a new `accessToken` and return it from this
call.
There is no way to retrieve an existing `accessToken`, so if you lose it
you must reset the accessToken to acquire it again.
This method gives output: ``v1/create-client-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["resetAccessToken"], *args, **kwargs)
async def updateClient(self, *args, **kwargs):
"""
Update Client
Update an existing client. The `clientId` and `accessToken` cannot be
updated, but `scopes` can be modified. The caller's scopes must
satisfy all scopes being added to the client in the update operation.
If no scopes are given in the request, the client's scopes remain
unchanged
This method takes input: ``v1/create-client-request.json#``
This method gives output: ``v1/get-client-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["updateClient"], *args, **kwargs)
async def enableClient(self, *args, **kwargs):
"""
Enable Client
Enable a client that was disabled with `disableClient`. If the client
is already enabled, this does nothing.
This is typically used by identity providers to re-enable clients that
had been disabled when the corresponding identity's scopes changed.
This method gives output: ``v1/get-client-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["enableClient"], *args, **kwargs)
async def disableClient(self, *args, **kwargs):
"""
Disable Client
Disable a client. If the client is already disabled, this does nothing.
This is typically used by identity providers to disable clients when the
corresponding identity's scopes no longer satisfy the client's scopes.
This method gives output: ``v1/get-client-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["disableClient"], *args, **kwargs)
async def deleteClient(self, *args, **kwargs):
"""
Delete Client
Delete a client, please note that any roles related to this client must
be deleted independently.
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["deleteClient"], *args, **kwargs)
async def listRoles(self, *args, **kwargs):
"""
List Roles
Get a list of all roles, each role object also includes the list of
scopes it expands to.
This method gives output: ``v1/list-roles-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["listRoles"], *args, **kwargs)
async def role(self, *args, **kwargs):
"""
Get Role
Get information about a single role, including the set of scopes that the
role expands to.
This method gives output: ``v1/get-role-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["role"], *args, **kwargs)
async def createRole(self, *args, **kwargs):
"""
Create Role
Create a new role.
The caller's scopes must satisfy the new role's scopes.
If there already exists a role with the same `roleId` this operation
will fail. Use `updateRole` to modify an existing role.
Creation of a role that will generate an infinite expansion will result
in an error response.
This method takes input: ``v1/create-role-request.json#``
This method gives output: ``v1/get-role-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["createRole"], *args, **kwargs)
async def updateRole(self, *args, **kwargs):
"""
Update Role
Update an existing role.
The caller's scopes must satisfy all of the new scopes being added, but
need not satisfy all of the client's existing scopes.
An update of a role that will generate an infinite expansion will result
in an error response.
This method takes input: ``v1/create-role-request.json#``
This method gives output: ``v1/get-role-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["updateRole"], *args, **kwargs)
async def deleteRole(self, *args, **kwargs):
"""
Delete Role
Delete a role. This operation will succeed regardless of whether or not
the role exists.
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["deleteRole"], *args, **kwargs)
async def expandScopesGet(self, *args, **kwargs):
"""
Expand Scopes
Return an expanded copy of the given scopeset, with scopes implied by any
roles included.
This call uses the GET method with an HTTP body. It remains only for
backward compatibility.
This method takes input: ``v1/scopeset.json#``
This method gives output: ``v1/scopeset.json#``
This method is ``deprecated``
"""
return await self._makeApiCall(self.funcinfo["expandScopesGet"], *args, **kwargs)
async def expandScopes(self, *args, **kwargs):
"""
Expand Scopes
Return an expanded copy of the given scopeset, with scopes implied by any
roles included.
This method takes input: ``v1/scopeset.json#``
This method gives output: ``v1/scopeset.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["expandScopes"], *args, **kwargs)
async def currentScopes(self, *args, **kwargs):
"""
Get Current Scopes
Return the expanded scopes available in the request, taking into account all sources
of scopes and scope restrictions (temporary credentials, assumeScopes, client scopes,
and roles).
This method gives output: ``v1/scopeset.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["currentScopes"], *args, **kwargs)
async def awsS3Credentials(self, *args, **kwargs):
"""
Get Temporary Read/Write Credentials S3
Get temporary AWS credentials for `read-write` or `read-only` access to
a given `bucket` and `prefix` within that bucket.
The `level` parameter can be `read-write` or `read-only` and determines
which type of credentials are returned. Please note that the `level`
parameter is required in the scope guarding access. The bucket name must
not contain `.`, as recommended by Amazon.
This method can only allow access to a whitelisted set of buckets. To add
a bucket to that whitelist, contact the Taskcluster team, who will add it to
the appropriate IAM policy. If the bucket is in a different AWS account, you
will also need to add a bucket policy allowing access from the Taskcluster
account. That policy should look like this:
```
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "allow-taskcluster-auth-to-delegate-access",
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::692406183521:root"
},
"Action": [
"s3:ListBucket",
"s3:GetObject",
"s3:PutObject",
"s3:DeleteObject",
"s3:GetBucketLocation"
],
"Resource": [
"arn:aws:s3:::<bucket>",
"arn:aws:s3:::<bucket>/*"
]
}
]
}
```
The credentials are set to expire after an hour, but this behavior is
subject to change. Hence, you should always read the `expires` property
from the response, if you intend to maintain active credentials in your
application.
Please note that your `prefix` may not start with a slash (`/`). Such a prefix
is allowed on S3, but we forbid it here to discourage bad behavior.
Also note that if your `prefix` doesn't end in a slash `/`, the STS
credentials may allow access to unexpected keys, as S3 does not treat
slashes specially. For example, a prefix of `my-folder` will allow
access to `my-folder/file.txt` as expected, but also to `my-folder.txt`,
which may not be intended.
Finally, note that the `PutObjectAcl` call is not allowed. Passing a canned
ACL other than `private` to `PutObject` is treated as a `PutObjectAcl` call, and
will result in an access-denied error from AWS. This limitation is due to a
security flaw in Amazon S3 which might otherwise allow indefinite access to
uploaded objects.
**EC2 metadata compatibility**: if the querystring parameter
`?format=iam-role-compat` is given, the response will be compatible
with the JSON exposed by the EC2 metadata service. This aims to ease
compatibility for libraries and tools built to auto-refresh credentials.
For details on the format returned by the EC2 metadata service, see:
[EC2 User Guide](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#instance-metadata-security-credentials).
This method gives output: ``v1/aws-s3-credentials-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["awsS3Credentials"], *args, **kwargs)
async def azureAccounts(self, *args, **kwargs):
"""
List Accounts Managed by Auth
Retrieve a list of all Azure accounts managed by Taskcluster Auth.
This method gives output: ``v1/azure-account-list-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["azureAccounts"], *args, **kwargs)
async def azureTables(self, *args, **kwargs):
"""
List Tables in an Account Managed by Auth
Retrieve a list of all tables in an account.
This method gives output: ``v1/azure-table-list-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["azureTables"], *args, **kwargs)
async def azureTableSAS(self, *args, **kwargs):
"""
Get Shared-Access-Signature for Azure Table
Get a shared access signature (SAS) string for use with a specific Azure
Table Storage table.
The `level` parameter can be `read-write` or `read-only` and determines
which type of credentials are returned. If level is read-write, it will create the
table if it doesn't already exist.
This method gives output: ``v1/azure-table-access-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["azureTableSAS"], *args, **kwargs)
async def azureContainers(self, *args, **kwargs):
"""
List containers in an Account Managed by Auth
Retrieve a list of all containers in an account.
This method gives output: ``v1/azure-container-list-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["azureContainers"], *args, **kwargs)
async def azureContainerSAS(self, *args, **kwargs):
"""
Get Shared-Access-Signature for Azure Container
Get a shared access signature (SAS) string for use with a specific Azure
Blob Storage container.
The `level` parameter can be `read-write` or `read-only` and determines
which type of credentials are returned. If level is read-write, it will create the
container if it doesn't already exist.
This method gives output: ``v1/azure-container-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["azureContainerSAS"], *args, **kwargs)
async def sentryDSN(self, *args, **kwargs):
"""
Get DSN for Sentry Project
Get temporary DSN (access credentials) for a sentry project.
The credentials returned can be used with any Sentry client for up to
24 hours, after which the credentials will be automatically disabled.
If the project doesn't exist it will be created, and assigned to the
initial team configured for this component. Contact a Sentry admin
to have the project transferred to a team you have access to, if needed.
This method gives output: ``v1/sentry-dsn-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["sentryDSN"], *args, **kwargs)
async def statsumToken(self, *args, **kwargs):
"""
Get Token for Statsum Project
Get a temporary `token` and `baseUrl` for sending metrics to statsum.
The token is valid for 24 hours; clients should refresh it after expiration.
This method gives output: ``v1/statsum-token-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["statsumToken"], *args, **kwargs)
async def webhooktunnelToken(self, *args, **kwargs):
"""
Get Token for Webhooktunnel Proxy
Get a temporary `token` and `id` for connecting to webhooktunnel.
The token is valid for 96 hours; clients should refresh it after expiration.
This method gives output: ``v1/webhooktunnel-token-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["webhooktunnelToken"], *args, **kwargs)
async def authenticateHawk(self, *args, **kwargs):
"""
Authenticate Hawk Request
Validate the request signature given on input and return the list of scopes
that the authenticating client has.
This method is used by other services that wish to rely on Taskcluster
credentials for authentication. This way we can use Hawk without having
the secret credentials leave this service.
This method takes input: ``v1/authenticate-hawk-request.json#``
This method gives output: ``v1/authenticate-hawk-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["authenticateHawk"], *args, **kwargs)
async def testAuthenticate(self, *args, **kwargs):
"""
Test Authentication
Utility method to test client implementations of Taskcluster
authentication.
Rather than using real credentials, this endpoint accepts requests with
clientId `tester` and accessToken `no-secret`. That client's scopes are
based on `clientScopes` in the request body.
The request is validated, with any certificate, authorizedScopes, etc.
applied, and the resulting scopes are checked against `requiredScopes`
from the request body. On success, the response contains the clientId
and scopes as seen by the API method.
This method takes input: ``v1/test-authenticate-request.json#``
This method gives output: ``v1/test-authenticate-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["testAuthenticate"], *args, **kwargs)
async def testAuthenticateGet(self, *args, **kwargs):
"""
Test Authentication (GET)
Utility method similar to `testAuthenticate`, but with the GET method,
so it can be used with signed URLs (bewits).
Rather than using real credentials, this endpoint accepts requests with
clientId `tester` and accessToken `no-secret`. That client's scopes are
`['test:*', 'auth:create-client:test:*']`. The call fails if the
`test:authenticate-get` scope is not available.
The request is validated, with any certificate, authorizedScopes, etc.
applied, and the resulting scopes are checked, just like any API call.
On success, the response contains the clientId and scopes as seen by
the API method.
This method may later be extended to allow specification of client and
required scopes via query arguments.
This method gives output: ``v1/test-authenticate-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["testAuthenticateGet"], *args, **kwargs)
funcinfo = {
"authenticateHawk": {
'args': [],
'input': 'v1/authenticate-hawk-request.json#',
'method': 'post',
'name': 'authenticateHawk',
'output': 'v1/authenticate-hawk-response.json#',
'route': '/authenticate-hawk',
'stability': 'stable',
},
"awsS3Credentials": {
'args': ['level', 'bucket', 'prefix'],
'method': 'get',
'name': 'awsS3Credentials',
'output': 'v1/aws-s3-credentials-response.json#',
'query': ['format'],
'route': '/aws/s3/<level>/<bucket>/<prefix>',
'stability': 'stable',
},
"azureAccounts": {
'args': [],
'method': 'get',
'name': 'azureAccounts',
'output': 'v1/azure-account-list-response.json#',
'route': '/azure/accounts',
'stability': 'stable',
},
"azureContainerSAS": {
'args': ['account', 'container', 'level'],
'method': 'get',
'name': 'azureContainerSAS',
'output': 'v1/azure-container-response.json#',
'route': '/azure/<account>/containers/<container>/<level>',
'stability': 'stable',
},
"azureContainers": {
'args': ['account'],
'method': 'get',
'name': 'azureContainers',
'output': 'v1/azure-container-list-response.json#',
'query': ['continuationToken'],
'route': '/azure/<account>/containers',
'stability': 'stable',
},
"azureTableSAS": {
'args': ['account', 'table', 'level'],
'method': 'get',
'name': 'azureTableSAS',
'output': 'v1/azure-table-access-response.json#',
'route': '/azure/<account>/table/<table>/<level>',
'stability': 'stable',
},
"azureTables": {
'args': ['account'],
'method': 'get',
'name': 'azureTables',
'output': 'v1/azure-table-list-response.json#',
'query': ['continuationToken'],
'route': '/azure/<account>/tables',
'stability': 'stable',
},
"client": {
'args': ['clientId'],
'method': 'get',
'name': 'client',
'output': 'v1/get-client-response.json#',
'route': '/clients/<clientId>',
'stability': 'stable',
},
"createClient": {
'args': ['clientId'],
'input': 'v1/create-client-request.json#',
'method': 'put',
'name': 'createClient',
'output': 'v1/create-client-response.json#',
'route': '/clients/<clientId>',
'stability': 'stable',
},
"createRole": {
'args': ['roleId'],
'input': 'v1/create-role-request.json#',
'method': 'put',
'name': 'createRole',
'output': 'v1/get-role-response.json#',
'route': '/roles/<roleId>',
'stability': 'stable',
},
"currentScopes": {
'args': [],
'method': 'get',
'name': 'currentScopes',
'output': 'v1/scopeset.json#',
'route': '/scopes/current',
'stability': 'stable',
},
"deleteClient": {
'args': ['clientId'],
'method': 'delete',
'name': 'deleteClient',
'route': '/clients/<clientId>',
'stability': 'stable',
},
"deleteRole": {
'args': ['roleId'],
'method': 'delete',
'name': 'deleteRole',
'route': '/roles/<roleId>',
'stability': 'stable',
},
"disableClient": {
'args': ['clientId'],
'method': 'post',
'name': 'disableClient',
'output': 'v1/get-client-response.json#',
'route': '/clients/<clientId>/disable',
'stability': 'stable',
},
"enableClient": {
'args': ['clientId'],
'method': 'post',
'name': 'enableClient',
'output': 'v1/get-client-response.json#',
'route': '/clients/<clientId>/enable',
'stability': 'stable',
},
"expandScopes": {
'args': [],
'input': 'v1/scopeset.json#',
'method': 'post',
'name': 'expandScopes',
'output': 'v1/scopeset.json#',
'route': '/scopes/expand',
'stability': 'stable',
},
"expandScopesGet": {
'args': [],
'input': 'v1/scopeset.json#',
'method': 'get',
'name': 'expandScopesGet',
'output': 'v1/scopeset.json#',
'route': '/scopes/expand',
'stability': 'deprecated',
},
"listClients": {
'args': [],
'method': 'get',
'name': 'listClients',
'output': 'v1/list-clients-response.json#',
'query': ['prefix', 'continuationToken', 'limit'],
'route': '/clients/',
'stability': 'stable',
},
"listRoles": {
'args': [],
'method': 'get',
'name': 'listRoles',
'output': 'v1/list-roles-response.json#',
'route': '/roles/',
'stability': 'stable',
},
"ping": {
'args': [],
'method': 'get',
'name': 'ping',
'route': '/ping',
'stability': 'stable',
},
"resetAccessToken": {
'args': ['clientId'],
'method': 'post',
'name': 'resetAccessToken',
'output': 'v1/create-client-response.json#',
'route': '/clients/<clientId>/reset',
'stability': 'stable',
},
"role": {
'args': ['roleId'],
'method': 'get',
'name': 'role',
'output': 'v1/get-role-response.json#',
'route': '/roles/<roleId>',
'stability': 'stable',
},
"sentryDSN": {
'args': ['project'],
'method': 'get',
'name': 'sentryDSN',
'output': 'v1/sentry-dsn-response.json#',
'route': '/sentry/<project>/dsn',
'stability': 'stable',
},
"statsumToken": {
'args': ['project'],
'method': 'get',
'name': 'statsumToken',
'output': 'v1/statsum-token-response.json#',
'route': '/statsum/<project>/token',
'stability': 'stable',
},
"testAuthenticate": {
'args': [],
'input': 'v1/test-authenticate-request.json#',
'method': 'post',
'name': 'testAuthenticate',
'output': 'v1/test-authenticate-response.json#',
'route': '/test-authenticate',
'stability': 'stable',
},
"testAuthenticateGet": {
'args': [],
'method': 'get',
'name': 'testAuthenticateGet',
'output': 'v1/test-authenticate-response.json#',
'route': '/test-authenticate-get/',
'stability': 'stable',
},
"updateClient": {
'args': ['clientId'],
'input': 'v1/create-client-request.json#',
'method': 'post',
'name': 'updateClient',
'output': 'v1/get-client-response.json#',
'route': '/clients/<clientId>',
'stability': 'stable',
},
"updateRole": {
'args': ['roleId'],
'input': 'v1/create-role-request.json#',
'method': 'post',
'name': 'updateRole',
'output': 'v1/get-role-response.json#',
'route': '/roles/<roleId>',
'stability': 'stable',
},
"webhooktunnelToken": {
'args': [],
'method': 'get',
'name': 'webhooktunnelToken',
'output': 'v1/webhooktunnel-token-response.json#',
'route': '/webhooktunnel',
'stability': 'stable',
},
}
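# Illustrative note: each funcinfo entry above describes one REST call. For
# example, the "azureTableSAS" entry means that (with hypothetical names)
#
#   await auth.azureTableSAS('someaccount', 'sometable', 'read-only')
#
# issues GET /azure/someaccount/table/sometable/read-only, validated against
# the listed output schema.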
__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Auth']


@ -0,0 +1,178 @@
# coding=utf-8
#####################################################
# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
#####################################################
# noqa: E128,E201
from .asyncclient import AsyncBaseClient
from .asyncclient import createApiClient
from .asyncclient import config
from .asyncclient import createTemporaryCredentials
from .asyncclient import createSession
_defaultConfig = config
class AuthEvents(AsyncBaseClient):
"""
The auth service, typically available at `auth.taskcluster.net`,
is responsible for storing credentials, managing assignment of scopes,
and validation of request signatures from other services.
These exchanges provide notifications when credentials or roles are
updated. This is mostly so that multiple instances of the auth service
can purge their caches and synchronize state. But you are of course
welcome to use these for other purposes, for example monitoring changes.
"""
classOptions = {
"exchangePrefix": "exchange/taskcluster-auth/v1/"
}
def clientCreated(self, *args, **kwargs):
"""
Client Created Messages
Message that a new client has been created.
This exchange outputs: ``v1/client-message.json#``. This exchange takes the following keys:
* reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
"""
ref = {
'exchange': 'client-created',
'name': 'clientCreated',
'routingKey': [
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/client-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
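# Illustrative sketch: these exchange methods return binding information
# rather than making a network call. Assuming the topic-exchange helpers
# behave as elsewhere in this library, something like
#
#   events = AuthEvents()
#   binding = events.clientCreated(reserved='#')
#
# yields the exchange name and routing-key pattern to hand to a pulse
# listener for client-created messages.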
def clientUpdated(self, *args, **kwargs):
"""
Client Updated Messages
Message that a client has been updated.
This exchange outputs: ``v1/client-message.json#``. This exchange takes the following keys:
* reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
"""
ref = {
'exchange': 'client-updated',
'name': 'clientUpdated',
'routingKey': [
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/client-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def clientDeleted(self, *args, **kwargs):
"""
Client Deleted Messages
Message that a client has been deleted.
This exchange outputs: ``v1/client-message.json#``. This exchange takes the following keys:
* reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
"""
ref = {
'exchange': 'client-deleted',
'name': 'clientDeleted',
'routingKey': [
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/client-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def roleCreated(self, *args, **kwargs):
"""
Role Created Messages
Message that a new role has been created.
This exchange outputs: ``v1/role-message.json#``. This exchange takes the following keys:
* reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
"""
ref = {
'exchange': 'role-created',
'name': 'roleCreated',
'routingKey': [
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/role-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def roleUpdated(self, *args, **kwargs):
"""
Role Updated Messages
Message that a role has been updated.
This exchange outputs: ``v1/role-message.json#``. This exchange takes the following keys:
* reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
"""
ref = {
'exchange': 'role-updated',
'name': 'roleUpdated',
'routingKey': [
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/role-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def roleDeleted(self, *args, **kwargs):
"""
Role Deleted Messages
Message that a role has been deleted.
This exchange outputs: ``v1/role-message.json#``. This exchange takes the following keys:
* reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
"""
ref = {
'exchange': 'role-deleted',
'name': 'roleDeleted',
'routingKey': [
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/role-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
funcinfo = {
}
__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'AuthEvents']


@ -0,0 +1,449 @@
# coding=utf-8
#####################################################
# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
#####################################################
# noqa: E128,E201
from .asyncclient import AsyncBaseClient
from .asyncclient import createApiClient
from .asyncclient import config
from .asyncclient import createTemporaryCredentials
from .asyncclient import createSession
_defaultConfig = config
class AwsProvisioner(AsyncBaseClient):
"""
The AWS Provisioner is responsible for provisioning instances on EC2 for use in
Taskcluster. The provisioner maintains a set of worker configurations which
can be managed with an API that is typically available at
aws-provisioner.taskcluster.net/v1. This API can also perform basic instance
management tasks in addition to maintaining the internal state of worker type
configuration information.
The Provisioner runs at a configurable interval. Each iteration of the
provisioner fetches a current copy of the state that the AWS EC2 API reports. In
each iteration, we ask the Queue how many tasks are pending for each worker
type. Based on the number of tasks pending and the scaling ratio, we may
submit requests for new instances. We use pricing information, capacity and
utility factor information to decide which instance type in which region would
be the optimal configuration.
Each EC2 instance type will declare a capacity and utility factor. Capacity is
the number of tasks that a given machine is capable of running concurrently.
Utility factor is a relative measure of performance between two instance types.
We multiply the utility factor by the spot price to compare instance types and
regions when making the bidding choices.
When a new EC2 instance is instantiated, its user data contains a token in
`securityToken` that can be used with the `getSecret` method to retrieve
the worker's credentials and any needed passwords or other restricted
information. The worker is responsible for deleting the secret after
retrieving it, to prevent dissemination of the secret to other processes
which can read the instance user data.
"""
classOptions = {
"baseUrl": "https://aws-provisioner.taskcluster.net/v1"
}
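# Illustrative arithmetic for the bidding comparison described above (numbers
# hypothetical): one plausible reading is price per unit of utility. If type
# A has utility factor 1.0 at a $0.10 spot price and type B has utility
# factor 2.0 at $0.15, then A costs 0.10/1.0 = $0.100 per utility unit while
# B costs 0.15/2.0 = $0.075, so B is the better bid.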
async def listWorkerTypeSummaries(self, *args, **kwargs):
"""
List worker types with details
Return a list of worker types, including some summary information about
current capacity for each. While this list includes all defined worker types,
there may be running EC2 instances for deleted worker types that are not
included here. The list is unordered.
This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/list-worker-types-summaries-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["listWorkerTypeSummaries"], *args, **kwargs)
async def createWorkerType(self, *args, **kwargs):
"""
Create new Worker Type
Create a worker type. A worker type contains all the configuration
needed for the provisioner to manage the instances. Each worker type
knows which regions and which instance types are allowed for that
worker type. Remember that Capacity is the number of concurrent tasks
that can be run on a given EC2 resource and that Utility is the relative
performance rate between different instance types. There is no way to
configure different regions to have different sets of instance types
so ensure that all instance types are available in all regions.
This function is idempotent.
Once a worker type is in the provisioner, a background process will
begin creating instances for it based on its capacity bounds and its
pending task count from the Queue. It is the worker's responsibility
to shut itself down. The provisioner has a limit (currently 96 hours)
for all instances to prevent zombie instances from running indefinitely.
The provisioner will ensure that all instances created are tagged with
aws resource tags containing the provisioner id and the worker type.
If provided, the secrets in the global, region and instance type sections
are available using the secrets api. If specified, the scopes provided
will be used to generate a set of temporary credentials available with
the other secrets.
This method takes input: ``http://schemas.taskcluster.net/aws-provisioner/v1/create-worker-type-request.json#``
This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/get-worker-type-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["createWorkerType"], *args, **kwargs)
async def updateWorkerType(self, *args, **kwargs):
"""
Update Worker Type
Provide a new copy of a worker type to replace the existing one.
This will overwrite the existing worker type definition if there
is already a worker type of that name. This method will return a
200 response along with a copy of the worker type definition created.
Note that if you are using the result of a GET on the worker-type
end point, you will need to delete the lastModified and workerType
keys from the object returned, since those fields are not allowed in
the request body for this method.
Otherwise, all input requirements and actions are the same as the
create method.
This method takes input: ``http://schemas.taskcluster.net/aws-provisioner/v1/create-worker-type-request.json#``
This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/get-worker-type-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["updateWorkerType"], *args, **kwargs)
async def workerTypeLastModified(self, *args, **kwargs):
"""
Get Worker Type Last Modified Time
This method is provided to allow workers to see when they were
last modified. The value provided through UserData can be
compared against this value to see if changes have been made.
If the worker type definition has not been changed, the date
should be identical, as it is the same stored value.
This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/get-worker-type-last-modified.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["workerTypeLastModified"], *args, **kwargs)
async def workerType(self, *args, **kwargs):
"""
Get Worker Type
Retrieve a copy of the requested worker type definition.
This copy contains a lastModified field as well as the worker
type name. As such, it will require manipulation to be able to
use the results of this method to submit data to the update
method.
This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/get-worker-type-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["workerType"], *args, **kwargs)
async def removeWorkerType(self, *args, **kwargs):
"""
Delete Worker Type
Delete a worker type definition. This method will only delete
the worker type definition from the storage table. The actual
deletion will be handled by a background worker. As soon as this
method is called for a worker type, the background worker will
immediately submit requests to cancel all spot requests for this
worker type as well as killing all instances regardless of their
state. If you want to gracefully remove a worker type, you must
either ensure that no tasks are created with that worker type name
or you could theoretically set maxCapacity to 0, though this is
not a supported or tested action.
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["removeWorkerType"], *args, **kwargs)
async def listWorkerTypes(self, *args, **kwargs):
"""
List Worker Types
Return a list of string worker type names. These are the names
of all managed worker types known to the provisioner. This does
not include worker types which are leftovers from a deleted worker
type definition but are still running in AWS.
This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/list-worker-types-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["listWorkerTypes"], *args, **kwargs)
async def createSecret(self, *args, **kwargs):
"""
Create new Secret
Insert a secret into the secret storage. The supplied secrets will
be provided verbatim via `getSecret`, while the supplied scopes will
be converted into credentials by `getSecret`.
This method is not ordinarily used in production; instead, the provisioner
creates a new secret directly for each spot bid.
This method takes input: ``http://schemas.taskcluster.net/aws-provisioner/v1/create-secret-request.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["createSecret"], *args, **kwargs)
async def getSecret(self, *args, **kwargs):
"""
Get a Secret
Retrieve a secret from storage. The result contains any passwords or
other restricted information verbatim as well as a temporary credential
based on the scopes specified when the secret was created.
It is important that this secret is deleted by the consumer (`removeSecret`),
or else the secrets will be visible to any process which can access the
user data associated with the instance.
This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/get-secret-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["getSecret"], *args, **kwargs)
async def instanceStarted(self, *args, **kwargs):
"""
Report an instance starting
An instance will report in by giving its instance id as well
as its security token. The token is checked to ensure that it
matches a real, existing token, so that random
machines do not check in. We could generate a different token,
but that seems like overkill.
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["instanceStarted"], *args, **kwargs)
async def removeSecret(self, *args, **kwargs):
"""
Remove a Secret
Remove a secret. After this call, a call to `getSecret` with the given
token will return no information.
It is very important that the consumer of a
secret delete the secret from storage before handing over control
to untrusted processes to prevent credential and/or secret leakage.
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["removeSecret"], *args, **kwargs)
async def getLaunchSpecs(self, *args, **kwargs):
"""
Get All Launch Specifications for WorkerType
This method returns a preview of all possible launch specifications
that this worker type definition could submit to EC2. It is used to
test worker types, nothing more.
**This API end-point is experimental and may be subject to change without warning.**
This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/get-launch-specs-response.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["getLaunchSpecs"], *args, **kwargs)
async def state(self, *args, **kwargs):
"""
Get AWS State for a worker type
Return the state of a given workertype as stored by the provisioner.
This state is stored as separate lists: one for running instances and one
for pending requests. The `summary` property contains an updated summary
similar to that returned from `listWorkerTypeSummaries`.
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["state"], *args, **kwargs)
async def backendStatus(self, *args, **kwargs):
"""
Backend Status
This endpoint is used to show when the provisioner last checked
in. A check-in is done through the deadman's snitch
api. It is done at the conclusion of a provisioning iteration
and used to tell if the background provisioning process is still
running.
**Warning** this api end-point is **not stable**.
This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/backend-status-response.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["backendStatus"], *args, **kwargs)
async def ping(self, *args, **kwargs):
"""
Ping Server
Respond without doing anything.
This endpoint is used to check that the service is up.
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
funcinfo = {
"backendStatus": {
'args': [],
'method': 'get',
'name': 'backendStatus',
'output': 'http://schemas.taskcluster.net/aws-provisioner/v1/backend-status-response.json#',
'route': '/backend-status',
'stability': 'experimental',
},
"createSecret": {
'args': ['token'],
'input': 'http://schemas.taskcluster.net/aws-provisioner/v1/create-secret-request.json#',
'method': 'put',
'name': 'createSecret',
'route': '/secret/<token>',
'stability': 'stable',
},
"createWorkerType": {
'args': ['workerType'],
'input': 'http://schemas.taskcluster.net/aws-provisioner/v1/create-worker-type-request.json#',
'method': 'put',
'name': 'createWorkerType',
'output': 'http://schemas.taskcluster.net/aws-provisioner/v1/get-worker-type-response.json#',
'route': '/worker-type/<workerType>',
'stability': 'stable',
},
"getLaunchSpecs": {
'args': ['workerType'],
'method': 'get',
'name': 'getLaunchSpecs',
'output': 'http://schemas.taskcluster.net/aws-provisioner/v1/get-launch-specs-response.json#',
'route': '/worker-type/<workerType>/launch-specifications',
'stability': 'experimental',
},
"getSecret": {
'args': ['token'],
'method': 'get',
'name': 'getSecret',
'output': 'http://schemas.taskcluster.net/aws-provisioner/v1/get-secret-response.json#',
'route': '/secret/<token>',
'stability': 'stable',
},
"instanceStarted": {
'args': ['instanceId', 'token'],
'method': 'get',
'name': 'instanceStarted',
'route': '/instance-started/<instanceId>/<token>',
'stability': 'stable',
},
"listWorkerTypeSummaries": {
'args': [],
'method': 'get',
'name': 'listWorkerTypeSummaries',
'output': 'http://schemas.taskcluster.net/aws-provisioner/v1/list-worker-types-summaries-response.json#',
'route': '/list-worker-type-summaries',
'stability': 'stable',
},
"listWorkerTypes": {
'args': [],
'method': 'get',
'name': 'listWorkerTypes',
'output': 'http://schemas.taskcluster.net/aws-provisioner/v1/list-worker-types-response.json#',
'route': '/list-worker-types',
'stability': 'stable',
},
"ping": {
'args': [],
'method': 'get',
'name': 'ping',
'route': '/ping',
'stability': 'stable',
},
"removeSecret": {
'args': ['token'],
'method': 'delete',
'name': 'removeSecret',
'route': '/secret/<token>',
'stability': 'stable',
},
"removeWorkerType": {
'args': ['workerType'],
'method': 'delete',
'name': 'removeWorkerType',
'route': '/worker-type/<workerType>',
'stability': 'stable',
},
"state": {
'args': ['workerType'],
'method': 'get',
'name': 'state',
'route': '/state/<workerType>',
'stability': 'stable',
},
"updateWorkerType": {
'args': ['workerType'],
'input': 'http://schemas.taskcluster.net/aws-provisioner/v1/create-worker-type-request.json#',
'method': 'post',
'name': 'updateWorkerType',
'output': 'http://schemas.taskcluster.net/aws-provisioner/v1/get-worker-type-response.json#',
'route': '/worker-type/<workerType>/update',
'stability': 'stable',
},
"workerType": {
'args': ['workerType'],
'method': 'get',
'name': 'workerType',
'output': 'http://schemas.taskcluster.net/aws-provisioner/v1/get-worker-type-response.json#',
'route': '/worker-type/<workerType>',
'stability': 'stable',
},
"workerTypeLastModified": {
'args': ['workerType'],
'method': 'get',
'name': 'workerTypeLastModified',
'output': 'http://schemas.taskcluster.net/aws-provisioner/v1/get-worker-type-last-modified.json#',
'route': '/worker-type-last-modified/<workerType>',
'stability': 'stable',
},
}
__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'AwsProvisioner']


@ -0,0 +1,141 @@
# coding=utf-8
#####################################################
# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
#####################################################
# noqa: E128,E201
from .asyncclient import AsyncBaseClient
from .asyncclient import createApiClient
from .asyncclient import config
from .asyncclient import createTemporaryCredentials
from .asyncclient import createSession
_defaultConfig = config
class AwsProvisionerEvents(AsyncBaseClient):
"""
Exchanges from the provisioner... more docs later
"""
classOptions = {
"exchangePrefix": "exchange/taskcluster-aws-provisioner/v1/"
}
def workerTypeCreated(self, *args, **kwargs):
"""
WorkerType Created Message
When a new `workerType` is created a message will be published to this
exchange.
This exchange outputs: ``http://schemas.taskcluster.net/aws-provisioner/v1/worker-type-message.json#``. This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* workerType: WorkerType that this message concerns. (required)
* reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
"""
ref = {
'exchange': 'worker-type-created',
'name': 'workerTypeCreated',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'workerType',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'http://schemas.taskcluster.net/aws-provisioner/v1/worker-type-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def workerTypeUpdated(self, *args, **kwargs):
"""
WorkerType Updated Message
When a `workerType` is updated a message will be published to this
exchange.
This exchange outputs: ``http://schemas.taskcluster.net/aws-provisioner/v1/worker-type-message.json#``. This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* workerType: WorkerType that this message concerns. (required)
* reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
"""
ref = {
'exchange': 'worker-type-updated',
'name': 'workerTypeUpdated',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'workerType',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'http://schemas.taskcluster.net/aws-provisioner/v1/worker-type-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def workerTypeRemoved(self, *args, **kwargs):
"""
WorkerType Removed Message
When a `workerType` is removed a message will be published to this
exchange.
This exchange outputs: ``http://schemas.taskcluster.net/aws-provisioner/v1/worker-type-message.json#``. This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* workerType: WorkerType that this message concerns. (required)
* reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
"""
ref = {
'exchange': 'worker-type-removed',
'name': 'workerTypeRemoved',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'workerType',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'http://schemas.taskcluster.net/aws-provisioner/v1/worker-type-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
funcinfo = {
}
__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'AwsProvisionerEvents']


@ -0,0 +1,492 @@
# coding=utf-8
#####################################################
# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
#####################################################
# noqa: E128,E201
from .asyncclient import AsyncBaseClient
from .asyncclient import createApiClient
from .asyncclient import config
from .asyncclient import createTemporaryCredentials
from .asyncclient import createSession
_defaultConfig = config
class EC2Manager(AsyncBaseClient):
"""
A taskcluster service which manages EC2 instances. This service does not understand any taskcluster concepts intrinsically other than using the name `workerType` to refer to a group of associated instances. Unless you are working on building a provisioner for AWS, you almost certainly do not want to use this service.
"""
classOptions = {
"baseUrl": "https://ec2-manager.taskcluster.net/v1"
}
async def listWorkerTypes(self, *args, **kwargs):
"""
See the list of worker types which are known to be managed
This method is only for debugging the ec2-manager
This method gives output: ``http://schemas.taskcluster.net/ec2-manager/v1/list-worker-types.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["listWorkerTypes"], *args, **kwargs)
async def runInstance(self, *args, **kwargs):
"""
Run an instance
Request an instance of a worker type
This method takes input: ``http://schemas.taskcluster.net/ec2-manager/v1/run-instance-request.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["runInstance"], *args, **kwargs)
async def terminateWorkerType(self, *args, **kwargs):
"""
Terminate all resources from a worker type
Terminate all instances for this worker type
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["terminateWorkerType"], *args, **kwargs)
async def workerTypeStats(self, *args, **kwargs):
"""
Look up the resource stats for a workerType
Return an object which has a generic state description. This only contains counts of instances.
This method gives output: ``http://schemas.taskcluster.net/ec2-manager/v1/worker-type-resources.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["workerTypeStats"], *args, **kwargs)
async def workerTypeHealth(self, *args, **kwargs):
"""
Look up the resource health for a workerType
Return a view of the health of a given worker type
This method gives output: ``http://schemas.taskcluster.net/ec2-manager/v1/health.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["workerTypeHealth"], *args, **kwargs)
async def workerTypeErrors(self, *args, **kwargs):
"""
Look up the most recent errors of a workerType
Return a list of the most recent errors encountered by a worker type
This method gives output: ``http://schemas.taskcluster.net/ec2-manager/v1/errors.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["workerTypeErrors"], *args, **kwargs)
async def workerTypeState(self, *args, **kwargs):
"""
Look up the resource state for a workerType
Return state information for a given worker type
This method gives output: ``http://schemas.taskcluster.net/ec2-manager/v1/worker-type-state.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["workerTypeState"], *args, **kwargs)
async def ensureKeyPair(self, *args, **kwargs):
"""
Ensure a KeyPair for a given worker type exists
Idempotently ensure that a keypair of a given name exists
This method takes input: ``http://schemas.taskcluster.net/ec2-manager/v1/create-key-pair.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["ensureKeyPair"], *args, **kwargs)
async def removeKeyPair(self, *args, **kwargs):
"""
Ensure a KeyPair for a given worker type does not exist
Ensure that a keypair of a given name does not exist.
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["removeKeyPair"], *args, **kwargs)
async def terminateInstance(self, *args, **kwargs):
"""
Terminate an instance
Terminate an instance in a specified region
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["terminateInstance"], *args, **kwargs)
async def getPrices(self, *args, **kwargs):
"""
Request prices for EC2
Return a list of possible prices for EC2
This method gives output: ``http://schemas.taskcluster.net/ec2-manager/v1/prices.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["getPrices"], *args, **kwargs)
async def getSpecificPrices(self, *args, **kwargs):
"""
Request prices for EC2
Return a list of possible prices for EC2
This method takes input: ``http://schemas.taskcluster.net/ec2-manager/v1/prices-request.json#``
This method gives output: ``http://schemas.taskcluster.net/ec2-manager/v1/prices.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["getSpecificPrices"], *args, **kwargs)
async def getHealth(self, *args, **kwargs):
"""
Get EC2 account health metrics
Give some basic stats on the health of our EC2 account
This method gives output: ``http://schemas.taskcluster.net/ec2-manager/v1/health.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["getHealth"], *args, **kwargs)
async def getRecentErrors(self, *args, **kwargs):
"""
Look up the most recent errors in the provisioner across all worker types
Return a list of recent errors encountered
This method gives output: ``http://schemas.taskcluster.net/ec2-manager/v1/errors.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["getRecentErrors"], *args, **kwargs)
async def regions(self, *args, **kwargs):
"""
See the list of regions managed by this ec2-manager
This method is only for debugging the ec2-manager
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["regions"], *args, **kwargs)
async def amiUsage(self, *args, **kwargs):
"""
See the list of AMIs and their usage
List AMIs and their usage by returning a list of objects in the form:
{
region: string
volumetype: string
lastused: timestamp
}
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["amiUsage"], *args, **kwargs)
async def ebsUsage(self, *args, **kwargs):
"""
See the current EBS volume usage list
Lists current EBS volume usage by returning a list of objects
that are uniquely defined by {region, volumetype, state} in the form:
{
region: string,
volumetype: string,
state: string,
totalcount: integer,
totalgb: integer,
touched: timestamp (last time that information was updated),
}
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["ebsUsage"], *args, **kwargs)
async def dbpoolStats(self, *args, **kwargs):
"""
Statistics on the Database client pool
This method is only for debugging the ec2-manager
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["dbpoolStats"], *args, **kwargs)
async def allState(self, *args, **kwargs):
"""
List out the entire internal state
This method is only for debugging the ec2-manager
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["allState"], *args, **kwargs)
async def sqsStats(self, *args, **kwargs):
"""
Statistics on the sqs queues
This method is only for debugging the ec2-manager
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["sqsStats"], *args, **kwargs)
async def purgeQueues(self, *args, **kwargs):
"""
Purge the SQS queues
This method is only for debugging the ec2-manager
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["purgeQueues"], *args, **kwargs)
async def apiReference(self, *args, **kwargs):
"""
API Reference
Generate an API reference for this service
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["apiReference"], *args, **kwargs)
async def ping(self, *args, **kwargs):
"""
Ping Server
Respond without doing anything.
This endpoint is used to check that the service is up.
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
funcinfo = {
"allState": {
'args': [],
'method': 'get',
'name': 'allState',
'route': '/internal/all-state',
'stability': 'experimental',
},
"amiUsage": {
'args': [],
'method': 'get',
'name': 'amiUsage',
'route': '/internal/ami-usage',
'stability': 'experimental',
},
"apiReference": {
'args': [],
'method': 'get',
'name': 'apiReference',
'route': '/internal/api-reference',
'stability': 'experimental',
},
"dbpoolStats": {
'args': [],
'method': 'get',
'name': 'dbpoolStats',
'route': '/internal/db-pool-stats',
'stability': 'experimental',
},
"ebsUsage": {
'args': [],
'method': 'get',
'name': 'ebsUsage',
'route': '/internal/ebs-usage',
'stability': 'experimental',
},
"ensureKeyPair": {
'args': ['name'],
'input': 'http://schemas.taskcluster.net/ec2-manager/v1/create-key-pair.json#',
'method': 'get',
'name': 'ensureKeyPair',
'route': '/key-pairs/<name>',
'stability': 'experimental',
},
"getHealth": {
'args': [],
'method': 'get',
'name': 'getHealth',
'output': 'http://schemas.taskcluster.net/ec2-manager/v1/health.json#',
'route': '/health',
'stability': 'experimental',
},
"getPrices": {
'args': [],
'method': 'get',
'name': 'getPrices',
'output': 'http://schemas.taskcluster.net/ec2-manager/v1/prices.json#',
'route': '/prices',
'stability': 'experimental',
},
"getRecentErrors": {
'args': [],
'method': 'get',
'name': 'getRecentErrors',
'output': 'http://schemas.taskcluster.net/ec2-manager/v1/errors.json#',
'route': '/errors',
'stability': 'experimental',
},
"getSpecificPrices": {
'args': [],
'input': 'http://schemas.taskcluster.net/ec2-manager/v1/prices-request.json#',
'method': 'post',
'name': 'getSpecificPrices',
'output': 'http://schemas.taskcluster.net/ec2-manager/v1/prices.json#',
'route': '/prices',
'stability': 'experimental',
},
"listWorkerTypes": {
'args': [],
'method': 'get',
'name': 'listWorkerTypes',
'output': 'http://schemas.taskcluster.net/ec2-manager/v1/list-worker-types.json#',
'route': '/worker-types',
'stability': 'experimental',
},
"ping": {
'args': [],
'method': 'get',
'name': 'ping',
'route': '/ping',
'stability': 'stable',
},
"purgeQueues": {
'args': [],
'method': 'get',
'name': 'purgeQueues',
'route': '/internal/purge-queues',
'stability': 'experimental',
},
"regions": {
'args': [],
'method': 'get',
'name': 'regions',
'route': '/internal/regions',
'stability': 'experimental',
},
"removeKeyPair": {
'args': ['name'],
'method': 'delete',
'name': 'removeKeyPair',
'route': '/key-pairs/<name>',
'stability': 'experimental',
},
"runInstance": {
'args': ['workerType'],
'input': 'http://schemas.taskcluster.net/ec2-manager/v1/run-instance-request.json#',
'method': 'put',
'name': 'runInstance',
'route': '/worker-types/<workerType>/instance',
'stability': 'experimental',
},
"sqsStats": {
'args': [],
'method': 'get',
'name': 'sqsStats',
'route': '/internal/sqs-stats',
'stability': 'experimental',
},
"terminateInstance": {
'args': ['region', 'instanceId'],
'method': 'delete',
'name': 'terminateInstance',
'route': '/region/<region>/instance/<instanceId>',
'stability': 'experimental',
},
"terminateWorkerType": {
'args': ['workerType'],
'method': 'delete',
'name': 'terminateWorkerType',
'route': '/worker-types/<workerType>/resources',
'stability': 'experimental',
},
"workerTypeErrors": {
'args': ['workerType'],
'method': 'get',
'name': 'workerTypeErrors',
'output': 'http://schemas.taskcluster.net/ec2-manager/v1/errors.json#',
'route': '/worker-types/<workerType>/errors',
'stability': 'experimental',
},
"workerTypeHealth": {
'args': ['workerType'],
'method': 'get',
'name': 'workerTypeHealth',
'output': 'http://schemas.taskcluster.net/ec2-manager/v1/health.json#',
'route': '/worker-types/<workerType>/health',
'stability': 'experimental',
},
"workerTypeState": {
'args': ['workerType'],
'method': 'get',
'name': 'workerTypeState',
'output': 'http://schemas.taskcluster.net/ec2-manager/v1/worker-type-state.json#',
'route': '/worker-types/<workerType>/state',
'stability': 'experimental',
},
"workerTypeStats": {
'args': ['workerType'],
'method': 'get',
'name': 'workerTypeStats',
'output': 'http://schemas.taskcluster.net/ec2-manager/v1/worker-type-resources.json#',
'route': '/worker-types/<workerType>/stats',
'stability': 'experimental',
},
}
__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'EC2Manager']


@ -0,0 +1,205 @@
# coding=utf-8
#####################################################
# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
#####################################################
# noqa: E128,E201
from .asyncclient import AsyncBaseClient
from .asyncclient import createApiClient
from .asyncclient import config
from .asyncclient import createTemporaryCredentials
from .asyncclient import createSession
_defaultConfig = config
class Github(AsyncBaseClient):
"""
The github service, typically available at
`github.taskcluster.net`, is responsible for publishing pulse
messages in response to GitHub events.
This document describes the API end-point for consuming GitHub
web hooks, as well as some useful consumer APIs.
When Github forbids an action, this service returns an HTTP 403
with code ForbiddenByGithub.
"""
classOptions = {
"baseUrl": "https://github.taskcluster.net/v1/"
}
async def ping(self, *args, **kwargs):
"""
Ping Server
Respond without doing anything.
This endpoint is used to check that the service is up.
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
async def githubWebHookConsumer(self, *args, **kwargs):
"""
Consume GitHub WebHook
Capture a GitHub event and publish it via pulse if it's a push,
release, or pull request.
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["githubWebHookConsumer"], *args, **kwargs)
async def builds(self, *args, **kwargs):
"""
List of Builds
A paginated list of builds that have been run in
Taskcluster. Can be filtered on various git-specific
fields.
This method gives output: ``v1/build-list.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["builds"], *args, **kwargs)
async def badge(self, *args, **kwargs):
"""
Latest Build Status Badge
Checks the status of the latest build of a given branch
and returns the corresponding badge SVG.
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["badge"], *args, **kwargs)
async def repository(self, *args, **kwargs):
"""
Get Repository Info
Returns any repository metadata that is
useful within Taskcluster-related services.
This method gives output: ``v1/repository.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["repository"], *args, **kwargs)
async def latest(self, *args, **kwargs):
"""
Latest Status for Branch
For a given branch of a repository, this will always point
to a status page for the most recent task triggered by that
branch.
Note: This is a redirect rather than a direct link.
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["latest"], *args, **kwargs)
async def createStatus(self, *args, **kwargs):
"""
Post a status against a given changeset
For a given changeset (SHA) of a repository, this will attach a "commit status"
on github. These statuses are links displayed next to each revision.
The status is either OK (green check) or FAILURE (red cross),
made of a custom title and link.
This method takes input: ``v1/create-status.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["createStatus"], *args, **kwargs)
async def createComment(self, *args, **kwargs):
"""
Post a comment on a given GitHub Issue or Pull Request
For a given Issue or Pull Request of a repository, this will write a new message.
This method takes input: ``v1/create-comment.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["createComment"], *args, **kwargs)
funcinfo = {
"badge": {
'args': ['owner', 'repo', 'branch'],
'method': 'get',
'name': 'badge',
'route': '/repository/<owner>/<repo>/<branch>/badge.svg',
'stability': 'experimental',
},
"builds": {
'args': [],
'method': 'get',
'name': 'builds',
'output': 'v1/build-list.json#',
'query': ['continuationToken', 'limit', 'organization', 'repository', 'sha'],
'route': '/builds',
'stability': 'experimental',
},
"createComment": {
'args': ['owner', 'repo', 'number'],
'input': 'v1/create-comment.json#',
'method': 'post',
'name': 'createComment',
'route': '/repository/<owner>/<repo>/issues/<number>/comments',
'stability': 'experimental',
},
"createStatus": {
'args': ['owner', 'repo', 'sha'],
'input': 'v1/create-status.json#',
'method': 'post',
'name': 'createStatus',
'route': '/repository/<owner>/<repo>/statuses/<sha>',
'stability': 'experimental',
},
"githubWebHookConsumer": {
'args': [],
'method': 'post',
'name': 'githubWebHookConsumer',
'route': '/github',
'stability': 'experimental',
},
"latest": {
'args': ['owner', 'repo', 'branch'],
'method': 'get',
'name': 'latest',
'route': '/repository/<owner>/<repo>/<branch>/latest',
'stability': 'experimental',
},
"ping": {
'args': [],
'method': 'get',
'name': 'ping',
'route': '/ping',
'stability': 'stable',
},
"repository": {
'args': ['owner', 'repo'],
'method': 'get',
'name': 'repository',
'output': 'v1/repository.json#',
'route': '/repository/<owner>/<repo>',
'stability': 'experimental',
},
}
__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Github']


@ -0,0 +1,155 @@
# coding=utf-8
#####################################################
# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
#####################################################
# noqa: E128,E201
from .asyncclient import AsyncBaseClient
from .asyncclient import createApiClient
from .asyncclient import config
from .asyncclient import createTemporaryCredentials
from .asyncclient import createSession
_defaultConfig = config
class GithubEvents(AsyncBaseClient):
"""
The github service publishes a pulse
message for supported github events, translating Github webhook
events into pulse messages.
This document describes the exchanges offered by the taskcluster
github service.
"""
classOptions = {
"exchangePrefix": "exchange/taskcluster-github/v1/"
}
def pullRequest(self, *args, **kwargs):
"""
GitHub Pull Request Event
When a GitHub pull request event is posted it will be broadcast on this
exchange with the designated `organization` and `repository`
in the routing-key along with event specific metadata in the payload.
This exchange outputs: ``v1/github-pull-request-message.json#``
This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required)
* organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
* repository: The GitHub `repository` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
* action: The GitHub `action` which triggered an event. See the payload actions property for possible values. (required)
"""
ref = {
'exchange': 'pull-request',
'name': 'pullRequest',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'organization',
},
{
'multipleWords': False,
'name': 'repository',
},
{
'multipleWords': False,
'name': 'action',
},
],
'schema': 'v1/github-pull-request-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def push(self, *args, **kwargs):
"""
GitHub push Event
When a GitHub push event is posted it will be broadcast on this
exchange with the designated `organization` and `repository`
in the routing-key along with event specific metadata in the payload.
This exchange outputs: ``v1/github-push-message.json#``
This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required)
* organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
* repository: The GitHub `repository` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
"""
ref = {
'exchange': 'push',
'name': 'push',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'organization',
},
{
'multipleWords': False,
'name': 'repository',
},
],
'schema': 'v1/github-push-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def release(self, *args, **kwargs):
"""
GitHub release Event
When a GitHub release event is posted it will be broadcast on this
exchange with the designated `organization` and `repository`
in the routing-key along with event specific metadata in the payload.
This exchange outputs: ``v1/github-release-message.json#``
This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required)
* organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
* repository: The GitHub `repository` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
"""
ref = {
'exchange': 'release',
'name': 'release',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'organization',
},
{
'multipleWords': False,
'name': 'repository',
},
],
'schema': 'v1/github-release-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
funcinfo = {
}
__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'GithubEvents']
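These exchange classes do not call any API; they only build binding descriptions for a pulse consumer. A sketch, assuming (as in the synchronous client) that the returned dict carries `exchange` and `routingKeyPattern`; the organization and action values are hypothetical:

```
import taskcluster.aio

events = taskcluster.aio.GithubEvents()
# Keyword arguments fill in routing-key entries; omitted single-word
# entries match '*'.
binding = events.pullRequest(organization='example-org', action='opened')
print(binding['exchange'])           # exchange/taskcluster-github/v1/pull-request
print(binding['routingKeyPattern'])  # primary.example-org.*.opened
```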

View File

@ -0,0 +1,323 @@
# coding=utf-8
#####################################################
# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
#####################################################
# noqa: E128,E201
from .asyncclient import AsyncBaseClient
from .asyncclient import createApiClient
from .asyncclient import config
from .asyncclient import createTemporaryCredentials
from .asyncclient import createSession
_defaultConfig = config
class Hooks(AsyncBaseClient):
"""
Hooks are a mechanism for creating tasks in response to events.
Hooks are identified with a `hookGroupId` and a `hookId`.
When an event occurs, the resulting task is automatically created. The
task is created using the scope `assume:hook-id:<hookGroupId>/<hookId>`,
which must have scopes to make the createTask call, including satisfying all
scopes in `task.scopes`. The new task has a `taskGroupId` equal to its
`taskId`, as is the convention for decision tasks.
Hooks can have a "schedule" indicating specific times that new tasks should
be created. Each schedule is in a simple cron format, per
https://www.npmjs.com/package/cron-parser. For example:
* `['0 0 1 * * *']` -- daily at 1:00 UTC
* `['0 0 9,21 * * 1-5', '0 0 12 * * 0,6']` -- weekdays at 9:00 and 21:00 UTC, weekends at noon
The task definition is used as a JSON-e template, with a context depending on how it is fired. See
https://docs.taskcluster.net/reference/core/taskcluster-hooks/docs/firing-hooks
for more information.
"""
classOptions = {
"baseUrl": "https://hooks.taskcluster.net/v1/"
}
async def ping(self, *args, **kwargs):
"""
Ping Server
Respond without doing anything.
This endpoint is used to check that the service is up.
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
async def listHookGroups(self, *args, **kwargs):
"""
List hook groups
This endpoint will return a list of all hook groups with at least one hook.
This method gives output: ``v1/list-hook-groups-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["listHookGroups"], *args, **kwargs)
async def listHooks(self, *args, **kwargs):
"""
List hooks in a given group
This endpoint will return a list of all the hook definitions within a
given hook group.
This method gives output: ``v1/list-hooks-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["listHooks"], *args, **kwargs)
async def hook(self, *args, **kwargs):
"""
Get hook definition
This endpoint will return the hook definition for the given `hookGroupId`
and `hookId`.
This method gives output: ``v1/hook-definition.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["hook"], *args, **kwargs)
async def getHookStatus(self, *args, **kwargs):
"""
Get hook status
This endpoint will return the current status of the hook. This represents a
snapshot in time and may vary from one call to the next.
This method gives output: ``v1/hook-status.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["getHookStatus"], *args, **kwargs)
async def createHook(self, *args, **kwargs):
"""
Create a hook
This endpoint will create a new hook.
The caller's credentials must include the role that will be used to
create the task. That role must satisfy task.scopes as well as the
necessary scopes to add the task to the queue.
This method takes input: ``v1/create-hook-request.json#``
This method gives output: ``v1/hook-definition.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["createHook"], *args, **kwargs)
async def updateHook(self, *args, **kwargs):
"""
Update a hook
This endpoint will update an existing hook. All fields except
`hookGroupId` and `hookId` can be modified.
This method takes input: ``v1/create-hook-request.json#``
This method gives output: ``v1/hook-definition.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["updateHook"], *args, **kwargs)
async def removeHook(self, *args, **kwargs):
"""
Delete a hook
This endpoint will remove a hook definition.
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["removeHook"], *args, **kwargs)
async def triggerHook(self, *args, **kwargs):
"""
Trigger a hook
This endpoint will trigger the creation of a task from a hook definition.
The HTTP payload must match the hook's `triggerSchema`. If it does, it is
provided as the `payload` property of the JSON-e context used to render the
task template.
This method takes input: ``v1/trigger-hook.json#``
This method gives output: ``v1/task-status.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["triggerHook"], *args, **kwargs)
async def getTriggerToken(self, *args, **kwargs):
"""
Get a trigger token
Retrieve a unique secret token for triggering the specified hook. This
token can be deactivated with `resetTriggerToken`.
This method gives output: ``v1/trigger-token-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["getTriggerToken"], *args, **kwargs)
async def resetTriggerToken(self, *args, **kwargs):
"""
Reset a trigger token
Reset the token for triggering a given hook. This invalidates any token
previously issued via `getTriggerToken`, replacing it with a new token.
This method gives output: ``v1/trigger-token-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["resetTriggerToken"], *args, **kwargs)
async def triggerHookWithToken(self, *args, **kwargs):
"""
Trigger a hook with a token
This endpoint triggers a defined hook with a valid token.
The HTTP payload must match the hook's `triggerSchema`. If it does, it is
provided as the `payload` property of the JSON-e context used to render the
task template.
This method takes input: ``v1/trigger-hook.json#``
This method gives output: ``v1/task-status.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["triggerHookWithToken"], *args, **kwargs)
funcinfo = {
"createHook": {
'args': ['hookGroupId', 'hookId'],
'input': 'v1/create-hook-request.json#',
'method': 'put',
'name': 'createHook',
'output': 'v1/hook-definition.json#',
'route': '/hooks/<hookGroupId>/<hookId>',
'stability': 'stable',
},
"getHookStatus": {
'args': ['hookGroupId', 'hookId'],
'method': 'get',
'name': 'getHookStatus',
'output': 'v1/hook-status.json#',
'route': '/hooks/<hookGroupId>/<hookId>/status',
'stability': 'stable',
},
"getTriggerToken": {
'args': ['hookGroupId', 'hookId'],
'method': 'get',
'name': 'getTriggerToken',
'output': 'v1/trigger-token-response.json#',
'route': '/hooks/<hookGroupId>/<hookId>/token',
'stability': 'stable',
},
"hook": {
'args': ['hookGroupId', 'hookId'],
'method': 'get',
'name': 'hook',
'output': 'v1/hook-definition.json#',
'route': '/hooks/<hookGroupId>/<hookId>',
'stability': 'stable',
},
"listHookGroups": {
'args': [],
'method': 'get',
'name': 'listHookGroups',
'output': 'v1/list-hook-groups-response.json#',
'route': '/hooks',
'stability': 'stable',
},
"listHooks": {
'args': ['hookGroupId'],
'method': 'get',
'name': 'listHooks',
'output': 'v1/list-hooks-response.json#',
'route': '/hooks/<hookGroupId>',
'stability': 'stable',
},
"ping": {
'args': [],
'method': 'get',
'name': 'ping',
'route': '/ping',
'stability': 'stable',
},
"removeHook": {
'args': ['hookGroupId', 'hookId'],
'method': 'delete',
'name': 'removeHook',
'route': '/hooks/<hookGroupId>/<hookId>',
'stability': 'stable',
},
"resetTriggerToken": {
'args': ['hookGroupId', 'hookId'],
'method': 'post',
'name': 'resetTriggerToken',
'output': 'v1/trigger-token-response.json#',
'route': '/hooks/<hookGroupId>/<hookId>/token',
'stability': 'stable',
},
"triggerHook": {
'args': ['hookGroupId', 'hookId'],
'input': 'v1/trigger-hook.json#',
'method': 'post',
'name': 'triggerHook',
'output': 'v1/task-status.json#',
'route': '/hooks/<hookGroupId>/<hookId>/trigger',
'stability': 'stable',
},
"triggerHookWithToken": {
'args': ['hookGroupId', 'hookId', 'token'],
'input': 'v1/trigger-hook.json#',
'method': 'post',
'name': 'triggerHookWithToken',
'output': 'v1/task-status.json#',
'route': '/hooks/<hookGroupId>/<hookId>/trigger/<token>',
'stability': 'stable',
},
"updateHook": {
'args': ['hookGroupId', 'hookId'],
'input': 'v1/create-hook-request.json#',
'method': 'post',
'name': 'updateHook',
'output': 'v1/hook-definition.json#',
'route': '/hooks/<hookGroupId>/<hookId>',
'stability': 'stable',
},
}
__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Hooks']
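A sketch of triggering a hook with this client; the hook identifiers and trigger payload are hypothetical, and the payload must validate against the hook's `triggerSchema`:

```
import asyncio
import taskcluster.aio

async def main():
    hooks = taskcluster.aio.Hooks({
        'credentials': {'clientId': '...', 'accessToken': '...'},
    })
    # Positional args per funcinfo: hookGroupId, hookId, then the
    # v1/trigger-hook.json payload.
    status = await hooks.triggerHook('project-example', 'nightly-build', {
        'branch': 'master',
    })
    print(status['status']['taskId'])

asyncio.get_event_loop().run_until_complete(main())
```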

View File

@ -0,0 +1,277 @@
# coding=utf-8
#####################################################
# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
#####################################################
# noqa: E128,E201
from .asyncclient import AsyncBaseClient
from .asyncclient import createApiClient
from .asyncclient import config
from .asyncclient import createTemporaryCredentials
from .asyncclient import createSession
_defaultConfig = config
class Index(AsyncBaseClient):
"""
The task index, typically available at `index.taskcluster.net`, is
responsible for indexing tasks. The service ensures that tasks can be
located by recency and/or arbitrary strings. Common use-cases include:
* Locate tasks by git or mercurial `<revision>`, or
* Locate latest task from given `<branch>`, such as a release.
**Index hierarchy**, tasks are indexed in a dot (`.`) separated hierarchy
called a namespace. For example a task could be indexed with the index path
`some-app.<revision>.linux-64.release-build`. In this case the following
namespaces are created:
1. `some-app`,
2. `some-app.<revision>`, and,
3. `some-app.<revision>.linux-64`
Inside the namespace `some-app.<revision>` you can find the namespace
`some-app.<revision>.linux-64` inside which you can find the indexed task
`some-app.<revision>.linux-64.release-build`. This is an example of indexing
builds for a given platform and revision.
**Task Rank**, when a task is indexed, it is assigned a `rank` (defaults
to `0`). If another task is already indexed in the same namespace with
lower or equal `rank`, the index for that task will be overwritten. For example
consider index path `mozilla-central.linux-64.release-build`. In
this case one might choose to use a UNIX timestamp or mercurial revision
number as `rank`. This way the latest completed linux 64 bit release
build is always available at `mozilla-central.linux-64.release-build`.
Note that this does mean index paths are not immutable: the same path may
point to a different task now than it did a moment ago.
**Indexed Data**, when a task is retrieved from the index the result includes
a `taskId` and an additional user-defined JSON blob that was indexed with
the task.
**Entry Expiration**, all indexed entries must have an expiration date.
Typically this defaults to one year, if not specified. If you are
indexing tasks to make it easy to find artifacts, consider using the
artifact's expiration date.
**Valid Characters**, all keys in a namespace `<key1>.<key2>` must be
in the form `/[a-zA-Z0-9_!~*'()%-]+/`. Observe that this is URL-safe, and
that if you need to use another character you can URL-encode it.
**Indexing Routes**, tasks can be indexed using the API below, but the
most common way to index tasks is adding a custom route to `task.routes` of the
form `index.<namespace>`. In order to add this route to a task you'll
need the scope `queue:route:index.<namespace>`. When a task has
this route, it will be indexed when the task is **completed successfully**.
The task will be indexed with `rank`, `data` and `expires` as specified
in `task.extra.index`. See the example below:
```
{
payload: { /* ... */ },
routes: [
// index.<namespace> prefixed routes, tasks CC'ed such a route will
// be indexed under the given <namespace>
"index.mozilla-central.linux-64.release-build",
"index.<revision>.linux-64.release-build"
],
extra: {
// Optional details for indexing service
index: {
// Ordering, this taskId will overwrite any thing that has
// rank <= 4000 (defaults to zero)
rank: 4000,
// Specify when the entries expire (Defaults to 1 year)
expires: new Date().toJSON(),
// A little informal data to store along with taskId
// (less than 16 kb when encoded as JSON)
data: {
hgRevision: "...",
commitMessage: "...",
whatever...
}
},
// Extra properties for other services...
}
// Other task properties...
}
```
**Remark**, when indexing tasks using custom routes, it's also possible
to listen for messages about these tasks. For
example one could bind to `route.index.some-app.*.release-build`,
and pick up all messages about release builds. Hence, it is a
good idea to document task index hierarchies, as these make up extension
points in their own right.
"""
classOptions = {
"baseUrl": "https://index.taskcluster.net/v1/"
}
async def ping(self, *args, **kwargs):
"""
Ping Server
Respond without doing anything.
This endpoint is used to check that the service is up.
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
async def findTask(self, *args, **kwargs):
"""
Find Indexed Task
Find a task by index path, returning the highest-rank task with that path. If no
task exists for the given path, this API end-point will respond with a 404 status.
This method gives output: ``v1/indexed-task-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["findTask"], *args, **kwargs)
async def listNamespaces(self, *args, **kwargs):
"""
List Namespaces
List the namespaces immediately under a given namespace.
This endpoint
lists up to 1000 namespaces. If more namespaces are present, a
`continuationToken` will be returned, which can be given in the next
request. For the initial request, the payload should be an empty JSON
object.
This method gives output: ``v1/list-namespaces-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["listNamespaces"], *args, **kwargs)
async def listTasks(self, *args, **kwargs):
"""
List Tasks
List the tasks immediately under a given namespace.
This endpoint
lists up to 1000 tasks. If more tasks are present, a
`continuationToken` will be returned, which can be given in the next
request. For the initial request, the payload should be an empty JSON
object.
**Remark**, this end-point is designed for humans browsing for tasks, not
for services, for which it makes little sense.
This method gives output: ``v1/list-tasks-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["listTasks"], *args, **kwargs)
async def insertTask(self, *args, **kwargs):
"""
Insert Task into Index
Insert a task into the index. If the new rank is less than the existing rank
at the given index path, the task is not indexed but the response is still 200 OK.
Please see the introduction above for information
about indexing successfully completed tasks automatically using custom routes.
This method takes input: ``v1/insert-task-request.json#``
This method gives output: ``v1/indexed-task-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["insertTask"], *args, **kwargs)
async def findArtifactFromTask(self, *args, **kwargs):
"""
Get Artifact From Indexed Task
Find a task by index path and redirect to the artifact on the most recent
run with the given `name`.
Note that multiple calls to this endpoint may return artifacts from different tasks
if a new task is inserted into the index between calls. Avoid using this method as
a stable link to multiple, connected files if the index path does not contain a
unique identifier. For example, the following two links may return unrelated files:
* https://index.taskcluster.net/task/some-app.win64.latest.installer/artifacts/public/installer.exe
* https://index.taskcluster.net/task/some-app.win64.latest.installer/artifacts/public/debug-symbols.zip
This problem can be remedied by including the revision in the index path or by bundling both
installer and debug symbols into a single artifact.
If no task exists for the given index path, this API end-point responds with 404.
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["findArtifactFromTask"], *args, **kwargs)
funcinfo = {
"findArtifactFromTask": {
'args': ['indexPath', 'name'],
'method': 'get',
'name': 'findArtifactFromTask',
'route': '/task/<indexPath>/artifacts/<name>',
'stability': 'stable',
},
"findTask": {
'args': ['indexPath'],
'method': 'get',
'name': 'findTask',
'output': 'v1/indexed-task-response.json#',
'route': '/task/<indexPath>',
'stability': 'stable',
},
"insertTask": {
'args': ['namespace'],
'input': 'v1/insert-task-request.json#',
'method': 'put',
'name': 'insertTask',
'output': 'v1/indexed-task-response.json#',
'route': '/task/<namespace>',
'stability': 'stable',
},
"listNamespaces": {
'args': ['namespace'],
'method': 'get',
'name': 'listNamespaces',
'output': 'v1/list-namespaces-response.json#',
'query': ['continuationToken', 'limit'],
'route': '/namespaces/<namespace>',
'stability': 'stable',
},
"listTasks": {
'args': ['namespace'],
'method': 'get',
'name': 'listTasks',
'output': 'v1/list-tasks-response.json#',
'query': ['continuationToken', 'limit'],
'route': '/tasks/<namespace>',
'stability': 'stable',
},
"ping": {
'args': [],
'method': 'get',
'name': 'ping',
'route': '/ping',
'stability': 'stable',
},
}
__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Index']
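A sketch of a read-only index lookup (no credentials needed for public paths); the index path shown is illustrative:

```
import asyncio
import taskcluster.aio

async def main():
    index = taskcluster.aio.Index()
    # Returns the highest-rank task at the path, or raises on a 404.
    result = await index.findTask(
        'gecko.v2.mozilla-central.latest.firefox.linux64-opt')
    print(result['taskId'])

asyncio.get_event_loop().run_until_complete(main())
```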

View File

@ -0,0 +1,88 @@
# coding=utf-8
#####################################################
# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
#####################################################
# noqa: E128,E201
from .asyncclient import AsyncBaseClient
from .asyncclient import createApiClient
from .asyncclient import config
from .asyncclient import createTemporaryCredentials
from .asyncclient import createSession
_defaultConfig = config
class Login(AsyncBaseClient):
"""
The Login service serves as the interface between external authentication
systems and Taskcluster credentials.
"""
classOptions = {
"baseUrl": "https://login.taskcluster.net/v1"
}
async def oidcCredentials(self, *args, **kwargs):
"""
Get Taskcluster credentials given a suitable `access_token`
Given an OIDC `access_token` from a trusted OpenID provider, return a
set of Taskcluster credentials for use on behalf of the identified
user.
This method is typically not called with a Taskcluster client library
and does not accept Hawk credentials. The `access_token` should be
given in an `Authorization` header:
```
Authorization: Bearer abc.xyz
```
The `access_token` is first verified against the named
`provider`, then passed to the provider's API to retrieve a user
profile. That profile is then used to generate Taskcluster credentials
appropriate to the user. Note that the resulting credentials may or may
not include a `certificate` property. Callers should be prepared for either
alternative.
The given credentials will expire in a relatively short time. Callers should
monitor this expiration and refresh the credentials, by calling this endpoint
again, when they have expired.
This method gives output: ``http://schemas.taskcluster.net/login/v1/oidc-credentials-response.json``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["oidcCredentials"], *args, **kwargs)
async def ping(self, *args, **kwargs):
"""
Ping Server
Respond without doing anything.
This endpoint is used to check that the service is up.
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
funcinfo = {
"oidcCredentials": {
'args': ['provider'],
'method': 'get',
'name': 'oidcCredentials',
'output': 'http://schemas.taskcluster.net/login/v1/oidc-credentials-response.json',
'route': '/oidc-credentials/<provider>',
'stability': 'experimental',
},
"ping": {
'args': [],
'method': 'get',
'name': 'ping',
'route': '/ping',
'stability': 'stable',
},
}
__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Login']
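Because `oidcCredentials` takes a bearer token rather than Hawk credentials, it is more naturally called with a plain HTTP client. A sketch using aiohttp; the provider name `mozilla-auth0` is illustrative:

```
import asyncio
import aiohttp

async def fetch_taskcluster_credentials(access_token, provider='mozilla-auth0'):
    url = 'https://login.taskcluster.net/v1/oidc-credentials/%s' % provider
    headers = {'Authorization': 'Bearer %s' % access_token}
    async with aiohttp.ClientSession() as session:
        async with session.get(url, headers=headers) as resp:
            resp.raise_for_status()
            # The result may or may not include a `certificate` property.
            return await resp.json()

creds = asyncio.get_event_loop().run_until_complete(
    fetch_taskcluster_credentials('abc.xyz'))
```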

View File

@ -0,0 +1,124 @@
# coding=utf-8
#####################################################
# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
#####################################################
# noqa: E128,E201
from .asyncclient import AsyncBaseClient
from .asyncclient import createApiClient
from .asyncclient import config
from .asyncclient import createTemporaryCredentials
from .asyncclient import createSession
_defaultConfig = config
class Notify(AsyncBaseClient):
"""
The notification service, typically available at `notify.taskcluster.net`,
listens for tasks with associated notifications and handles requests to
send emails and post pulse messages.
"""
classOptions = {
"baseUrl": "https://notify.taskcluster.net/v1/"
}
async def ping(self, *args, **kwargs):
"""
Ping Server
Respond without doing anything.
This endpoint is used to check that the service is up.
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
async def email(self, *args, **kwargs):
"""
Send an Email
Send an email to `address`. The content is markdown and will be rendered
to HTML, but both the HTML and raw markdown text will be sent in the
email. If a link is included, it will be rendered to a nice button in the
HTML version of the email.
This method takes input: ``v1/email-request.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["email"], *args, **kwargs)
async def pulse(self, *args, **kwargs):
"""
Publish a Pulse Message
Publish a message on pulse with the given `routingKey`.
This method takes input: ``v1/pulse-request.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["pulse"], *args, **kwargs)
async def irc(self, *args, **kwargs):
"""
Post IRC Message
Post a message on IRC to a specific channel or user, or a specific user
on a specific channel.
Success of this API method does not imply the message was successfully
posted. This API method merely inserts the IRC message into a queue
that will be processed by a background process.
This allows us to re-send the message in the face of connection issues.
However, if the user isn't online the message will be dropped without
error. We may improve this behavior in the future. For now just keep
in mind that IRC is a best-effort service.
This method takes input: ``v1/irc-request.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["irc"], *args, **kwargs)
funcinfo = {
"email": {
'args': [],
'input': 'v1/email-request.json#',
'method': 'post',
'name': 'email',
'route': '/email',
'stability': 'experimental',
},
"irc": {
'args': [],
'input': 'v1/irc-request.json#',
'method': 'post',
'name': 'irc',
'route': '/irc',
'stability': 'experimental',
},
"ping": {
'args': [],
'method': 'get',
'name': 'ping',
'route': '/ping',
'stability': 'stable',
},
"pulse": {
'args': [],
'input': 'v1/pulse-request.json#',
'method': 'post',
'name': 'pulse',
'route': '/pulse',
'stability': 'experimental',
},
}
__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Notify']
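A sketch of sending an email notification; `address`, `subject`, and `content` follow the v1/email-request.json schema named above, and the credentials are placeholders:

```
import asyncio
import taskcluster.aio

async def main():
    notify = taskcluster.aio.Notify({
        'credentials': {'clientId': '...', 'accessToken': '...'},
    })
    await notify.email({
        'address': 'someone@example.com',
        'subject': 'Build finished',
        'content': 'The **nightly** build completed.',  # markdown, per the docs
    })

asyncio.get_event_loop().run_until_complete(main())
```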

View File

@ -0,0 +1,151 @@
# coding=utf-8
#####################################################
# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
#####################################################
# noqa: E128,E201
from .asyncclient import AsyncBaseClient
from .asyncclient import createApiClient
from .asyncclient import config
from .asyncclient import createTemporaryCredentials
from .asyncclient import createSession
_defaultConfig = config
class Pulse(AsyncBaseClient):
"""
The taskcluster-pulse service, typically available at `pulse.taskcluster.net`,
manages pulse credentials for taskcluster users.
A service to manage Pulse credentials for anything using
Taskcluster credentials. This allows for self-service pulse
access and greater control within the Taskcluster project.
"""
classOptions = {
"baseUrl": "https://pulse.taskcluster.net/v1"
}
async def overview(self, *args, **kwargs):
"""
Rabbit Overview
Get an overview of the Rabbit cluster.
This method gives output: ``http://schemas.taskcluster.net/pulse/v1/rabbit-overview.json``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["overview"], *args, **kwargs)
async def listNamespaces(self, *args, **kwargs):
"""
List Namespaces
List the namespaces managed by this service.
This will list up to 1000 namespaces. If more namespaces are present a
`continuationToken` will be returned, which can be given in the next
request. For the initial request, do not provide continuation.
This method gives output: ``http://schemas.taskcluster.net/pulse/v1/list-namespaces-response.json``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["listNamespaces"], *args, **kwargs)
async def namespace(self, *args, **kwargs):
"""
Get a namespace
Get public information about a single namespace. This is the same information
as returned by `listNamespaces`.
This method gives output: ``http://schemas.taskcluster.net/pulse/v1/namespace.json``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["namespace"], *args, **kwargs)
async def claimNamespace(self, *args, **kwargs):
"""
Claim a namespace
Claim a namespace, returning a username and password with access to that
namespace good for a short time. Clients should call this endpoint again
at the re-claim time given in the response, as the password will be rotated
soon after that time. The namespace will expire, and any associated queues
and exchanges will be deleted, at the given expiration time.
The `expires` and `contact` properties can be updated at any time in a reclaim
operation.
This method takes input: ``http://schemas.taskcluster.net/pulse/v1/namespace-request.json``
This method gives output: ``http://schemas.taskcluster.net/pulse/v1/namespace-response.json``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["claimNamespace"], *args, **kwargs)
async def ping(self, *args, **kwargs):
"""
Ping Server
Respond without doing anything.
This endpoint is used to check that the service is up.
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
funcinfo = {
"claimNamespace": {
'args': ['namespace'],
'input': 'http://schemas.taskcluster.net/pulse/v1/namespace-request.json',
'method': 'post',
'name': 'claimNamespace',
'output': 'http://schemas.taskcluster.net/pulse/v1/namespace-response.json',
'route': '/namespace/<namespace>',
'stability': 'experimental',
},
"listNamespaces": {
'args': [],
'method': 'get',
'name': 'listNamespaces',
'output': 'http://schemas.taskcluster.net/pulse/v1/list-namespaces-response.json',
'query': ['limit', 'continuation'],
'route': '/namespaces',
'stability': 'experimental',
},
"namespace": {
'args': ['namespace'],
'method': 'get',
'name': 'namespace',
'output': 'http://schemas.taskcluster.net/pulse/v1/namespace.json',
'route': '/namespace/<namespace>',
'stability': 'experimental',
},
"overview": {
'args': [],
'method': 'get',
'name': 'overview',
'output': 'http://schemas.taskcluster.net/pulse/v1/rabbit-overview.json',
'route': '/overview',
'stability': 'experimental',
},
"ping": {
'args': [],
'method': 'get',
'name': 'ping',
'route': '/ping',
'stability': 'stable',
},
}
__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Pulse']
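A sketch of claiming a namespace; the payload shape is an assumption based on the namespace-request.json schema named above, and the namespace and contact values are hypothetical:

```
import asyncio
import taskcluster.aio

async def main():
    pulse = taskcluster.aio.Pulse({
        'credentials': {'clientId': '...', 'accessToken': '...'},
    })
    ns = await pulse.claimNamespace('example-namespace', {
        'expires': '2019-01-01T00:00:00.000Z',  # assumed field
        'contact': 'someone@example.com',       # assumed field
    })
    print(ns)

asyncio.get_event_loop().run_until_complete(main())
```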

View File

@ -0,0 +1,124 @@
# coding=utf-8
#####################################################
# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
#####################################################
# noqa: E128,E201
from .asyncclient import AsyncBaseClient
from .asyncclient import createApiClient
from .asyncclient import config
from .asyncclient import createTemporaryCredentials
from .asyncclient import createSession
_defaultConfig = config
class PurgeCache(AsyncBaseClient):
"""
The purge-cache service, typically available at
`purge-cache.taskcluster.net`, is responsible for publishing a pulse
message for workers, so they can purge cache upon request.
This document describes the API end-point for publishing the pulse
message. This is mainly intended to be used by tools.
"""
classOptions = {
"baseUrl": "https://purge-cache.taskcluster.net/v1/"
}
async def ping(self, *args, **kwargs):
"""
Ping Server
Respond without doing anything.
This endpoint is used to check that the service is up.
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
async def purgeCache(self, *args, **kwargs):
"""
Purge Worker Cache
Publish a purge-cache message to purge caches named `cacheName` with
`provisionerId` and `workerType` in the routing-key. Workers should
be listening for this message and purge caches when they see it.
This method takes input: ``v1/purge-cache-request.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["purgeCache"], *args, **kwargs)
async def allPurgeRequests(self, *args, **kwargs):
"""
All Open Purge Requests
This is useful mostly for administrators to view
the set of open purge requests. It should not
be used by workers. They should use the purgeRequests
endpoint that is specific to their workerType and
provisionerId.
This method gives output: ``v1/all-purge-cache-request-list.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["allPurgeRequests"], *args, **kwargs)
async def purgeRequests(self, *args, **kwargs):
"""
Open Purge Requests for a provisionerId/workerType pair
List of caches that need to be purged if they are from before
a certain time. This is safe to be used in automation from
workers.
This method gives output: ``v1/purge-cache-request-list.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["purgeRequests"], *args, **kwargs)
funcinfo = {
"allPurgeRequests": {
'args': [],
'method': 'get',
'name': 'allPurgeRequests',
'output': 'v1/all-purge-cache-request-list.json#',
'query': ['continuationToken', 'limit'],
'route': '/purge-cache/list',
'stability': 'stable',
},
"ping": {
'args': [],
'method': 'get',
'name': 'ping',
'route': '/ping',
'stability': 'stable',
},
"purgeCache": {
'args': ['provisionerId', 'workerType'],
'input': 'v1/purge-cache-request.json#',
'method': 'post',
'name': 'purgeCache',
'route': '/purge-cache/<provisionerId>/<workerType>',
'stability': 'stable',
},
"purgeRequests": {
'args': ['provisionerId', 'workerType'],
'method': 'get',
'name': 'purgeRequests',
'output': 'v1/purge-cache-request-list.json#',
'query': ['since'],
'route': '/purge-cache/<provisionerId>/<workerType>',
'stability': 'stable',
},
}
__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'PurgeCache']
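A sketch of requesting a cache purge; the provisionerId, workerType, and cache name are hypothetical, and `cacheName` is the field documented in purge-cache-request.json:

```
import asyncio
import taskcluster.aio

async def main():
    purge = taskcluster.aio.PurgeCache({
        'credentials': {'clientId': '...', 'accessToken': '...'},
    })
    # Positional args per funcinfo: provisionerId, workerType, then payload.
    await purge.purgeCache('aws-provisioner-v1', 'gecko-t-linux-large', {
        'cacheName': 'level-1-checkouts',
    })

asyncio.get_event_loop().run_until_complete(main())
```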

View File

@ -0,0 +1,71 @@
# coding=utf-8
#####################################################
# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
#####################################################
# noqa: E128,E201
from .asyncclient import AsyncBaseClient
from .asyncclient import createApiClient
from .asyncclient import config
from .asyncclient import createTemporaryCredentials
from .asyncclient import createSession
_defaultConfig = config
class PurgeCacheEvents(AsyncBaseClient):
"""
The purge-cache service, typically available at
`purge-cache.taskcluster.net`, is responsible for publishing a pulse
message for workers, so they can purge cache upon request.
This document describes the exchange offered for workers by the
cache-purge service.
"""
classOptions = {
"exchangePrefix": "exchange/taskcluster-purge-cache/v1/"
}
def purgeCache(self, *args, **kwargs):
"""
Purge Cache Messages
When a cache purge is requested a message will be posted on this
exchange with designated `provisionerId` and `workerType` in the
routing-key and the name of the `cacheFolder` as payload.
This exchange outputs: ``v1/purge-cache-message.json#``
This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* provisionerId: `provisionerId` under which to purge cache. (required)
* workerType: `workerType` for which to purge cache. (required)
"""
ref = {
'exchange': 'purge-cache',
'name': 'purgeCache',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'provisionerId',
},
{
'multipleWords': False,
'name': 'workerType',
},
],
'schema': 'v1/purge-cache-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
funcinfo = {
}
__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'PurgeCacheEvents']
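A binding sketch for this exchange, again assuming the returned dict exposes `exchange` and `routingKeyPattern`; the provisionerId/workerType pair is hypothetical:

```
import taskcluster.aio

events = taskcluster.aio.PurgeCacheEvents()
binding = events.purgeCache(provisionerId='aws-provisioner-v1',
                            workerType='gecko-t-linux-large')
print(binding['exchange'])           # exchange/taskcluster-purge-cache/v1/purge-cache
print(binding['routingKeyPattern'])  # primary.aws-provisioner-v1.gecko-t-linux-large
```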

File diff suppressed because it is too large

View File

@ -0,0 +1,716 @@
# coding=utf-8
#####################################################
# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
#####################################################
# noqa: E128,E201
from .asyncclient import AsyncBaseClient
from .asyncclient import createApiClient
from .asyncclient import config
from .asyncclient import createTemporaryCredentials
from .asyncclient import createSession
_defaultConfig = config
class QueueEvents(AsyncBaseClient):
"""
The queue, typically available at `queue.taskcluster.net`, is responsible
for accepting tasks and tracking their state as they are executed by
workers, in order to ensure they are eventually resolved.
This document describes AMQP exchanges offered by the queue, which allow
third-party listeners to monitor tasks as they progress to resolution.
These exchanges target the following audience:
* Schedulers, who take action after tasks are completed,
* Workers, who want to listen for new or canceled tasks (optional),
* Tools, that want to update their view as tasks progress.
You'll notice that all the exchanges in the document share the same
routing key pattern. This makes it very easy to bind to all messages
about a certain kind of task.
**Task specific routes**, a task can define a task specific route using
the `task.routes` property. See task creation documentation for details
on permissions required to provide task specific routes. If a task has
the entry `'notify.by-email'` as a task specific route defined in
`task.routes`, all messages about this task will be CC'ed with the
routing-key `'route.notify.by-email'`.
These routes will always be prefixed `route.`, so they cannot interfere
with the _primary_ routing key as documented here. Notice that the
_primary_ routing key is always prefixed `primary.`. This is ensured
in the routing key reference, so API clients will do this automatically.
Please, note that the way RabbitMQ works, the message will only arrive
in your queue once, even though you may have bound to the exchange with
multiple routing key patterns that match more than one of the CC'ed
routing keys.
**Delivery guarantees**, most operations on the queue are idempotent,
which means that if repeated with the same arguments then the requests
will ensure completion of the operation and return the same response.
This is useful if the server crashes or the TCP connection breaks, but
when re-executing an idempotent operation, the queue will also resend
any related AMQP messages. Hence, messages may be repeated.
This shouldn't be much of a problem, as the best you can achieve using
confirm messages with AMQP is at-least-once delivery semantics. Hence,
this only prevents you from obtaining at-most-once delivery semantics.
**Remark**, some messages generated by timeouts may be dropped if the
server crashes at the wrong time. Ideally, we'll address this in the
future. For now we suggest you ignore this corner case, and notify us
if this corner case is of concern to you.
"""
classOptions = {
"exchangePrefix": "exchange/taskcluster-queue/v1/"
}
def taskDefined(self, *args, **kwargs):
"""
Task Defined Messages
When a task is created or just defined, a message is posted to this
exchange.
This message exchange is mainly useful when tasks are scheduled by a
scheduler that uses `defineTask` as this does not make the task
`pending`. Thus, no `taskPending` message is published.
Please, note that messages are also published on this exchange if defined
using `createTask`.
This exchange outputs: ``v1/task-defined-message.json#``
This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* taskId: `taskId` for the task this message concerns (required)
* runId: `runId` of latest run for the task, `_` if no run exists for the task.
* workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task.
* workerId: `workerId` of latest run for the task, `_` if no run exists for the task.
* provisionerId: `provisionerId` this task is targeted at. (required)
* workerType: `workerType` this task must run on. (required)
* schedulerId: `schedulerId` this task was created by. (required)
* taskGroupId: `taskGroupId` this task was created in. (required)
* reserved: Space reserved for future routing-key entries, you should always match this entry with `#`. As automatically done by our tooling, if not specified.
"""
ref = {
'exchange': 'task-defined',
'name': 'taskDefined',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'taskId',
},
{
'multipleWords': False,
'name': 'runId',
},
{
'multipleWords': False,
'name': 'workerGroup',
},
{
'multipleWords': False,
'name': 'workerId',
},
{
'multipleWords': False,
'name': 'provisionerId',
},
{
'multipleWords': False,
'name': 'workerType',
},
{
'multipleWords': False,
'name': 'schedulerId',
},
{
'multipleWords': False,
'name': 'taskGroupId',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/task-defined-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def taskPending(self, *args, **kwargs):
"""
Task Pending Messages
When a task becomes `pending`, a message is posted to this exchange.
This is useful for workers who don't want to constantly poll the queue
for new tasks. The queue will also be the authority for task states and
claims. But using this exchange, workers should be able to distribute work
efficiently, and they would be able to reduce their polling interval
significantly without affecting general responsiveness.
This exchange outputs: ``v1/task-pending-message.json#``
This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* taskId: `taskId` for the task this message concerns (required)
* runId: `runId` of latest run for the task, `_` if no run exists for the task. (required)
* workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task.
* workerId: `workerId` of latest run for the task, `_` if no run exists for the task.
* provisionerId: `provisionerId` this task is targeted at. (required)
* workerType: `workerType` this task must run on. (required)
* schedulerId: `schedulerId` this task was created by. (required)
* taskGroupId: `taskGroupId` this task was created in. (required)
* reserved: Space reserved for future routing-key entries, you should always match this entry with `#`. As automatically done by our tooling, if not specified.
"""
ref = {
'exchange': 'task-pending',
'name': 'taskPending',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'taskId',
},
{
'multipleWords': False,
'name': 'runId',
},
{
'multipleWords': False,
'name': 'workerGroup',
},
{
'multipleWords': False,
'name': 'workerId',
},
{
'multipleWords': False,
'name': 'provisionerId',
},
{
'multipleWords': False,
'name': 'workerType',
},
{
'multipleWords': False,
'name': 'schedulerId',
},
{
'multipleWords': False,
'name': 'taskGroupId',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/task-pending-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def taskRunning(self, *args, **kwargs):
"""
Task Running Messages
Whenever a task is claimed by a worker, a run is started on the worker,
and a message is posted on this exchange.
This exchange outputs: ``v1/task-running-message.json#``
This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* taskId: `taskId` for the task this message concerns (required)
* runId: `runId` of latest run for the task, `_` if no run exists for the task. (required)
* workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task. (required)
* workerId: `workerId` of latest run for the task, `_` if no run exists for the task. (required)
* provisionerId: `provisionerId` this task is targeted at. (required)
* workerType: `workerType` this task must run on. (required)
* schedulerId: `schedulerId` this task was created by. (required)
* taskGroupId: `taskGroupId` this task was created in. (required)
* reserved: Space reserved for future routing-key entries, you should always match this entry with `#`. As automatically done by our tooling, if not specified.
"""
ref = {
'exchange': 'task-running',
'name': 'taskRunning',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'taskId',
},
{
'multipleWords': False,
'name': 'runId',
},
{
'multipleWords': False,
'name': 'workerGroup',
},
{
'multipleWords': False,
'name': 'workerId',
},
{
'multipleWords': False,
'name': 'provisionerId',
},
{
'multipleWords': False,
'name': 'workerType',
},
{
'multipleWords': False,
'name': 'schedulerId',
},
{
'multipleWords': False,
'name': 'taskGroupId',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/task-running-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def artifactCreated(self, *args, **kwargs):
"""
Artifact Creation Messages
Whenever the `createArtifact` end-point is called, the queue will create
a record of the artifact and post a message on this exchange. All of this
happens before the queue returns a signed URL for the caller to upload
the actual artifact with (depending on `storageType`).
This means that the actual artifact is rarely available when this message
is posted. But it is not unreasonable to assume that the artifact will
become available at some point later. Most signatures will expire in
30 minutes or so, forcing the uploader to call `createArtifact` with
the same payload again in-order to continue uploading the artifact.
However, in most cases (especially for small artifacts) it's very
reasonable to assume the artifact will be available within a few minutes.
This property means that this exchange is mostly useful for tools
monitoring task evaluation. One could also use it to count the number of
artifacts per task, or _index_ artifacts, though in most cases it'll be
smarter to index artifacts after the task in question has completed
successfully.
This exchange outputs: ``v1/artifact-created-message.json#``
This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* taskId: `taskId` for the task this message concerns (required)
* runId: `runId` of latest run for the task, `_` if no run exists for the task. (required)
* workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task. (required)
* workerId: `workerId` of latest run for the task, `_` if no run exists for the task. (required)
* provisionerId: `provisionerId` this task is targeted at. (required)
* workerType: `workerType` this task must run on. (required)
* schedulerId: `schedulerId` this task was created by. (required)
* taskGroupId: `taskGroupId` this task was created in. (required)
* reserved: Space reserved for future routing-key entries, you should always match this entry with `#`. As automatically done by our tooling, if not specified.
"""
ref = {
'exchange': 'artifact-created',
'name': 'artifactCreated',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'taskId',
},
{
'multipleWords': False,
'name': 'runId',
},
{
'multipleWords': False,
'name': 'workerGroup',
},
{
'multipleWords': False,
'name': 'workerId',
},
{
'multipleWords': False,
'name': 'provisionerId',
},
{
'multipleWords': False,
'name': 'workerType',
},
{
'multipleWords': False,
'name': 'schedulerId',
},
{
'multipleWords': False,
'name': 'taskGroupId',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/artifact-created-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def taskCompleted(self, *args, **kwargs):
"""
Task Completed Messages
When a task is successfully completed by a worker, a message is posted
to this exchange.
This message is routed using the `runId`, `workerGroup` and `workerId`
that completed the task. But information about additional runs is also
available from the task status structure.
This exchange outputs: ``v1/task-completed-message.json#``
This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* taskId: `taskId` for the task this message concerns (required)
* runId: `runId` of latest run for the task, `_` if no run exists for the task. (required)
* workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task. (required)
* workerId: `workerId` of latest run for the task, `_` if no run exists for the task. (required)
* provisionerId: `provisionerId` this task is targeted at. (required)
* workerType: `workerType` this task must run on. (required)
* schedulerId: `schedulerId` this task was created by. (required)
* taskGroupId: `taskGroupId` this task was created in. (required)
* reserved: Space reserved for future routing-key entries, you should always match this entry with `#`. As automatically done by our tooling, if not specified.
"""
ref = {
'exchange': 'task-completed',
'name': 'taskCompleted',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'taskId',
},
{
'multipleWords': False,
'name': 'runId',
},
{
'multipleWords': False,
'name': 'workerGroup',
},
{
'multipleWords': False,
'name': 'workerId',
},
{
'multipleWords': False,
'name': 'provisionerId',
},
{
'multipleWords': False,
'name': 'workerType',
},
{
'multipleWords': False,
'name': 'schedulerId',
},
{
'multipleWords': False,
'name': 'taskGroupId',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/task-completed-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def taskFailed(self, *args, **kwargs):
"""
Task Failed Messages
When a task ran, but failed to complete successfully, a message is posted
to this exchange. This means the worker ran the task-specific code, but the
task specific code exited non-zero.
This exchange outputs: ``v1/task-failed-message.json#``
This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* taskId: `taskId` for the task this message concerns (required)
* runId: `runId` of latest run for the task, `_` if no run exists for the task.
* workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task.
* workerId: `workerId` of latest run for the task, `_` if no run exists for the task.
* provisionerId: `provisionerId` this task is targeted at. (required)
* workerType: `workerType` this task must run on. (required)
* schedulerId: `schedulerId` this task was created by. (required)
* taskGroupId: `taskGroupId` this task was created in. (required)
* reserved: Space reserved for future routing-key entries, you should always match this entry with `#`. As automatically done by our tooling, if not specified.
"""
ref = {
'exchange': 'task-failed',
'name': 'taskFailed',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'taskId',
},
{
'multipleWords': False,
'name': 'runId',
},
{
'multipleWords': False,
'name': 'workerGroup',
},
{
'multipleWords': False,
'name': 'workerId',
},
{
'multipleWords': False,
'name': 'provisionerId',
},
{
'multipleWords': False,
'name': 'workerType',
},
{
'multipleWords': False,
'name': 'schedulerId',
},
{
'multipleWords': False,
'name': 'taskGroupId',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/task-failed-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def taskException(self, *args, **kwargs):
"""
Task Exception Messages
Whenever Taskcluster fails to run a task, a message is posted to this exchange.
This happens if the task isn't completed before its `deadline`,
all retries failed (i.e. workers stopped responding), the task was
canceled by another entity, or the task carried a malformed payload.
The specific _reason_ is evident from the task status structure; refer
to the `reasonResolved` property for the last run.
This exchange outputs: ``v1/task-exception-message.json#``
This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* taskId: `taskId` for the task this message concerns (required)
* runId: `runId` of latest run for the task, `_` if no run exists for the task.
* workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task.
* workerId: `workerId` of latest run for the task, `_` if no run exists for the task.
* provisionerId: `provisionerId` this task is targeted at. (required)
* workerType: `workerType` this task must run on. (required)
* schedulerId: `schedulerId` this task was created by. (required)
* taskGroupId: `taskGroupId` this task was created in. (required)
* reserved: Space reserved for future routing-key entries, you should always match this entry with `#`. As automatically done by our tooling, if not specified.
"""
ref = {
'exchange': 'task-exception',
'name': 'taskException',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'taskId',
},
{
'multipleWords': False,
'name': 'runId',
},
{
'multipleWords': False,
'name': 'workerGroup',
},
{
'multipleWords': False,
'name': 'workerId',
},
{
'multipleWords': False,
'name': 'provisionerId',
},
{
'multipleWords': False,
'name': 'workerType',
},
{
'multipleWords': False,
'name': 'schedulerId',
},
{
'multipleWords': False,
'name': 'taskGroupId',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/task-exception-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def taskGroupResolved(self, *args, **kwargs):
"""
Task Group Resolved Messages
A message is published on task-group-resolved whenever all submitted
tasks (whether scheduled or unscheduled) for a given task group have
been resolved, regardless of whether they resolved as successful or
not. A task group may be resolved multiple times, since new tasks may
be submitted against an already resolved task group.
This exchange outputs: ``v1/task-group-resolved.json#``
This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* taskGroupId: `taskGroupId` for the task-group this message concerns (required)
* schedulerId: `schedulerId` for the task-group this message concerns (required)
* reserved: Space reserved for future routing-key entries, you should always match this entry with `#`. As automatically done by our tooling, if not specified.
"""
ref = {
'exchange': 'task-group-resolved',
'name': 'taskGroupResolved',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'taskGroupId',
},
{
'multipleWords': False,
'name': 'schedulerId',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/task-group-resolved.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
funcinfo = {
}
__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'QueueEvents']
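A binding sketch against one of these exchanges; the taskGroupId is a hypothetical slugid, and omitted single-word routing-key entries match `*` while the trailing `reserved` entry matches `#`:

```
import taskcluster.aio

events = taskcluster.aio.QueueEvents()
binding = events.taskCompleted(taskGroupId='fN1SbArXTPSVFNUvaOlinQ')
print(binding['exchange'])
# e.g. primary.*.*.*.*.*.*.*.fN1SbArXTPSVFNUvaOlinQ.#
print(binding['routingKeyPattern'])
```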

@@ -0,0 +1,148 @@
# coding=utf-8
#####################################################
# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
#####################################################
# noqa: E128,E201
from .asyncclient import AsyncBaseClient
from .asyncclient import createApiClient
from .asyncclient import config
from .asyncclient import createTemporaryCredentials
from .asyncclient import createSession
_defaultConfig = config
class Secrets(AsyncBaseClient):
"""
The secrets service provides a simple key/value store for small bits of secret
data. Access is limited by scopes, so values can be considered secret from
those who do not have the relevant scopes.
Secrets also have an expiration date, and once a secret has expired it can no
longer be read. This is useful for short-term secrets such as a temporary
service credential or a one-time signing key.
"""
classOptions = {
"baseUrl": "https://secrets.taskcluster.net/v1/"
}
async def ping(self, *args, **kwargs):
"""
Ping Server
Respond without doing anything.
This endpoint is used to check that the service is up.
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
async def set(self, *args, **kwargs):
"""
Set Secret
Set the secret associated with some key. If the secret already exists, it is
updated instead.
This method takes input: ``v1/secret.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["set"], *args, **kwargs)
async def remove(self, *args, **kwargs):
"""
Delete Secret
Delete the secret associated with some key.
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["remove"], *args, **kwargs)
async def get(self, *args, **kwargs):
"""
Read Secret
Read the secret associated with some key. If the secret has recently
expired, the response code 410 is returned. If the caller lacks the
scope necessary to get the secret, the call will fail with a 403 code
regardless of whether the secret exists.
This method gives output: ``v1/secret.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["get"], *args, **kwargs)
async def list(self, *args, **kwargs):
"""
List Secrets
List the names of all secrets.
By default this end-point will try to return up to 1000 secret names in one
request. But it **may return fewer**, even if more secrets are available.
It may also return a `continuationToken` even though there are no more
results. However, you can only be sure to have seen all results if you
keep calling `list` with the last `continuationToken` until you
get a result without a `continuationToken`.
If you are not interested in listing all the secrets at once, you may
use the query-string option `limit` to return fewer.
This method gives output: ``v1/secret-list.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["list"], *args, **kwargs)
funcinfo = {
"get": {
'args': ['name'],
'method': 'get',
'name': 'get',
'output': 'v1/secret.json#',
'route': '/secret/<name>',
'stability': 'stable',
},
"list": {
'args': [],
'method': 'get',
'name': 'list',
'output': 'v1/secret-list.json#',
'query': ['continuationToken', 'limit'],
'route': '/secrets',
'stability': 'stable',
},
"ping": {
'args': [],
'method': 'get',
'name': 'ping',
'route': '/ping',
'stability': 'stable',
},
"remove": {
'args': ['name'],
'method': 'delete',
'name': 'remove',
'route': '/secret/<name>',
'stability': 'stable',
},
"set": {
'args': ['name'],
'input': 'v1/secret.json#',
'method': 'put',
'name': 'set',
'route': '/secret/<name>',
'stability': 'stable',
},
}
__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Secrets']
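# Example (editor's sketch, not part of the vendored file): paging through all
# secret names with the async client, following `continuationToken` until a
# response arrives without one. The import path is an assumption; field names
# follow v1/secret-list.json#.
#
#     import asyncio
#     from taskcluster.aio import Secrets  # assumed location of this module
#
#     async def list_all_secret_names():
#         secrets = Secrets()
#         names, token = [], None
#         while True:
#             query = {'continuationToken': token} if token else {}
#             result = await secrets.list(query=query)
#             names.extend(result['secrets'])
#             token = result.get('continuationToken')
#             if not token:
#                 return names
#
#     names = asyncio.get_event_loop().run_until_complete(list_all_secret_names())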

@@ -0,0 +1,70 @@
# coding=utf-8
#####################################################
# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
#####################################################
# noqa: E128,E201
from .asyncclient import AsyncBaseClient
from .asyncclient import createApiClient
from .asyncclient import config
from .asyncclient import createTemporaryCredentials
from .asyncclient import createSession
_defaultConfig = config
class TreeherderEvents(AsyncBaseClient):
"""
The taskcluster-treeherder service is responsible for processing
task events published by TaskCluster Queue and producing job messages
that are consumable by Treeherder.
This exchange provides those job messages for consumption by any queue that
is attached to the exchange. This could be a production Treeherder instance,
a local development environment, or a custom dashboard.
"""
classOptions = {
"exchangePrefix": "exchange/taskcluster-treeherder/v1/"
}
def jobs(self, *args, **kwargs):
"""
Job Messages
When a task run is scheduled or resolved, a message is posted to
this exchange in a Treeherder consumable format.
This exchange outputs: ``http://schemas.taskcluster.net/taskcluster-treeherder/v1/pulse-job.json#``.
This exchange takes the following keys:
* destination: destination (required)
* project: project (required)
* reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as our tooling does automatically if not specified.
"""
ref = {
'exchange': 'jobs',
'name': 'jobs',
'routingKey': [
{
'multipleWords': False,
'name': 'destination',
},
{
'multipleWords': False,
'name': 'project',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'http://schemas.taskcluster.net/taskcluster-treeherder/v1/pulse-job.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
funcinfo = {
}
__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'TreeherderEvents']
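# Example (editor's sketch, not part of the vendored file): the jobs exchange
# has no 'primary' constant, so a binding for a hypothetical 'production'
# destination across all projects looks like:
#
#     events = TreeherderEvents()
#     binding = events.jobs({'destination': 'production'})
#     # binding == {'exchange': 'exchange/taskcluster-treeherder/v1/jobs',
#     #             'routingKeyPattern': 'production.*.#'}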

@@ -0,0 +1,866 @@
# coding=utf-8
#####################################################
# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
#####################################################
# noqa: E128,E201
from .client import BaseClient
from .client import createApiClient
from .client import config
from .client import createTemporaryCredentials
from .client import createSession
_defaultConfig = config
class Auth(BaseClient):
"""
Authentication related API end-points for Taskcluster and related
services. These API end-points are of interest if you wish to:
* Authorize a request signed with Taskcluster credentials,
* Manage clients and roles,
* Inspect or audit clients and roles,
* Gain access to various services guarded by this API.
Note that in this service "authentication" refers to validating the
correctness of the supplied credentials (that the caller possesses the
appropriate access token). This service does not provide any kind of user
authentication (identifying a particular person).
### Clients
The authentication service manages _clients_; at a high level, each client
consists of a `clientId`, an `accessToken`, scopes, and some metadata.
The `clientId` and `accessToken` can be used for authentication when
calling Taskcluster APIs.
The client's scopes control the client's access to Taskcluster resources.
The scopes are *expanded* by substituting roles, as defined below.
### Roles
A _role_ consists of a `roleId`, a set of scopes and a description.
Each role constitutes a simple _expansion rule_ that says if you have
the scope: `assume:<roleId>` you get the set of scopes the role has.
Think of the `assume:<roleId>` as a scope that allows a client to assume
a role.
As with scopes, the `*` Kleene star also has special meaning if it is
located at the end of a `roleId`. If you have a role with the following
`roleId`: `my-prefix*`, then any client which has a scope starting with
`assume:my-prefix` will be allowed to assume the role.
### Guarded Services
The authentication service also has API end-points for delegating access
to some guarded service such as AWS S3, or Azure Table Storage.
Generally, we add API end-points to this server when we wish to use
Taskcluster credentials to grant access to a third-party service used
by many Taskcluster components.
"""
classOptions = {
"baseUrl": "https://auth.taskcluster.net/v1/"
}
def ping(self, *args, **kwargs):
"""
Ping Server
Respond without doing anything.
This endpoint is used to check that the service is up.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
def listClients(self, *args, **kwargs):
"""
List Clients
Get a list of all clients. With `prefix`, only clients for which
it is a prefix of the clientId are returned.
By default this end-point will try to return up to 1000 clients in one
request. But it **may return fewer, even none**.
It may also return a `continuationToken` even though there are no more
results. However, you can only be sure to have seen all results if you
keep calling `listClients` with the last `continuationToken` until you
get a result without a `continuationToken`.
This method gives output: ``v1/list-clients-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["listClients"], *args, **kwargs)
def client(self, *args, **kwargs):
"""
Get Client
Get information about a single client.
This method gives output: ``v1/get-client-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["client"], *args, **kwargs)
def createClient(self, *args, **kwargs):
"""
Create Client
Create a new client and get the `accessToken` for this client.
You should store the `accessToken` from this API call as there is no
other way to retrieve it.
If you lose the `accessToken` you can call `resetAccessToken` to reset
it, and a new `accessToken` will be returned, but you cannot retrieve the
current `accessToken`.
If a client with the same `clientId` already exists this operation will
fail. Use `updateClient` if you wish to update an existing client.
The caller's scopes must satisfy `scopes`.
This method takes input: ``v1/create-client-request.json#``
This method gives output: ``v1/create-client-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["createClient"], *args, **kwargs)
def resetAccessToken(self, *args, **kwargs):
"""
Reset `accessToken`
Reset a client's `accessToken`. This will revoke the existing
`accessToken`, generate a new `accessToken`, and return it from this
call.
There is no way to retrieve an existing `accessToken`, so if you lose it
you must reset the accessToken to acquire it again.
This method gives output: ``v1/create-client-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["resetAccessToken"], *args, **kwargs)
def updateClient(self, *args, **kwargs):
"""
Update Client
Update an existing client. The `clientId` and `accessToken` cannot be
updated, but `scopes` can be modified. The caller's scopes must
satisfy all scopes being added to the client in the update operation.
If no scopes are given in the request, the client's scopes remain
unchanged.
This method takes input: ``v1/create-client-request.json#``
This method gives output: ``v1/get-client-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["updateClient"], *args, **kwargs)
def enableClient(self, *args, **kwargs):
"""
Enable Client
Enable a client that was disabled with `disableClient`. If the client
is already enabled, this does nothing.
This is typically used by identity providers to re-enable clients that
had been disabled when the corresponding identity's scopes changed.
This method gives output: ``v1/get-client-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["enableClient"], *args, **kwargs)
def disableClient(self, *args, **kwargs):
"""
Disable Client
Disable a client. If the client is already disabled, this does nothing.
This is typically used by identity providers to disable clients when the
corresponding identity's scopes no longer satisfy the client's scopes.
This method gives output: ``v1/get-client-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["disableClient"], *args, **kwargs)
def deleteClient(self, *args, **kwargs):
"""
Delete Client
Delete a client, please note that any roles related to this client must
be deleted independently.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["deleteClient"], *args, **kwargs)
def listRoles(self, *args, **kwargs):
"""
List Roles
Get a list of all roles, each role object also includes the list of
scopes it expands to.
This method gives output: ``v1/list-roles-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["listRoles"], *args, **kwargs)
def role(self, *args, **kwargs):
"""
Get Role
Get information about a single role, including the set of scopes that the
role expands to.
This method gives output: ``v1/get-role-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["role"], *args, **kwargs)
def createRole(self, *args, **kwargs):
"""
Create Role
Create a new role.
The caller's scopes must satisfy the new role's scopes.
If there already exists a role with the same `roleId` this operation
will fail. Use `updateRole` to modify an existing role.
Creation of a role that will generate an infinite expansion will result
in an error response.
This method takes input: ``v1/create-role-request.json#``
This method gives output: ``v1/get-role-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["createRole"], *args, **kwargs)
def updateRole(self, *args, **kwargs):
"""
Update Role
Update an existing role.
The caller's scopes must satisfy all of the new scopes being added, but
need not satisfy all of the client's existing scopes.
An update of a role that will generate an infinite expansion will result
in an error response.
This method takes input: ``v1/create-role-request.json#``
This method gives output: ``v1/get-role-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["updateRole"], *args, **kwargs)
def deleteRole(self, *args, **kwargs):
"""
Delete Role
Delete a role. This operation will succeed regardless of whether or not
the role exists.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["deleteRole"], *args, **kwargs)
def expandScopesGet(self, *args, **kwargs):
"""
Expand Scopes
Return an expanded copy of the given scopeset, with scopes implied by any
roles included.
This call uses the GET method with an HTTP body. It remains only for
backward compatibility.
This method takes input: ``v1/scopeset.json#``
This method gives output: ``v1/scopeset.json#``
This method is ``deprecated``
"""
return self._makeApiCall(self.funcinfo["expandScopesGet"], *args, **kwargs)
def expandScopes(self, *args, **kwargs):
"""
Expand Scopes
Return an expanded copy of the given scopeset, with scopes implied by any
roles included.
This method takes input: ``v1/scopeset.json#``
This method gives output: ``v1/scopeset.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["expandScopes"], *args, **kwargs)
def currentScopes(self, *args, **kwargs):
"""
Get Current Scopes
Return the expanded scopes available in the request, taking into account all sources
of scopes and scope restrictions (temporary credentials, assumeScopes, client scopes,
and roles).
This method gives output: ``v1/scopeset.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["currentScopes"], *args, **kwargs)
def awsS3Credentials(self, *args, **kwargs):
"""
Get Temporary Read/Write Credentials S3
Get temporary AWS credentials for `read-write` or `read-only` access to
a given `bucket` and `prefix` within that bucket.
The `level` parameter can be `read-write` or `read-only` and determines
which type of credentials are returned. Please note that the `level`
parameter is required in the scope guarding access. The bucket name must
not contain `.`, as recommended by Amazon.
This method can only allow access to a whitelisted set of buckets. To add
a bucket to that whitelist, contact the Taskcluster team, who will add it to
the appropriate IAM policy. If the bucket is in a different AWS account, you
will also need to add a bucket policy allowing access from the Taskcluster
account. That policy should look like this:
```
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "allow-taskcluster-auth-to-delegate-access",
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::692406183521:root"
},
"Action": [
"s3:ListBucket",
"s3:GetObject",
"s3:PutObject",
"s3:DeleteObject",
"s3:GetBucketLocation"
],
"Resource": [
"arn:aws:s3:::<bucket>",
"arn:aws:s3:::<bucket>/*"
]
}
]
}
```
The credentials are set to expire after an hour, but this behavior is
subject to change. Hence, you should always read the `expires` property
from the response, if you intend to maintain active credentials in your
application.
Please note that your `prefix` may not start with slash `/`. Such a prefix
is allowed on S3, but we forbid it here to discourage bad behavior.
Also note that if your `prefix` doesn't end in a slash `/`, the STS
credentials may allow access to unexpected keys, as S3 does not treat
slashes specially. For example, a prefix of `my-folder` will allow
access to `my-folder/file.txt` as expected, but also to `my-folder.txt`,
which may not be intended.
Finally, note that the `PutObjectAcl` call is not allowed. Passing a canned
ACL other than `private` to `PutObject` is treated as a `PutObjectAcl` call, and
will result in an access-denied error from AWS. This limitation is due to a
security flaw in Amazon S3 which might otherwise allow indefinite access to
uploaded objects.
**EC2 metadata compatibility**: if the querystring parameter
`?format=iam-role-compat` is given, the response will be compatible
with the JSON exposed by the EC2 metadata service. This aims to ease
compatibility for libraries and tools built to auto-refresh credentials.
For details on the format returned by EC2 metadata service see:
[EC2 User Guide](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#instance-metadata-security-credentials).
This method gives output: ``v1/aws-s3-credentials-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["awsS3Credentials"], *args, **kwargs)
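# Editor's note (sketch, not part of the generated client): per the caveats
# above, end the prefix with '/' and track `expires` from the response. The
# level, bucket, and prefix here are hypothetical:
#
#     creds = auth.awsS3Credentials('read-only', 'some-bucket', 'my-folder/')
#     # refresh before creds['expires']; see v1/aws-s3-credentials-response.json#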
def azureAccounts(self, *args, **kwargs):
"""
List Accounts Managed by Auth
Retrieve a list of all Azure accounts managed by Taskcluster Auth.
This method gives output: ``v1/azure-account-list-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["azureAccounts"], *args, **kwargs)
def azureTables(self, *args, **kwargs):
"""
List Tables in an Account Managed by Auth
Retrieve a list of all tables in an account.
This method gives output: ``v1/azure-table-list-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["azureTables"], *args, **kwargs)
def azureTableSAS(self, *args, **kwargs):
"""
Get Shared-Access-Signature for Azure Table
Get a shared access signature (SAS) string for use with a specific Azure
Table Storage table.
The `level` parameter can be `read-write` or `read-only` and determines
which type of credentials are returned. If level is read-write, it will create the
table if it doesn't already exist.
This method gives output: ``v1/azure-table-access-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["azureTableSAS"], *args, **kwargs)
def azureContainers(self, *args, **kwargs):
"""
List containers in an Account Managed by Auth
Retrieve a list of all containers in an account.
This method gives output: ``v1/azure-container-list-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["azureContainers"], *args, **kwargs)
def azureContainerSAS(self, *args, **kwargs):
"""
Get Shared-Access-Signature for Azure Container
Get a shared access signature (SAS) string for use with a specific Azure
Blob Storage container.
The `level` parameter can be `read-write` or `read-only` and determines
which type of credentials are returned. If level is read-write, it will create the
container if it doesn't already exist.
This method gives output: ``v1/azure-container-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["azureContainerSAS"], *args, **kwargs)
def sentryDSN(self, *args, **kwargs):
"""
Get DSN for Sentry Project
Get temporary DSN (access credentials) for a sentry project.
The credentials returned can be used with any Sentry client for up to
24 hours, after which the credentials will be automatically disabled.
If the project doesn't exist it will be created, and assigned to the
initial team configured for this component. Contact a Sentry admin
to have the project transferred to a team you have access to, if needed.
This method gives output: ``v1/sentry-dsn-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["sentryDSN"], *args, **kwargs)
def statsumToken(self, *args, **kwargs):
"""
Get Token for Statsum Project
Get temporary `token` and `baseUrl` for sending metrics to statsum.
The token is valid for 24 hours, clients should refresh after expiration.
This method gives output: ``v1/statsum-token-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["statsumToken"], *args, **kwargs)
def webhooktunnelToken(self, *args, **kwargs):
"""
Get Token for Webhooktunnel Proxy
Get temporary `token` and `id` for connecting to webhooktunnel
The token is valid for 96 hours, clients should refresh after expiration.
This method gives output: ``v1/webhooktunnel-token-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["webhooktunnelToken"], *args, **kwargs)
def authenticateHawk(self, *args, **kwargs):
"""
Authenticate Hawk Request
Validate the request signature given on input and return list of scopes
that the authenticating client has.
This method is used by other services that wish to rely on Taskcluster
credentials for authentication. This way we can use Hawk without having
the secret credentials leave this service.
This method takes input: ``v1/authenticate-hawk-request.json#``
This method gives output: ``v1/authenticate-hawk-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["authenticateHawk"], *args, **kwargs)
def testAuthenticate(self, *args, **kwargs):
"""
Test Authentication
Utility method to test client implementations of Taskcluster
authentication.
Rather than using real credentials, this endpoint accepts requests with
clientId `tester` and accessToken `no-secret`. That client's scopes are
based on `clientScopes` in the request body.
The request is validated, with any certificate, authorizedScopes, etc.
applied, and the resulting scopes are checked against `requiredScopes`
from the request body. On success, the response contains the clientId
and scopes as seen by the API method.
This method takes input: ``v1/test-authenticate-request.json#``
This method gives output: ``v1/test-authenticate-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["testAuthenticate"], *args, **kwargs)
def testAuthenticateGet(self, *args, **kwargs):
"""
Test Authentication (GET)
Utility method similar to `testAuthenticate`, but with the GET method,
so it can be used with signed URLs (bewits).
Rather than using real credentials, this endpoint accepts requests with
clientId `tester` and accessToken `no-secret`. That client's scopes are
`['test:*', 'auth:create-client:test:*']`. The call fails if the
`test:authenticate-get` scope is not available.
The request is validated, with any certificate, authorizedScopes, etc.
applied, and the resulting scopes are checked, just like any API call.
On success, the response contains the clientId and scopes as seen by
the API method.
This method may later be extended to allow specification of client and
required scopes via query arguments.
This method gives output: ``v1/test-authenticate-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["testAuthenticateGet"], *args, **kwargs)
funcinfo = {
"authenticateHawk": {
'args': [],
'input': 'v1/authenticate-hawk-request.json#',
'method': 'post',
'name': 'authenticateHawk',
'output': 'v1/authenticate-hawk-response.json#',
'route': '/authenticate-hawk',
'stability': 'stable',
},
"awsS3Credentials": {
'args': ['level', 'bucket', 'prefix'],
'method': 'get',
'name': 'awsS3Credentials',
'output': 'v1/aws-s3-credentials-response.json#',
'query': ['format'],
'route': '/aws/s3/<level>/<bucket>/<prefix>',
'stability': 'stable',
},
"azureAccounts": {
'args': [],
'method': 'get',
'name': 'azureAccounts',
'output': 'v1/azure-account-list-response.json#',
'route': '/azure/accounts',
'stability': 'stable',
},
"azureContainerSAS": {
'args': ['account', 'container', 'level'],
'method': 'get',
'name': 'azureContainerSAS',
'output': 'v1/azure-container-response.json#',
'route': '/azure/<account>/containers/<container>/<level>',
'stability': 'stable',
},
"azureContainers": {
'args': ['account'],
'method': 'get',
'name': 'azureContainers',
'output': 'v1/azure-container-list-response.json#',
'query': ['continuationToken'],
'route': '/azure/<account>/containers',
'stability': 'stable',
},
"azureTableSAS": {
'args': ['account', 'table', 'level'],
'method': 'get',
'name': 'azureTableSAS',
'output': 'v1/azure-table-access-response.json#',
'route': '/azure/<account>/table/<table>/<level>',
'stability': 'stable',
},
"azureTables": {
'args': ['account'],
'method': 'get',
'name': 'azureTables',
'output': 'v1/azure-table-list-response.json#',
'query': ['continuationToken'],
'route': '/azure/<account>/tables',
'stability': 'stable',
},
"client": {
'args': ['clientId'],
'method': 'get',
'name': 'client',
'output': 'v1/get-client-response.json#',
'route': '/clients/<clientId>',
'stability': 'stable',
},
"createClient": {
'args': ['clientId'],
'input': 'v1/create-client-request.json#',
'method': 'put',
'name': 'createClient',
'output': 'v1/create-client-response.json#',
'route': '/clients/<clientId>',
'stability': 'stable',
},
"createRole": {
'args': ['roleId'],
'input': 'v1/create-role-request.json#',
'method': 'put',
'name': 'createRole',
'output': 'v1/get-role-response.json#',
'route': '/roles/<roleId>',
'stability': 'stable',
},
"currentScopes": {
'args': [],
'method': 'get',
'name': 'currentScopes',
'output': 'v1/scopeset.json#',
'route': '/scopes/current',
'stability': 'stable',
},
"deleteClient": {
'args': ['clientId'],
'method': 'delete',
'name': 'deleteClient',
'route': '/clients/<clientId>',
'stability': 'stable',
},
"deleteRole": {
'args': ['roleId'],
'method': 'delete',
'name': 'deleteRole',
'route': '/roles/<roleId>',
'stability': 'stable',
},
"disableClient": {
'args': ['clientId'],
'method': 'post',
'name': 'disableClient',
'output': 'v1/get-client-response.json#',
'route': '/clients/<clientId>/disable',
'stability': 'stable',
},
"enableClient": {
'args': ['clientId'],
'method': 'post',
'name': 'enableClient',
'output': 'v1/get-client-response.json#',
'route': '/clients/<clientId>/enable',
'stability': 'stable',
},
"expandScopes": {
'args': [],
'input': 'v1/scopeset.json#',
'method': 'post',
'name': 'expandScopes',
'output': 'v1/scopeset.json#',
'route': '/scopes/expand',
'stability': 'stable',
},
"expandScopesGet": {
'args': [],
'input': 'v1/scopeset.json#',
'method': 'get',
'name': 'expandScopesGet',
'output': 'v1/scopeset.json#',
'route': '/scopes/expand',
'stability': 'deprecated',
},
"listClients": {
'args': [],
'method': 'get',
'name': 'listClients',
'output': 'v1/list-clients-response.json#',
'query': ['prefix', 'continuationToken', 'limit'],
'route': '/clients/',
'stability': 'stable',
},
"listRoles": {
'args': [],
'method': 'get',
'name': 'listRoles',
'output': 'v1/list-roles-response.json#',
'route': '/roles/',
'stability': 'stable',
},
"ping": {
'args': [],
'method': 'get',
'name': 'ping',
'route': '/ping',
'stability': 'stable',
},
"resetAccessToken": {
'args': ['clientId'],
'method': 'post',
'name': 'resetAccessToken',
'output': 'v1/create-client-response.json#',
'route': '/clients/<clientId>/reset',
'stability': 'stable',
},
"role": {
'args': ['roleId'],
'method': 'get',
'name': 'role',
'output': 'v1/get-role-response.json#',
'route': '/roles/<roleId>',
'stability': 'stable',
},
"sentryDSN": {
'args': ['project'],
'method': 'get',
'name': 'sentryDSN',
'output': 'v1/sentry-dsn-response.json#',
'route': '/sentry/<project>/dsn',
'stability': 'stable',
},
"statsumToken": {
'args': ['project'],
'method': 'get',
'name': 'statsumToken',
'output': 'v1/statsum-token-response.json#',
'route': '/statsum/<project>/token',
'stability': 'stable',
},
"testAuthenticate": {
'args': [],
'input': 'v1/test-authenticate-request.json#',
'method': 'post',
'name': 'testAuthenticate',
'output': 'v1/test-authenticate-response.json#',
'route': '/test-authenticate',
'stability': 'stable',
},
"testAuthenticateGet": {
'args': [],
'method': 'get',
'name': 'testAuthenticateGet',
'output': 'v1/test-authenticate-response.json#',
'route': '/test-authenticate-get/',
'stability': 'stable',
},
"updateClient": {
'args': ['clientId'],
'input': 'v1/create-client-request.json#',
'method': 'post',
'name': 'updateClient',
'output': 'v1/get-client-response.json#',
'route': '/clients/<clientId>',
'stability': 'stable',
},
"updateRole": {
'args': ['roleId'],
'input': 'v1/create-role-request.json#',
'method': 'post',
'name': 'updateRole',
'output': 'v1/get-role-response.json#',
'route': '/roles/<roleId>',
'stability': 'stable',
},
"webhooktunnelToken": {
'args': [],
'method': 'get',
'name': 'webhooktunnelToken',
'output': 'v1/webhooktunnel-token-response.json#',
'route': '/webhooktunnel',
'stability': 'stable',
},
}
__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Auth']
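# Example (editor's sketch, not part of the vendored file): creating a client
# and recovering from a lost accessToken. The clientId, scopes, and dates are
# hypothetical; credentials for Auth() itself come from the TASKCLUSTER_*
# environment variables per the default config.
#
#     from taskcluster import Auth
#     auth = Auth()
#     created = auth.createClient('project/example-bot', {
#         'description': 'example client',
#         'expires': '3018-01-01T00:00:00.000Z',
#         'scopes': ['secrets:get:project/example/*'],
#     })
#     store(created['accessToken'])  # hypothetical helper; it cannot be re-read
#     rotated = auth.resetAccessToken('project/example-bot')  # if it is lost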

@@ -0,0 +1,178 @@
# coding=utf-8
#####################################################
# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
#####################################################
# noqa: E128,E201
from .client import BaseClient
from .client import createApiClient
from .client import config
from .client import createTemporaryCredentials
from .client import createSession
_defaultConfig = config
class AuthEvents(BaseClient):
"""
The auth service, typically available at `auth.taskcluster.net`,
is responsible for storing credentials, managing assignment of scopes,
and validation of request signatures from other services.
These exchanges provide notifications when credentials or roles are
updated. This is mostly so that multiple instances of the auth service
can purge their caches and synchronize state. But you are of course
welcome to use these for other purposes, monitoring changes for example.
"""
classOptions = {
"exchangePrefix": "exchange/taskcluster-auth/v1/"
}
def clientCreated(self, *args, **kwargs):
"""
Client Created Messages
Message that a new client has been created.
This exchange outputs: ``v1/client-message.json#``.
This exchange takes the following keys:
* reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as our tooling does automatically if not specified.
"""
ref = {
'exchange': 'client-created',
'name': 'clientCreated',
'routingKey': [
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/client-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def clientUpdated(self, *args, **kwargs):
"""
Client Updated Messages
Message that a client has been updated.
This exchange outputs: ``v1/client-message.json#``.
This exchange takes the following keys:
* reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as our tooling does automatically if not specified.
"""
ref = {
'exchange': 'client-updated',
'name': 'clientUpdated',
'routingKey': [
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/client-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def clientDeleted(self, *args, **kwargs):
"""
Client Deleted Messages
Message that a client has been deleted.
This exchange outputs: ``v1/client-message.json#``.
This exchange takes the following keys:
* reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as our tooling does automatically if not specified.
"""
ref = {
'exchange': 'client-deleted',
'name': 'clientDeleted',
'routingKey': [
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/client-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def roleCreated(self, *args, **kwargs):
"""
Role Created Messages
Message that a new role has been created.
This exchange outputs: ``v1/role-message.json#``.
This exchange takes the following keys:
* reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as our tooling does automatically if not specified.
"""
ref = {
'exchange': 'role-created',
'name': 'roleCreated',
'routingKey': [
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/role-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def roleUpdated(self, *args, **kwargs):
"""
Role Updated Messages
Message that a role has been updated.
This exchange outputs: ``v1/role-message.json#``.
This exchange takes the following keys:
* reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as our tooling does automatically if not specified.
"""
ref = {
'exchange': 'role-updated',
'name': 'roleUpdated',
'routingKey': [
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/role-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def roleDeleted(self, *args, **kwargs):
"""
Role Deleted Messages
Message that a role has been deleted.
This exchange outputs: ``v1/role-message.json#``.
This exchange takes the following keys:
* reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as our tooling does automatically if not specified.
"""
ref = {
'exchange': 'role-deleted',
'name': 'roleDeleted',
'routingKey': [
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/role-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
funcinfo = {
}
__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'AuthEvents']
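# Example (editor's sketch, not part of the vendored file): since these
# exchanges exist mainly for cache invalidation, a consumer would typically
# bind to all six and flush on any message. With no arguments, every pattern
# is just '#':
#
#     events = AuthEvents()
#     bindings = [events.clientCreated(), events.clientUpdated(),
#                 events.clientDeleted(), events.roleCreated(),
#                 events.roleUpdated(), events.roleDeleted()]
#     # each entry == {'exchange': 'exchange/taskcluster-auth/v1/<name>',
#     #                'routingKeyPattern': '#'}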

@@ -0,0 +1,449 @@
# coding=utf-8
#####################################################
# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
#####################################################
# noqa: E128,E201
from .client import BaseClient
from .client import createApiClient
from .client import config
from .client import createTemporaryCredentials
from .client import createSession
_defaultConfig = config
class AwsProvisioner(BaseClient):
"""
The AWS Provisioner is responsible for provisioning instances on EC2 for use in
Taskcluster. The provisioner maintains a set of worker configurations which
can be managed with an API that is typically available at
aws-provisioner.taskcluster.net/v1. This API can also perform basic instance
management tasks in addition to maintaining the internal state of worker type
configuration information.
The Provisioner runs at a configurable interval. Each iteration of the
provisioner fetches a current copy of the state that the AWS EC2 API reports. In
each iteration, we ask the Queue how many tasks are pending for that worker
type. Based on the number of tasks pending and the scaling ratio, we may
submit requests for new instances. We use pricing information, capacity and
utility factor information to decide which instance type in which region would
be the optimal configuration.
Each EC2 instance type will declare a capacity and utility factor. Capacity is
the number of tasks that a given machine is capable of running concurrently.
Utility factor is a relative measure of performance between two instance types.
We multiply the utility factor by the spot price to compare instance types and
regions when making the bidding choices.
When a new EC2 instance is instantiated, its user data contains a token in
`securityToken` that can be used with the `getSecret` method to retrieve
the worker's credentials and any needed passwords or other restricted
information. The worker is responsible for deleting the secret after
retrieving it, to prevent dissemination of the secret to other processes
which can read the instance user data.
"""
classOptions = {
"baseUrl": "https://aws-provisioner.taskcluster.net/v1"
}
def listWorkerTypeSummaries(self, *args, **kwargs):
"""
List worker types with details
Return a list of worker types, including some summary information about
current capacity for each. While this list includes all defined worker types,
there may be running EC2 instances for deleted worker types that are not
included here. The list is unordered.
This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/list-worker-types-summaries-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["listWorkerTypeSummaries"], *args, **kwargs)
def createWorkerType(self, *args, **kwargs):
"""
Create new Worker Type
Create a worker type. A worker type contains all the configuration
needed for the provisioner to manage the instances. Each worker type
knows which regions and which instance types are allowed for that
worker type. Remember that Capacity is the number of concurrent tasks
that can be run on a given EC2 resource and that Utility is the relative
performance rate between different instance types. There is no way to
configure different regions to have different sets of instance types
so ensure that all instance types are available in all regions.
This function is idempotent.
Once a worker type is in the provisioner, a background process will
begin creating instances for it based on its capacity bounds and its
pending task count from the Queue. It is the worker's responsibility
to shut itself down. The provisioner has a limit (currently 96 hours)
for all instances to prevent zombie instances from running indefinitely.
The provisioner will ensure that all instances created are tagged with
aws resource tags containing the provisioner id and the worker type.
If provided, the secrets in the global, region and instance type sections
are available using the secrets api. If specified, the scopes provided
will be used to generate a set of temporary credentials available with
the other secrets.
This method takes input: ``http://schemas.taskcluster.net/aws-provisioner/v1/create-worker-type-request.json#``
This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/get-worker-type-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["createWorkerType"], *args, **kwargs)
def updateWorkerType(self, *args, **kwargs):
"""
Update Worker Type
Provide a new copy of a worker type to replace the existing one.
This will overwrite the existing worker type definition if there
is already a worker type of that name. This method will return a
200 response along with a copy of the worker type definition created.
Note that if you are using the result of a GET on the worker-type
end point, you will need to delete the lastModified and workerType
keys from the object returned, since those fields are not allowed in
the request body for this method.
Otherwise, all input requirements and actions are the same as the
create method.
This method takes input: ``http://schemas.taskcluster.net/aws-provisioner/v1/create-worker-type-request.json#``
This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/get-worker-type-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["updateWorkerType"], *args, **kwargs)
def workerTypeLastModified(self, *args, **kwargs):
"""
Get Worker Type Last Modified Time
This method is provided to allow workers to see when they were
last modified. The value provided through UserData can be
compared against this value to see if changes have been made.
If the worker type definition has not been changed, the date
should be identical, as it is the same stored value.
This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/get-worker-type-last-modified.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["workerTypeLastModified"], *args, **kwargs)
def workerType(self, *args, **kwargs):
"""
Get Worker Type
Retrieve a copy of the requested worker type definition.
This copy contains a lastModified field as well as the worker
type name. As such, it will require manipulation to be able to
use the results of this method to submit data to the update
method.
This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/get-worker-type-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["workerType"], *args, **kwargs)
def removeWorkerType(self, *args, **kwargs):
"""
Delete Worker Type
Delete a worker type definition. This method will only delete
the worker type definition from the storage table. The actual
deletion will be handled by a background worker. As soon as this
method is called for a worker type, the background worker will
immediately submit requests to cancel all spot requests for this
worker type as well as kill all instances regardless of their
state. If you want to gracefully remove a worker type, you must
either ensure that no tasks are created with that worker type name
or you could theoretically set maxCapacity to 0, though this is
not a supported or tested action.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["removeWorkerType"], *args, **kwargs)
def listWorkerTypes(self, *args, **kwargs):
"""
List Worker Types
Return a list of string worker type names. These are the names
of all managed worker types known to the provisioner. This does
not include worker types which are leftovers from a deleted worker
type definition but are still running in AWS.
This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/list-worker-types-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["listWorkerTypes"], *args, **kwargs)
def createSecret(self, *args, **kwargs):
"""
Create new Secret
Insert a secret into the secret storage. The supplied secrets will
be provided verbatim via `getSecret`, while the supplied scopes will
be converted into credentials by `getSecret`.
This method is not ordinarily used in production; instead, the provisioner
creates a new secret directly for each spot bid.
This method takes input: ``http://schemas.taskcluster.net/aws-provisioner/v1/create-secret-request.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["createSecret"], *args, **kwargs)
def getSecret(self, *args, **kwargs):
"""
Get a Secret
Retrieve a secret from storage. The result contains any passwords or
other restricted information verbatim as well as a temporary credential
based on the scopes specified when the secret was created.
It is important that this secret is deleted by the consumer (`removeSecret`),
or else the secrets will be visible to any process which can access the
user data associated with the instance.
This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/get-secret-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["getSecret"], *args, **kwargs)
def instanceStarted(self, *args, **kwargs):
"""
Report an instance starting
An instance will report in by giving its instance id as well
as its security token. The token is checked to ensure that it
matches a real, existing token, so that random machines do not
check in. We could generate a different token, but that seems
like overkill.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["instanceStarted"], *args, **kwargs)
def removeSecret(self, *args, **kwargs):
"""
Remove a Secret
Remove a secret. After this call, a call to `getSecret` with the given
token will return no information.
It is very important that the consumer of a
secret delete the secret from storage before handing over control
to untrusted processes to prevent credential and/or secret leakage.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["removeSecret"], *args, **kwargs)
def getLaunchSpecs(self, *args, **kwargs):
"""
Get All Launch Specifications for WorkerType
This method returns a preview of all possible launch specifications
that this worker type definition could submit to EC2. It is used to
test worker types, nothing more.
**This API end-point is experimental and may be subject to change without warning.**
This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/get-launch-specs-response.json#``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["getLaunchSpecs"], *args, **kwargs)
def state(self, *args, **kwargs):
"""
Get AWS State for a worker type
Return the state of a given workertype as stored by the provisioner.
This state is stored as lists: one for running instances and one for
pending requests. The `summary` property contains an updated summary
similar to that returned from `listWorkerTypeSummaries`.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["state"], *args, **kwargs)
def backendStatus(self, *args, **kwargs):
"""
Backend Status
This endpoint is used to show the last time the provisioner
checked in. A check-in is done through the Dead Man's Snitch
API at the conclusion of a provisioning iteration, and is used
to tell whether the background provisioning process is still
running.
**Warning** this api end-point is **not stable**.
This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/backend-status-response.json#``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["backendStatus"], *args, **kwargs)
def ping(self, *args, **kwargs):
"""
Ping Server
Respond without doing anything.
This endpoint is used to check that the service is up.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
funcinfo = {
"backendStatus": {
'args': [],
'method': 'get',
'name': 'backendStatus',
'output': 'http://schemas.taskcluster.net/aws-provisioner/v1/backend-status-response.json#',
'route': '/backend-status',
'stability': 'experimental',
},
"createSecret": {
'args': ['token'],
'input': 'http://schemas.taskcluster.net/aws-provisioner/v1/create-secret-request.json#',
'method': 'put',
'name': 'createSecret',
'route': '/secret/<token>',
'stability': 'stable',
},
"createWorkerType": {
'args': ['workerType'],
'input': 'http://schemas.taskcluster.net/aws-provisioner/v1/create-worker-type-request.json#',
'method': 'put',
'name': 'createWorkerType',
'output': 'http://schemas.taskcluster.net/aws-provisioner/v1/get-worker-type-response.json#',
'route': '/worker-type/<workerType>',
'stability': 'stable',
},
"getLaunchSpecs": {
'args': ['workerType'],
'method': 'get',
'name': 'getLaunchSpecs',
'output': 'http://schemas.taskcluster.net/aws-provisioner/v1/get-launch-specs-response.json#',
'route': '/worker-type/<workerType>/launch-specifications',
'stability': 'experimental',
},
"getSecret": {
'args': ['token'],
'method': 'get',
'name': 'getSecret',
'output': 'http://schemas.taskcluster.net/aws-provisioner/v1/get-secret-response.json#',
'route': '/secret/<token>',
'stability': 'stable',
},
"instanceStarted": {
'args': ['instanceId', 'token'],
'method': 'get',
'name': 'instanceStarted',
'route': '/instance-started/<instanceId>/<token>',
'stability': 'stable',
},
"listWorkerTypeSummaries": {
'args': [],
'method': 'get',
'name': 'listWorkerTypeSummaries',
'output': 'http://schemas.taskcluster.net/aws-provisioner/v1/list-worker-types-summaries-response.json#',
'route': '/list-worker-type-summaries',
'stability': 'stable',
},
"listWorkerTypes": {
'args': [],
'method': 'get',
'name': 'listWorkerTypes',
'output': 'http://schemas.taskcluster.net/aws-provisioner/v1/list-worker-types-response.json#',
'route': '/list-worker-types',
'stability': 'stable',
},
"ping": {
'args': [],
'method': 'get',
'name': 'ping',
'route': '/ping',
'stability': 'stable',
},
"removeSecret": {
'args': ['token'],
'method': 'delete',
'name': 'removeSecret',
'route': '/secret/<token>',
'stability': 'stable',
},
"removeWorkerType": {
'args': ['workerType'],
'method': 'delete',
'name': 'removeWorkerType',
'route': '/worker-type/<workerType>',
'stability': 'stable',
},
"state": {
'args': ['workerType'],
'method': 'get',
'name': 'state',
'route': '/state/<workerType>',
'stability': 'stable',
},
"updateWorkerType": {
'args': ['workerType'],
'input': 'http://schemas.taskcluster.net/aws-provisioner/v1/create-worker-type-request.json#',
'method': 'post',
'name': 'updateWorkerType',
'output': 'http://schemas.taskcluster.net/aws-provisioner/v1/get-worker-type-response.json#',
'route': '/worker-type/<workerType>/update',
'stability': 'stable',
},
"workerType": {
'args': ['workerType'],
'method': 'get',
'name': 'workerType',
'output': 'http://schemas.taskcluster.net/aws-provisioner/v1/get-worker-type-response.json#',
'route': '/worker-type/<workerType>',
'stability': 'stable',
},
"workerTypeLastModified": {
'args': ['workerType'],
'method': 'get',
'name': 'workerTypeLastModified',
'output': 'http://schemas.taskcluster.net/aws-provisioner/v1/get-worker-type-last-modified.json#',
'route': '/worker-type-last-modified/<workerType>',
'stability': 'stable',
},
}
__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'AwsProvisioner']
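# Example (editor's sketch, not part of the vendored file): the secret
# lifecycle described in getSecret/removeSecret, as a hypothetical worker
# would run it on startup. security_token comes from the instance user data.
#
#     from taskcluster import AwsProvisioner
#     provisioner = AwsProvisioner()
#     secret = provisioner.getSecret(security_token)
#     # ...consume the response (see get-secret-response.json#)...
#     provisioner.removeSecret(security_token)  # delete before running tasks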

@@ -0,0 +1,141 @@
# coding=utf-8
#####################################################
# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
#####################################################
# noqa: E128,E201
from .client import BaseClient
from .client import createApiClient
from .client import config
from .client import createTemporaryCredentials
from .client import createSession
_defaultConfig = config
class AwsProvisionerEvents(BaseClient):
"""
Exchanges from the provisioner... more docs later
"""
classOptions = {
"exchangePrefix": "exchange/taskcluster-aws-provisioner/v1/"
}
def workerTypeCreated(self, *args, **kwargs):
"""
WorkerType Created Message
When a new `workerType` is created a message will be published to this
exchange.
This exchange outputs: ``http://schemas.taskcluster.net/aws-provisioner/v1/worker-type-message.json#``.
This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* workerType: WorkerType that this message concerns. (required)
* reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as our tooling does automatically if not specified.
"""
ref = {
'exchange': 'worker-type-created',
'name': 'workerTypeCreated',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'workerType',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'http://schemas.taskcluster.net/aws-provisioner/v1/worker-type-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def workerTypeUpdated(self, *args, **kwargs):
"""
WorkerType Updated Message
When a `workerType` is updated a message will be published to this
exchange.
This exchange outputs: ``http://schemas.taskcluster.net/aws-provisioner/v1/worker-type-message.json#``.
This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* workerType: WorkerType that this message concerns. (required)
* reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as our tooling does automatically if not specified.
"""
ref = {
'exchange': 'worker-type-updated',
'name': 'workerTypeUpdated',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'workerType',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'http://schemas.taskcluster.net/aws-provisioner/v1/worker-type-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def workerTypeRemoved(self, *args, **kwargs):
"""
WorkerType Removed Message
When a `workerType` is removed a message will be published to this
exchange.
This exchange outputs: ``http://schemas.taskcluster.net/aws-provisioner/v1/worker-type-message.json#``.
This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* workerType: WorkerType that this message concerns. (required)
* reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as our tooling does automatically if not specified.
"""
ref = {
'exchange': 'worker-type-removed',
'name': 'workerTypeRemoved',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'workerType',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'http://schemas.taskcluster.net/aws-provisioner/v1/worker-type-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
funcinfo = {
}
__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'AwsProvisionerEvents']

@@ -0,0 +1,691 @@
"""This module is used to interact with taskcluster rest apis"""
from __future__ import absolute_import, division, print_function
import os
import json
import logging
import copy
import hashlib
import hmac
import datetime
import calendar
import requests
import time
import six
import warnings
from six.moves import urllib
import mohawk
import mohawk.bewit
import taskcluster.exceptions as exceptions
import taskcluster.utils as utils
log = logging.getLogger(__name__)
# Default configuration
_defaultConfig = config = {
'credentials': {
'clientId': os.environ.get('TASKCLUSTER_CLIENT_ID'),
'accessToken': os.environ.get('TASKCLUSTER_ACCESS_TOKEN'),
'certificate': os.environ.get('TASKCLUSTER_CERTIFICATE'),
},
'maxRetries': 5,
'signedUrlExpiration': 15 * 60,
}
def createSession(*args, **kwargs):
""" Create a new requests session. This passes through all positional and
keyword arguments to the requests.Session() constructor
"""
return requests.Session(*args, **kwargs)
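# Editor's note (sketch): concrete clients built on BaseClient below pick up
# credentials from the TASKCLUSTER_* environment variables via _defaultConfig;
# options passed to a constructor override them, e.g. a hypothetical
#     Auth({'credentials': {'clientId': b'tester', 'accessToken': b'no-secret'},
#           'maxRetries': 2})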
class BaseClient(object):
""" Base Class for API Client Classes. Each individual Client class
needs to set up its own methods for REST endpoints and Topic Exchange
routing key patterns. The _makeApiCall() and _makeTopicExchange() methods
help with this.
"""
def __init__(self, options=None, session=None):
o = copy.deepcopy(self.classOptions)
o.update(_defaultConfig)
if options:
o.update(options)
credentials = o.get('credentials')
if credentials:
for x in ('accessToken', 'clientId', 'certificate'):
value = credentials.get(x)
if value and not isinstance(value, six.binary_type):
try:
credentials[x] = credentials[x].encode('ascii')
except Exception:
s = '%s (%s) must be unicode encodable' % (x, credentials[x])
raise exceptions.TaskclusterAuthFailure(s)
self.options = o
if 'credentials' in o:
log.debug('credentials key scrubbed from logging output')
log.debug(dict((k, v) for k, v in o.items() if k != 'credentials'))
if session:
self.session = session
else:
self.session = self._createSession()
def _createSession(self):
""" Create a requests session.
Helper method which can be overridden by child classes.
"""
return createSession()
def makeHawkExt(self):
""" Make an 'ext' for Hawk authentication """
o = self.options
c = o.get('credentials', {})
if c.get('clientId') and c.get('accessToken'):
ext = {}
cert = c.get('certificate')
if cert:
if six.PY3 and isinstance(cert, six.binary_type):
cert = cert.decode()
if isinstance(cert, six.string_types):
cert = json.loads(cert)
ext['certificate'] = cert
if 'authorizedScopes' in o:
ext['authorizedScopes'] = o['authorizedScopes']
# .encode('base64') inserts a newline, which hawk doesn't
# like but doesn't strip itself
return utils.makeB64UrlSafe(utils.encodeStringForB64Header(utils.dumpJson(ext)).strip())
else:
return {}
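# Editor's note (sketch): for temporary credentials the ext above carries the
# certificate, and authorizedScopes further restricts the request; decoded, a
# hypothetical ext looks like
#     {"certificate": {...}, "authorizedScopes": ["auth:create-client:project/*"]}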
def _makeTopicExchange(self, entry, *args, **kwargs):
if len(args) == 0 and not kwargs:
routingKeyPattern = {}
elif len(args) >= 1:
if kwargs or len(args) != 1:
errStr = 'Pass either a string, single dictionary or only kwargs'
raise exceptions.TaskclusterTopicExchangeFailure(errStr)
routingKeyPattern = args[0]
else:
routingKeyPattern = kwargs
data = {
'exchange': '%s/%s' % (self.options['exchangePrefix'].rstrip('/'),
entry['exchange'].lstrip('/'))
}
# If we are passed in a string, we can short-circuit this function
if isinstance(routingKeyPattern, six.string_types):
log.debug('Passing through string for topic exchange key')
data['routingKeyPattern'] = routingKeyPattern
return data
if not isinstance(routingKeyPattern, dict):
errStr = 'routingKeyPattern must eventually be a dict'
raise exceptions.TaskclusterTopicExchangeFailure(errStr)
if not routingKeyPattern:
routingKeyPattern = {}
# There is no canonical meaning for the maxSize and required
# reference entry in the JS client, so we don't try to define
# them here, even though they sound pretty obvious
routingKey = []
for key in entry['routingKey']:
if 'constant' in key:
value = key['constant']
elif key['name'] in routingKeyPattern:
log.debug('Found %s in routing key params', key['name'])
value = str(routingKeyPattern[key['name']])
if not key.get('multipleWords') and '.' in value:
raise exceptions.TaskclusterTopicExchangeFailure(
'Cannot have periods in single word keys')
else:
value = '#' if key.get('multipleWords') else '*'
log.debug('Did not find %s in input params, using %s', key['name'], value)
routingKey.append(value)
data['routingKeyPattern'] = '.'.join([str(x) for x in routingKey])
return data
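A quick sketch of what _makeTopicExchange produces, using the GithubEvents class vendored later in this patch (the organization name is a placeholder; no network access is needed):
# Sketch: routing-key pattern generation; module path per this vendored copy.
from taskcluster.githubevents import GithubEvents

events = GithubEvents()
pattern = events.push(organization='servo')  # 'servo' is illustrative
# Unspecified single-word keys render as '*', multi-word keys as '#':
assert pattern == {
    'exchange': 'exchange/taskcluster-github/v1/push',
    'routingKeyPattern': 'primary.servo.*',
}
# A plain string short-circuits rendering and is passed through as-is:
assert events.push('primary.servo.#')['routingKeyPattern'] == 'primary.servo.#'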
def buildUrl(self, methodName, *args, **kwargs):
entry = self.funcinfo.get(methodName)
if not entry:
raise exceptions.TaskclusterFailure(
'Requested method "%s" not found in API Reference' % methodName)
routeParams, _, query, _, _ = self._processArgs(entry, *args, **kwargs)
route = self._subArgsInRoute(entry, routeParams)
if query:
route += '?' + urllib.parse.urlencode(query)
return self._joinBaseUrlAndRoute(route)
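For example, a sketch using the Index client vendored later in this patch (the namespace is a placeholder; this runs offline):
# Sketch: buildUrl substitutes route parameters and urlencodes the query.
from taskcluster.index import Index

index = Index()
assert (index.buildUrl('findTask', 'project.example.latest') ==
        'https://index.taskcluster.net/v1/task/project.example.latest')
assert (index.buildUrl('listTasks', 'project.example', query={'limit': 2}) ==
        'https://index.taskcluster.net/v1/tasks/project.example?limit=2')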
def buildSignedUrl(self, methodName, *args, **kwargs):
""" Build a signed URL. This URL contains the credentials needed to access
a resource."""
if 'expiration' in kwargs:
expiration = kwargs['expiration']
del kwargs['expiration']
else:
expiration = self.options['signedUrlExpiration']
expiration = int(time.time() + expiration) # Mainly so that we throw if it's not a number
requestUrl = self.buildUrl(methodName, *args, **kwargs)
if not self._hasCredentials():
raise exceptions.TaskclusterAuthFailure('Invalid Hawk Credentials')
clientId = utils.toStr(self.options['credentials']['clientId'])
accessToken = utils.toStr(self.options['credentials']['accessToken'])
def genBewit():
# We need to fix the output of get_bewit. It returns a url-safe base64
# encoded string, which contains a list of tokens separated by '\'.
# The first one is the clientId, the second is an int, the third is
# url-safe base64 encoded MAC, the fourth is the ext param.
# The problem is that the nested url-safe base64 encoded MAC must be
# base64 (i.e. not url safe) or server-side will complain.
# id + '\\' + exp + '\\' + mac + '\\' + options.ext;
resource = mohawk.base.Resource(
credentials={
'id': clientId,
'key': accessToken,
'algorithm': 'sha256',
},
method='GET',
ext=utils.toStr(self.makeHawkExt()),
url=requestUrl,
timestamp=expiration,
nonce='',
# content='',
# content_type='',
)
bewit = mohawk.bewit.get_bewit(resource)
return bewit.rstrip('=')
bewit = genBewit()
if not bewit:
raise exceptions.TaskclusterFailure('Did not receive a bewit')
u = urllib.parse.urlparse(requestUrl)
qs = u.query
if qs:
qs += '&'
qs += 'bewit=%s' % bewit
return urllib.parse.urlunparse((
u.scheme,
u.netloc,
u.path,
u.params,
qs,
u.fragment,
))
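A usage sketch; the credentials are placeholders and the resulting bewit value varies with the expiration time:
# Sketch: a signed URL embeds a bewit token so the link works without
# further authentication until it expires. Placeholder credentials.
from taskcluster.index import Index

index = Index({
    'credentials': {
        'clientId': 'my-client',
        'accessToken': 'my-access-token',
    },
})
signed = index.buildSignedUrl('findTask', 'project.example.latest',
                              expiration=10 * 60)  # seconds from now
# -> https://index.taskcluster.net/v1/task/project.example.latest?bewit=...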
def _joinBaseUrlAndRoute(self, route):
return urllib.parse.urljoin(
'{}/'.format(self.options['baseUrl'].rstrip('/')),
route.lstrip('/')
)
def _makeApiCall(self, entry, *args, **kwargs):
""" This function is used to dispatch calls to other functions
for a given API Reference entry"""
x = self._processArgs(entry, *args, **kwargs)
routeParams, payload, query, paginationHandler, paginationLimit = x
route = self._subArgsInRoute(entry, routeParams)
# TODO: Check for limit being in the Query of the api ref
if paginationLimit and 'limit' in entry.get('query', []):
query['limit'] = paginationLimit
if query:
_route = route + '?' + urllib.parse.urlencode(query)
else:
_route = route
response = self._makeHttpRequest(entry['method'], _route, payload)
if paginationHandler:
paginationHandler(response)
while response.get('continuationToken'):
query['continuationToken'] = response['continuationToken']
_route = route + '?' + urllib.parse.urlencode(query)
response = self._makeHttpRequest(entry['method'], _route, payload)
paginationHandler(response)
else:
return response
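A pagination sketch (requires a reachable index service; the namespace is a placeholder):
# Sketch: accumulate results across continuationToken pages.
from taskcluster.index import Index

namespaces = []

def collect(page):
    # Called once per HTTP response, including the first.
    namespaces.extend(page.get('namespaces', []))

Index().listNamespaces('project.example',
                       paginationHandler=collect,
                       paginationLimit=100)
print('fetched %d namespaces' % len(namespaces))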
def _processArgs(self, entry, *_args, **_kwargs):
""" Given an entry, positional and keyword arguments, figure out what
the query-string options, payload and api arguments are.
"""
# We need the args to be a list so we can mutate them
args = list(_args)
kwargs = copy.deepcopy(_kwargs)
reqArgs = entry['args']
routeParams = {}
query = {}
payload = None
kwApiArgs = {}
paginationHandler = None
paginationLimit = None
# There are three formats for calling methods:
# 1. method(v1, v1, payload)
# 2. method(payload, k1=v1, k2=v2)
# 3. method(payload=payload, query=query, params={k1: v1, k2: v2})
if len(kwargs) == 0:
if 'input' in entry and len(args) == len(reqArgs) + 1:
payload = args.pop()
if len(args) != len(reqArgs):
log.debug(args)
log.debug(reqArgs)
raise exceptions.TaskclusterFailure('Incorrect number of positional arguments')
log.debug('Using method(v1, v2, payload) calling convention')
else:
# We're considering kwargs which are the api route parameters to be
# called 'flat' because they're top level keys. We're special
# casing calls which have only api-arg kwargs and possibly a payload
# value and handling them directly.
isFlatKwargs = True
if len(kwargs) == len(reqArgs):
for arg in reqArgs:
if not kwargs.get(arg, False):
isFlatKwargs = False
break
if 'input' in entry and len(args) != 1:
isFlatKwargs = False
if 'input' not in entry and len(args) != 0:
isFlatKwargs = False
else:
pass # We're using payload=, query= and param=
else:
isFlatKwargs = False
# Now we're going to handle the two types of kwargs. The first is
# 'flat' ones, where the api params are passed as top-level keyword arguments
if isFlatKwargs:
if 'input' in entry:
payload = args.pop()
kwApiArgs = kwargs
log.debug('Using method(payload, k1=v1, k2=v2) calling convention')
warnings.warn(
"The method(payload, k1=v1, k2=v2) calling convention will soon be deprecated",
PendingDeprecationWarning
)
else:
kwApiArgs = kwargs.get('params', {})
payload = kwargs.get('payload', None)
query = kwargs.get('query', {})
paginationHandler = kwargs.get('paginationHandler', None)
paginationLimit = kwargs.get('paginationLimit', None)
log.debug('Using method(payload=payload, query=query, params={k1: v1, k2: v2}) calling convention')
if 'input' in entry and payload is None:
raise exceptions.TaskclusterFailure('Payload is required')
# These all need to be rendered down to a string, let's just check that
# they are up front and fail fast
for arg in args:
if not isinstance(arg, six.string_types) and not isinstance(arg, int):
raise exceptions.TaskclusterFailure(
'Positional arg "%s" to %s is not a string or int' % (arg, entry['name']))
for name, arg in six.iteritems(kwApiArgs):
if not isinstance(arg, six.string_types) and not isinstance(arg, int):
raise exceptions.TaskclusterFailure(
'KW arg "%s: %s" to %s is not a string or int' % (name, arg, entry['name']))
if len(args) > 0 and len(kwApiArgs) > 0:
raise exceptions.TaskclusterFailure('Specify either positional or key word arguments')
# We know for sure that if we don't give enough arguments the call
# should fail. We don't yet know if we should fail because of too many
# arguments, because we might be overwriting positional ones with kw ones
if len(reqArgs) > len(args) + len(kwApiArgs):
raise exceptions.TaskclusterFailure(
'%s takes %d args, only %d were given' % (
entry['name'], len(reqArgs), len(args) + len(kwApiArgs)))
# We also need to error out when we have more positional args than required
# because we'll need to go through the lists of provided and required args
# at the same time. Not disqualifying early means we'll get IndexErrors if
# there are more positional arguments than required
if len(args) > len(reqArgs):
raise exceptions.TaskclusterFailure(
'%s called with too many positional args' % entry['name'])
i = 0
for arg in args:
log.debug('Found a positional argument: %s', arg)
routeParams[reqArgs[i]] = arg
i += 1
log.debug('After processing positional arguments, we have: %s', routeParams)
routeParams.update(kwApiArgs)
log.debug('After keyword arguments, we have: %s', routeParams)
if len(reqArgs) != len(routeParams):
errMsg = '%s takes %s args, %s given' % (
entry['name'],
','.join(reqArgs),
routeParams.keys())
log.error(errMsg)
raise exceptions.TaskclusterFailure(errMsg)
for reqArg in reqArgs:
if reqArg not in routeParams:
errMsg = '%s requires a "%s" argument which was not provided' % (
entry['name'], reqArg)
log.error(errMsg)
raise exceptions.TaskclusterFailure(errMsg)
return routeParams, payload, query, paginationHandler, paginationLimit
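The three conventions side by side, as a sketch against the Hooks client vendored later in this patch (placeholder identifiers and payload; each call actually triggers the hook, so a reachable service and matching scopes are required):
# Sketch: three equivalent ways to call a generated method.
from taskcluster.hooks import Hooks

hooks = Hooks()
payload = {}  # must satisfy the hook's triggerSchema; {} as a placeholder

# 1. method(v1, v2, payload): positional route args, payload last.
hooks.triggerHook('my-group', 'my-hook', payload)
# 2. method(payload, k1=v1, ...): pending deprecation, per the warning above.
hooks.triggerHook(payload, hookGroupId='my-group', hookId='my-hook')
# 3. method(payload=..., query=..., params={...}): fully keyword-based.
hooks.triggerHook(payload=payload,
                  params={'hookGroupId': 'my-group', 'hookId': 'my-hook'})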
def _subArgsInRoute(self, entry, args):
""" Given a route like "/task/<taskId>/artifacts" and a mapping like
{"taskId": "12345"}, return a string like "/task/12345/artifacts"
"""
route = entry['route']
for arg, val in six.iteritems(args):
toReplace = "<%s>" % arg
if toReplace not in route:
raise exceptions.TaskclusterFailure(
'Arg %s not found in route for %s' % (arg, entry['name']))
val = urllib.parse.quote(str(val).encode("utf-8"), '')
route = route.replace("<%s>" % arg, val)
return route.lstrip('/')
def _hasCredentials(self):
""" Return True, if credentials is given """
cred = self.options.get('credentials')
return (
cred and
'clientId' in cred and
'accessToken' in cred and
cred['clientId'] and
cred['accessToken']
)
def _makeHttpRequest(self, method, route, payload):
""" Make an HTTP Request for the API endpoint. This method wraps
the logic about doing failure retry and passes off the actual work
of doing an HTTP request to another method."""
url = self._joinBaseUrlAndRoute(route)
log.debug('Full URL used is: %s', url)
hawkExt = self.makeHawkExt()
# Serialize payload if given
if payload is not None:
payload = utils.dumpJson(payload)
# Do a loop of retries
retry = -1 # incremented at the top of the loop, so attempt 1 is retry 0
retries = self.options['maxRetries']
while retry < retries:
retry += 1
# if this isn't the first retry then we sleep
if retry > 0:
time.sleep(utils.calculateSleepTime(retry))
# Construct header
if self._hasCredentials():
sender = mohawk.Sender(
credentials={
'id': self.options['credentials']['clientId'],
'key': self.options['credentials']['accessToken'],
'algorithm': 'sha256',
},
ext=hawkExt if hawkExt else {},
url=url,
content=payload if payload else '',
content_type='application/json' if payload else '',
method=method,
)
headers = {'Authorization': sender.request_header}
else:
log.debug('Not using hawk!')
headers = {}
if payload:
# Set header for JSON if payload is given, note that we serialize
# outside this loop.
headers['Content-Type'] = 'application/json'
log.debug('Making attempt %d', retry)
try:
response = utils.makeSingleHttpRequest(method, url, payload, headers)
except requests.exceptions.RequestException as rerr:
if retry < retries:
log.warning('Retrying because of: %s', rerr)
continue
# raise a connection exception
raise exceptions.TaskclusterConnectionError(
"Failed to establish connection",
superExc=rerr
)
# Handle non 2xx status code and retry if possible
status = response.status_code
if status == 204:
return None
# Catch retryable errors and go to the beginning of the loop
# to do the retry
if 500 <= status and status < 600 and retry < retries:
log.warning('Retrying because of a %s status code', status)
continue
# Throw errors for non-retryable errors
if status < 200 or status >= 300:
data = {}
try:
data = response.json()
except ValueError:
pass # Ignore JSON errors in error messages
# Find error message
message = "Unknown Server Error"
if isinstance(data, dict) and data.get('message'):
message = data['message']
else:
if status == 401:
message = "Authentication Error"
elif status == 500:
message = "Internal Server Error"
# Raise TaskclusterAuthFailure if this is an auth issue
if status == 401:
raise exceptions.TaskclusterAuthFailure(
message,
status_code=status,
body=data,
superExc=None
)
# Raise TaskclusterRestFailure for all other issues
raise exceptions.TaskclusterRestFailure(
message,
status_code=status,
body=data,
superExc=None
)
# Try to load JSON
try:
return response.json()
except ValueError:
return {"response": response}
# This code-path should be unreachable
assert False, "Error from last retry should have been raised!"
def createApiClient(name, api):
attributes = dict(
name=name,
__doc__=api.get('description'),
classOptions={},
funcinfo={},
)
copiedOptions = ('baseUrl', 'exchangePrefix')
for opt in copiedOptions:
if opt in api['reference']:
attributes['classOptions'][opt] = api['reference'][opt]
for entry in api['reference']['entries']:
if entry['type'] == 'function':
def addApiCall(e):
def apiCall(self, *args, **kwargs):
return self._makeApiCall(e, *args, **kwargs)
return apiCall
f = addApiCall(entry)
docStr = "Call the %s api's %s method. " % (name, entry['name'])
if entry['args'] and len(entry['args']) > 0:
docStr += "This method takes:\n\n"
docStr += '\n'.join(['- ``%s``' % x for x in entry['args']])
docStr += '\n\n'
else:
docStr += "This method takes no arguments. "
if 'input' in entry:
docStr += "This method takes input ``%s``. " % entry['input']
if 'output' in entry:
docStr += "This method gives output ``%s``" % entry['output']
docStr += '\n\nThis method does a ``%s`` to ``%s``.' % (
entry['method'].upper(), entry['route'])
f.__doc__ = docStr
attributes['funcinfo'][entry['name']] = entry
elif entry['type'] == 'topic-exchange':
def addTopicExchange(e):
def topicExchange(self, *args, **kwargs):
return self._makeTopicExchange(e, *args, **kwargs)
return topicExchange
f = addTopicExchange(entry)
docStr = 'Generate a routing key pattern for the %s exchange. ' % entry['exchange']
docStr += 'This method takes a given routing key as a string or a '
docStr += 'dictionary. For each given dictionary key, the corresponding '
docStr += 'routing key token takes its value. For routing key tokens '
docStr += 'which are not specified by the dictionary, the * or # character '
docStr += 'is used depending on whether or not the key allows multiple words.\n\n'
docStr += 'This exchange takes the following keys:\n\n'
docStr += '\n'.join(['- ``%s``' % x['name'] for x in entry['routingKey']])
f.__doc__ = docStr
# Add whichever function we created
f.__name__ = str(entry['name'])
attributes[entry['name']] = f
return type(utils.toStr(name), (BaseClient,), attributes)
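A minimal sketch of the API-reference shape createApiClient consumes; this toy reference is illustrative, not one of the generated references below (runs offline):
# Sketch: build a client class from a hand-written API reference.
from taskcluster.client import createApiClient

api = {
    'description': 'A toy service',
    'reference': {
        'baseUrl': 'https://toy.example.com/v1',
        'entries': [{
            'type': 'function',
            'name': 'getThing',
            'method': 'get',
            'route': '/things/<thingId>',
            'args': ['thingId'],
        }],
    },
}
Toy = createApiClient('Toy', api)
assert (Toy().buildUrl('getThing', '42') ==
        'https://toy.example.com/v1/things/42')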
def createTemporaryCredentials(clientId, accessToken, start, expiry, scopes, name=None):
""" Create a set of temporary credentials
Callers should not apply any clock skew; clock drift is accounted for by
the auth service.
clientId: the issuing clientId
accessToken: the issuer's accessToken
start: start time of credentials (datetime.datetime)
expiry: expiration time of credentials, (datetime.datetime)
scopes: list of scopes granted
name: credential name (optional)
Returns a dictionary in the form:
{ 'clientId': str, 'accessToken': str, 'certificate': str}
"""
for scope in scopes:
if not isinstance(scope, six.string_types):
raise exceptions.TaskclusterFailure('Scope must be string')
# Credentials can only be valid for 31 days. I hope that
# this is validated on the server somehow...
if expiry - start > datetime.timedelta(days=31):
raise exceptions.TaskclusterFailure('Only 31 days allowed')
# We multiply times by 1000 because the auth service is JS and as a result
# uses milliseconds instead of seconds
cert = dict(
version=1,
scopes=scopes,
start=calendar.timegm(start.utctimetuple()) * 1000,
expiry=calendar.timegm(expiry.utctimetuple()) * 1000,
seed=utils.slugId() + utils.slugId(),
)
# if this is a named temporary credential, include the issuer in the certificate
if name:
cert['issuer'] = utils.toStr(clientId)
sig = ['version:' + utils.toStr(cert['version'])]
if name:
sig.extend([
'clientId:' + utils.toStr(name),
'issuer:' + utils.toStr(clientId),
])
sig.extend([
'seed:' + utils.toStr(cert['seed']),
'start:' + utils.toStr(cert['start']),
'expiry:' + utils.toStr(cert['expiry']),
'scopes:'
] + scopes)
sigStr = '\n'.join(sig).encode()
if isinstance(accessToken, six.text_type):
accessToken = accessToken.encode()
sig = hmac.new(accessToken, sigStr, hashlib.sha256).digest()
cert['signature'] = utils.encodeStringForB64Header(sig)
newToken = hmac.new(accessToken, cert['seed'], hashlib.sha256).digest()
newToken = utils.makeB64UrlSafe(utils.encodeStringForB64Header(newToken)).replace(b'=', b'')
return {
'clientId': name or clientId,
'accessToken': newToken,
'certificate': utils.dumpJson(cert),
}
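A usage sketch; the issuing credentials and scope are placeholders (this runs offline, since the signature is computed locally):
# Sketch: mint temporary credentials valid for one hour.
import datetime
from taskcluster.client import createTemporaryCredentials

now = datetime.datetime.utcnow()
temp = createTemporaryCredentials(
    clientId='issuer-client',           # placeholder issuer
    accessToken='issuer-access-token',  # placeholder secret
    start=now,
    expiry=now + datetime.timedelta(hours=1),
    scopes=['queue:create-task:example/placeholder'],  # placeholder scope
    name='my-temp-client',              # optional named credential
)
# temp -> {'clientId': 'my-temp-client', 'accessToken': ..., 'certificate': ...}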
__all__ = [
'createTemporaryCredentials',
'config',
'BaseClient',
'createApiClient',
]

View File

@ -0,0 +1,492 @@
# coding=utf-8
#####################################################
# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
#####################################################
# noqa: E128,E201
from .client import BaseClient
from .client import createApiClient
from .client import config
from .client import createTemporaryCredentials
from .client import createSession
_defaultConfig = config
class EC2Manager(BaseClient):
"""
A taskcluster service which manages EC2 instances. This service does not understand any taskcluster concepts intrinsically other than using the name `workerType` to refer to a group of associated instances. Unless you are working on building a provisioner for AWS, you almost certainly do not want to use this service
"""
classOptions = {
"baseUrl": "https://ec2-manager.taskcluster.net/v1"
}
def listWorkerTypes(self, *args, **kwargs):
"""
See the list of worker types which are known to be managed
This method is only for debugging the ec2-manager
This method gives output: ``http://schemas.taskcluster.net/ec2-manager/v1/list-worker-types.json#``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["listWorkerTypes"], *args, **kwargs)
def runInstance(self, *args, **kwargs):
"""
Run an instance
Request an instance of a worker type
This method takes input: ``http://schemas.taskcluster.net/ec2-manager/v1/run-instance-request.json#``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["runInstance"], *args, **kwargs)
def terminateWorkerType(self, *args, **kwargs):
"""
Terminate all resources from a worker type
Terminate all instances for this worker type
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["terminateWorkerType"], *args, **kwargs)
def workerTypeStats(self, *args, **kwargs):
"""
Look up the resource stats for a workerType
Return an object which has a generic state description. This only contains counts of instances
This method gives output: ``http://schemas.taskcluster.net/ec2-manager/v1/worker-type-resources.json#``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["workerTypeStats"], *args, **kwargs)
def workerTypeHealth(self, *args, **kwargs):
"""
Look up the resource health for a workerType
Return a view of the health of a given worker type
This method gives output: ``http://schemas.taskcluster.net/ec2-manager/v1/health.json#``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["workerTypeHealth"], *args, **kwargs)
def workerTypeErrors(self, *args, **kwargs):
"""
Look up the most recent errors of a workerType
Return a list of the most recent errors encountered by a worker type
This method gives output: ``http://schemas.taskcluster.net/ec2-manager/v1/errors.json#``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["workerTypeErrors"], *args, **kwargs)
def workerTypeState(self, *args, **kwargs):
"""
Look up the resource state for a workerType
Return state information for a given worker type
This method gives output: ``http://schemas.taskcluster.net/ec2-manager/v1/worker-type-state.json#``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["workerTypeState"], *args, **kwargs)
def ensureKeyPair(self, *args, **kwargs):
"""
Ensure a KeyPair for a given worker type exists
Idempotently ensure that a keypair of a given name exists
This method takes input: ``http://schemas.taskcluster.net/ec2-manager/v1/create-key-pair.json#``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["ensureKeyPair"], *args, **kwargs)
def removeKeyPair(self, *args, **kwargs):
"""
Ensure a KeyPair for a given worker type does not exist
Ensure that a keypair of a given name does not exist.
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["removeKeyPair"], *args, **kwargs)
def terminateInstance(self, *args, **kwargs):
"""
Terminate an instance
Terminate an instance in a specified region
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["terminateInstance"], *args, **kwargs)
def getPrices(self, *args, **kwargs):
"""
Request prices for EC2
Return a list of possible prices for EC2
This method gives output: ``http://schemas.taskcluster.net/ec2-manager/v1/prices.json#``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["getPrices"], *args, **kwargs)
def getSpecificPrices(self, *args, **kwargs):
"""
Request prices for EC2
Return a list of possible prices for EC2
This method takes input: ``http://schemas.taskcluster.net/ec2-manager/v1/prices-request.json#``
This method gives output: ``http://schemas.taskcluster.net/ec2-manager/v1/prices.json#``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["getSpecificPrices"], *args, **kwargs)
def getHealth(self, *args, **kwargs):
"""
Get EC2 account health metrics
Give some basic stats on the health of our EC2 account
This method gives output: ``http://schemas.taskcluster.net/ec2-manager/v1/health.json#``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["getHealth"], *args, **kwargs)
def getRecentErrors(self, *args, **kwargs):
"""
Look up the most recent errors in the provisioner across all worker types
Return a list of recent errors encountered
This method gives output: ``http://schemas.taskcluster.net/ec2-manager/v1/errors.json#``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["getRecentErrors"], *args, **kwargs)
def regions(self, *args, **kwargs):
"""
See the list of regions managed by this ec2-manager
This method is only for debugging the ec2-manager
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["regions"], *args, **kwargs)
def amiUsage(self, *args, **kwargs):
"""
See the list of AMIs and their usage
List AMIs and their usage by returning a list of objects in the form:
{
region: string
volumetype: string
lastused: timestamp
}
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["amiUsage"], *args, **kwargs)
def ebsUsage(self, *args, **kwargs):
"""
See the current EBS volume usage list
Lists current EBS volume usage by returning a list of objects
that are uniquely defined by {region, volumetype, state} in the form:
{
region: string,
volumetype: string,
state: string,
totalcount: integer,
totalgb: integer,
touched: timestamp (last time that information was updated),
}
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["ebsUsage"], *args, **kwargs)
def dbpoolStats(self, *args, **kwargs):
"""
Statistics on the Database client pool
This method is only for debugging the ec2-manager
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["dbpoolStats"], *args, **kwargs)
def allState(self, *args, **kwargs):
"""
List out the entire internal state
This method is only for debugging the ec2-manager
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["allState"], *args, **kwargs)
def sqsStats(self, *args, **kwargs):
"""
Statistics on the sqs queues
This method is only for debugging the ec2-manager
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["sqsStats"], *args, **kwargs)
def purgeQueues(self, *args, **kwargs):
"""
Purge the SQS queues
This method is only for debugging the ec2-manager
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["purgeQueues"], *args, **kwargs)
def apiReference(self, *args, **kwargs):
"""
API Reference
Generate an API reference for this service
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["apiReference"], *args, **kwargs)
def ping(self, *args, **kwargs):
"""
Ping Server
Respond without doing anything.
This endpoint is used to check that the service is up.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
funcinfo = {
"allState": {
'args': [],
'method': 'get',
'name': 'allState',
'route': '/internal/all-state',
'stability': 'experimental',
},
"amiUsage": {
'args': [],
'method': 'get',
'name': 'amiUsage',
'route': '/internal/ami-usage',
'stability': 'experimental',
},
"apiReference": {
'args': [],
'method': 'get',
'name': 'apiReference',
'route': '/internal/api-reference',
'stability': 'experimental',
},
"dbpoolStats": {
'args': [],
'method': 'get',
'name': 'dbpoolStats',
'route': '/internal/db-pool-stats',
'stability': 'experimental',
},
"ebsUsage": {
'args': [],
'method': 'get',
'name': 'ebsUsage',
'route': '/internal/ebs-usage',
'stability': 'experimental',
},
"ensureKeyPair": {
'args': ['name'],
'input': 'http://schemas.taskcluster.net/ec2-manager/v1/create-key-pair.json#',
'method': 'get',
'name': 'ensureKeyPair',
'route': '/key-pairs/<name>',
'stability': 'experimental',
},
"getHealth": {
'args': [],
'method': 'get',
'name': 'getHealth',
'output': 'http://schemas.taskcluster.net/ec2-manager/v1/health.json#',
'route': '/health',
'stability': 'experimental',
},
"getPrices": {
'args': [],
'method': 'get',
'name': 'getPrices',
'output': 'http://schemas.taskcluster.net/ec2-manager/v1/prices.json#',
'route': '/prices',
'stability': 'experimental',
},
"getRecentErrors": {
'args': [],
'method': 'get',
'name': 'getRecentErrors',
'output': 'http://schemas.taskcluster.net/ec2-manager/v1/errors.json#',
'route': '/errors',
'stability': 'experimental',
},
"getSpecificPrices": {
'args': [],
'input': 'http://schemas.taskcluster.net/ec2-manager/v1/prices-request.json#',
'method': 'post',
'name': 'getSpecificPrices',
'output': 'http://schemas.taskcluster.net/ec2-manager/v1/prices.json#',
'route': '/prices',
'stability': 'experimental',
},
"listWorkerTypes": {
'args': [],
'method': 'get',
'name': 'listWorkerTypes',
'output': 'http://schemas.taskcluster.net/ec2-manager/v1/list-worker-types.json#',
'route': '/worker-types',
'stability': 'experimental',
},
"ping": {
'args': [],
'method': 'get',
'name': 'ping',
'route': '/ping',
'stability': 'stable',
},
"purgeQueues": {
'args': [],
'method': 'get',
'name': 'purgeQueues',
'route': '/internal/purge-queues',
'stability': 'experimental',
},
"regions": {
'args': [],
'method': 'get',
'name': 'regions',
'route': '/internal/regions',
'stability': 'experimental',
},
"removeKeyPair": {
'args': ['name'],
'method': 'delete',
'name': 'removeKeyPair',
'route': '/key-pairs/<name>',
'stability': 'experimental',
},
"runInstance": {
'args': ['workerType'],
'input': 'http://schemas.taskcluster.net/ec2-manager/v1/run-instance-request.json#',
'method': 'put',
'name': 'runInstance',
'route': '/worker-types/<workerType>/instance',
'stability': 'experimental',
},
"sqsStats": {
'args': [],
'method': 'get',
'name': 'sqsStats',
'route': '/internal/sqs-stats',
'stability': 'experimental',
},
"terminateInstance": {
'args': ['region', 'instanceId'],
'method': 'delete',
'name': 'terminateInstance',
'route': '/region/<region>/instance/<instanceId>',
'stability': 'experimental',
},
"terminateWorkerType": {
'args': ['workerType'],
'method': 'delete',
'name': 'terminateWorkerType',
'route': '/worker-types/<workerType>/resources',
'stability': 'experimental',
},
"workerTypeErrors": {
'args': ['workerType'],
'method': 'get',
'name': 'workerTypeErrors',
'output': 'http://schemas.taskcluster.net/ec2-manager/v1/errors.json#',
'route': '/worker-types/<workerType>/errors',
'stability': 'experimental',
},
"workerTypeHealth": {
'args': ['workerType'],
'method': 'get',
'name': 'workerTypeHealth',
'output': 'http://schemas.taskcluster.net/ec2-manager/v1/health.json#',
'route': '/worker-types/<workerType>/health',
'stability': 'experimental',
},
"workerTypeState": {
'args': ['workerType'],
'method': 'get',
'name': 'workerTypeState',
'output': 'http://schemas.taskcluster.net/ec2-manager/v1/worker-type-state.json#',
'route': '/worker-types/<workerType>/state',
'stability': 'experimental',
},
"workerTypeStats": {
'args': ['workerType'],
'method': 'get',
'name': 'workerTypeStats',
'output': 'http://schemas.taskcluster.net/ec2-manager/v1/worker-type-resources.json#',
'route': '/worker-types/<workerType>/stats',
'stability': 'experimental',
},
}
__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'EC2Manager']
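A usage sketch for the generated client above (placeholder worker type; requires a reachable ec2-manager and suitable credentials):
# Sketch: query ec2-manager for one worker type.
from taskcluster.ec2manager import EC2Manager

ec2 = EC2Manager()
print(ec2.workerTypeStats('example-worker'))   # instance counts
print(ec2.workerTypeHealth('example-worker'))  # health summary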

View File

@ -0,0 +1,36 @@
""" Taskcluster client exceptions """
class TaskclusterFailure(Exception):
""" Base exception for all Taskcluster client errors"""
pass
class TaskclusterRestFailure(TaskclusterFailure):
""" Failures in the HTTP Rest API """
def __init__(self, msg, superExc, status_code=500, body=None):
TaskclusterFailure.__init__(self, msg)
self.superExc = superExc
self.status_code = status_code
self.body = body if body is not None else {}
class TaskclusterConnectionError(TaskclusterFailure):
""" Error connecting to resource """
def __init__(self, msg, superExc):
TaskclusterFailure.__init__(self, msg, superExc)
self.superExc = superExc
class TaskclusterAuthFailure(TaskclusterFailure):
""" Invalid Credentials """
def __init__(self, msg, superExc=None, status_code=500, body=None):
TaskclusterFailure.__init__(self, msg)
self.superExc = superExc
self.status_code = status_code
self.body = body if body is not None else {}
class TaskclusterTopicExchangeFailure(TaskclusterFailure):
""" Error while creating a Topic Exchange routing key """
pass
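These exceptions surface from _makeHttpRequest above; a handling sketch (the index path is a placeholder):
# Sketch: handle REST and connection failures around an API call.
import taskcluster.exceptions as exceptions
from taskcluster.index import Index

try:
    Index().findTask('project.example.no-such-path')
except exceptions.TaskclusterRestFailure as e:
    # e.g. a 404 for a missing index path; body carries the JSON error.
    print('REST failure %s: %s' % (e.status_code, e.body))
except exceptions.TaskclusterConnectionError as e:
    print('connection problem: %s' % e.superExc)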

View File

@ -0,0 +1,205 @@
# coding=utf-8
#####################################################
# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
#####################################################
# noqa: E128,E201
from .client import BaseClient
from .client import createApiClient
from .client import config
from .client import createTemporaryCredentials
from .client import createSession
_defaultConfig = config
class Github(BaseClient):
"""
The github service, typically available at
`github.taskcluster.net`, is responsible for publishing pulse
messages in response to GitHub events.
This document describes the API end-point for consuming GitHub
web hooks, as well as some useful consumer APIs.
When Github forbids an action, this service returns an HTTP 403
with code ForbiddenByGithub.
"""
classOptions = {
"baseUrl": "https://github.taskcluster.net/v1/"
}
def ping(self, *args, **kwargs):
"""
Ping Server
Respond without doing anything.
This endpoint is used to check that the service is up.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
def githubWebHookConsumer(self, *args, **kwargs):
"""
Consume GitHub WebHook
Capture a GitHub event and publish it via pulse, if it's a push,
release or pull request.
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["githubWebHookConsumer"], *args, **kwargs)
def builds(self, *args, **kwargs):
"""
List of Builds
A paginated list of builds that have been run in
Taskcluster. Can be filtered on various git-specific
fields.
This method gives output: ``v1/build-list.json#``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["builds"], *args, **kwargs)
def badge(self, *args, **kwargs):
"""
Latest Build Status Badge
Checks the status of the latest build of a given branch
and returns corresponding badge svg.
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["badge"], *args, **kwargs)
def repository(self, *args, **kwargs):
"""
Get Repository Info
Returns any repository metadata that is
useful within Taskcluster related services.
This method gives output: ``v1/repository.json#``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["repository"], *args, **kwargs)
def latest(self, *args, **kwargs):
"""
Latest Status for Branch
For a given branch of a repository, this will always point
to a status page for the most recent task triggered by that
branch.
Note: This is a redirect rather than a direct link.
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["latest"], *args, **kwargs)
def createStatus(self, *args, **kwargs):
"""
Post a status against a given changeset
For a given changeset (SHA) of a repository, this will attach a "commit status"
on github. These statuses are links displayed next to each revision.
The status is either OK (green check) or FAILURE (red cross),
made of a custom title and link.
This method takes input: ``v1/create-status.json#``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["createStatus"], *args, **kwargs)
def createComment(self, *args, **kwargs):
"""
Post a comment on a given GitHub Issue or Pull Request
For a given Issue or Pull Request of a repository, this will write a new message.
This method takes input: ``v1/create-comment.json#``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["createComment"], *args, **kwargs)
funcinfo = {
"badge": {
'args': ['owner', 'repo', 'branch'],
'method': 'get',
'name': 'badge',
'route': '/repository/<owner>/<repo>/<branch>/badge.svg',
'stability': 'experimental',
},
"builds": {
'args': [],
'method': 'get',
'name': 'builds',
'output': 'v1/build-list.json#',
'query': ['continuationToken', 'limit', 'organization', 'repository', 'sha'],
'route': '/builds',
'stability': 'experimental',
},
"createComment": {
'args': ['owner', 'repo', 'number'],
'input': 'v1/create-comment.json#',
'method': 'post',
'name': 'createComment',
'route': '/repository/<owner>/<repo>/issues/<number>/comments',
'stability': 'experimental',
},
"createStatus": {
'args': ['owner', 'repo', 'sha'],
'input': 'v1/create-status.json#',
'method': 'post',
'name': 'createStatus',
'route': '/repository/<owner>/<repo>/statuses/<sha>',
'stability': 'experimental',
},
"githubWebHookConsumer": {
'args': [],
'method': 'post',
'name': 'githubWebHookConsumer',
'route': '/github',
'stability': 'experimental',
},
"latest": {
'args': ['owner', 'repo', 'branch'],
'method': 'get',
'name': 'latest',
'route': '/repository/<owner>/<repo>/<branch>/latest',
'stability': 'experimental',
},
"ping": {
'args': [],
'method': 'get',
'name': 'ping',
'route': '/ping',
'stability': 'stable',
},
"repository": {
'args': ['owner', 'repo'],
'method': 'get',
'name': 'repository',
'output': 'v1/repository.json#',
'route': '/repository/<owner>/<repo>',
'stability': 'experimental',
},
}
__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Github']
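A usage sketch (placeholder organization/repository; assumes the response carries a `builds` list per build-list.json#):
# Sketch: list recent builds for one repository.
from taskcluster.github import Github

result = Github().builds(query={'organization': 'example-org',
                                'repository': 'example-repo',
                                'limit': 5})
for build in result.get('builds', []):
    print(build)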

View File

@ -0,0 +1,155 @@
# coding=utf-8
#####################################################
# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
#####################################################
# noqa: E128,E201
from .client import BaseClient
from .client import createApiClient
from .client import config
from .client import createTemporaryCredentials
from .client import createSession
_defaultConfig = config
class GithubEvents(BaseClient):
"""
The github service publishes a pulse
message for supported github events, translating Github webhook
events into pulse messages.
This document describes the exchange offered by the taskcluster
github service
"""
classOptions = {
"exchangePrefix": "exchange/taskcluster-github/v1/"
}
def pullRequest(self, *args, **kwargs):
"""
GitHub Pull Request Event
When a GitHub pull request event is posted it will be broadcast on this
exchange with the designated `organization` and `repository`
in the routing-key along with event specific metadata in the payload.
This exchange outputs: ``v1/github-pull-request-message.json#``. This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required)
* organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
* repository: The GitHub `repository` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
* action: The GitHub `action` which triggered an event. For possible values, see the payload actions property. (required)
"""
ref = {
'exchange': 'pull-request',
'name': 'pullRequest',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'organization',
},
{
'multipleWords': False,
'name': 'repository',
},
{
'multipleWords': False,
'name': 'action',
},
],
'schema': 'v1/github-pull-request-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def push(self, *args, **kwargs):
"""
GitHub push Event
When a GitHub push event is posted it will be broadcast on this
exchange with the designated `organization` and `repository`
in the routing-key along with event specific metadata in the payload.
This exchange outputs: ``v1/github-push-message.json#``. This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required)
* organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
* repository: The GitHub `repository` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
"""
ref = {
'exchange': 'push',
'name': 'push',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'organization',
},
{
'multipleWords': False,
'name': 'repository',
},
],
'schema': 'v1/github-push-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def release(self, *args, **kwargs):
"""
GitHub release Event
When a GitHub release event is posted it will be broadcast on this
exchange with the designated `organization` and `repository`
in the routing-key along with event specific metadata in the payload.
This exchange outputs: ``v1/github-release-message.json#``. This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required)
* organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
* repository: The GitHub `repository` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
"""
ref = {
'exchange': 'release',
'name': 'release',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'organization',
},
{
'multipleWords': False,
'name': 'repository',
},
],
'schema': 'v1/github-release-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
funcinfo = {
}
__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'GithubEvents']
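A routing-key sketch for the exchanges above (runs offline; only `action` is pinned here):
# Sketch: unspecified keys fall back to '*' wildcards.
from taskcluster.githubevents import GithubEvents

pr = GithubEvents().pullRequest(action='opened')
assert pr['routingKeyPattern'] == 'primary.*.*.opened'
assert pr['exchange'] == 'exchange/taskcluster-github/v1/pull-request'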

View File

@ -0,0 +1,323 @@
# coding=utf-8
#####################################################
# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
#####################################################
# noqa: E128,E201
from .client import BaseClient
from .client import createApiClient
from .client import config
from .client import createTemporaryCredentials
from .client import createSession
_defaultConfig = config
class Hooks(BaseClient):
"""
Hooks are a mechanism for creating tasks in response to events.
Hooks are identified with a `hookGroupId` and a `hookId`.
When an event occurs, the resulting task is automatically created. The
task is created using the scope `assume:hook-id:<hookGroupId>/<hookId>`,
which must have scopes to make the createTask call, including satisfying all
scopes in `task.scopes`. The new task has a `taskGroupId` equal to its
`taskId`, as is the convention for decision tasks.
Hooks can have a "schedule" indicating specific times that new tasks should
be created. Each schedule is in a simple cron format, per
https://www.npmjs.com/package/cron-parser. For example:
* `['0 0 1 * * *']` -- daily at 1:00 UTC
* `['0 0 9,21 * * 1-5', '0 0 12 * * 0,6']` -- weekdays at 9:00 and 21:00 UTC, weekends at noon
The task definition is used as a JSON-e template, with a context depending on how it is fired. See
https://docs.taskcluster.net/reference/core/taskcluster-hooks/docs/firing-hooks
for more information.
"""
classOptions = {
"baseUrl": "https://hooks.taskcluster.net/v1/"
}
def ping(self, *args, **kwargs):
"""
Ping Server
Respond without doing anything.
This endpoint is used to check that the service is up.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
def listHookGroups(self, *args, **kwargs):
"""
List hook groups
This endpoint will return a list of all hook groups with at least one hook.
This method gives output: ``v1/list-hook-groups-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["listHookGroups"], *args, **kwargs)
def listHooks(self, *args, **kwargs):
"""
List hooks in a given group
This endpoint will return a list of all the hook definitions within a
given hook group.
This method gives output: ``v1/list-hooks-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["listHooks"], *args, **kwargs)
def hook(self, *args, **kwargs):
"""
Get hook definition
This endpoint will return the hook definition for the given `hookGroupId`
and hookId.
This method gives output: ``v1/hook-definition.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["hook"], *args, **kwargs)
def getHookStatus(self, *args, **kwargs):
"""
Get hook status
This endpoint will return the current status of the hook. This represents a
snapshot in time and may vary from one call to the next.
This method gives output: ``v1/hook-status.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["getHookStatus"], *args, **kwargs)
def createHook(self, *args, **kwargs):
"""
Create a hook
This endpoint will create a new hook.
The caller's credentials must include the role that will be used to
create the task. That role must satisfy task.scopes as well as the
necessary scopes to add the task to the queue.
This method takes input: ``v1/create-hook-request.json#``
This method gives output: ``v1/hook-definition.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["createHook"], *args, **kwargs)
def updateHook(self, *args, **kwargs):
"""
Update a hook
This endpoint will update an existing hook. All fields except
`hookGroupId` and `hookId` can be modified.
This method takes input: ``v1/create-hook-request.json#``
This method gives output: ``v1/hook-definition.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["updateHook"], *args, **kwargs)
def removeHook(self, *args, **kwargs):
"""
Delete a hook
This endpoint will remove a hook definition.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["removeHook"], *args, **kwargs)
def triggerHook(self, *args, **kwargs):
"""
Trigger a hook
This endpoint will trigger the creation of a task from a hook definition.
The HTTP payload must match the hook's `triggerSchema`. If it does, it is
provided as the `payload` property of the JSON-e context used to render the
task template.
This method takes input: ``v1/trigger-hook.json#``
This method gives output: ``v1/task-status.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["triggerHook"], *args, **kwargs)
def getTriggerToken(self, *args, **kwargs):
"""
Get a trigger token
Retrieve a unique secret token for triggering the specified hook. This
token can be deactivated with `resetTriggerToken`.
This method gives output: ``v1/trigger-token-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["getTriggerToken"], *args, **kwargs)
def resetTriggerToken(self, *args, **kwargs):
"""
Reset a trigger token
Reset the token for triggering a given hook. This invalidates any token
that may have been issued via getTriggerToken, replacing it with a new token.
This method gives output: ``v1/trigger-token-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["resetTriggerToken"], *args, **kwargs)
def triggerHookWithToken(self, *args, **kwargs):
"""
Trigger a hook with a token
This endpoint triggers a defined hook with a valid token.
The HTTP payload must match the hook's `triggerSchema`. If it does, it is
provided as the `payload` property of the JSON-e context used to render the
task template.
This method takes input: ``v1/trigger-hook.json#``
This method gives output: ``v1/task-status.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["triggerHookWithToken"], *args, **kwargs)
funcinfo = {
"createHook": {
'args': ['hookGroupId', 'hookId'],
'input': 'v1/create-hook-request.json#',
'method': 'put',
'name': 'createHook',
'output': 'v1/hook-definition.json#',
'route': '/hooks/<hookGroupId>/<hookId>',
'stability': 'stable',
},
"getHookStatus": {
'args': ['hookGroupId', 'hookId'],
'method': 'get',
'name': 'getHookStatus',
'output': 'v1/hook-status.json#',
'route': '/hooks/<hookGroupId>/<hookId>/status',
'stability': 'stable',
},
"getTriggerToken": {
'args': ['hookGroupId', 'hookId'],
'method': 'get',
'name': 'getTriggerToken',
'output': 'v1/trigger-token-response.json#',
'route': '/hooks/<hookGroupId>/<hookId>/token',
'stability': 'stable',
},
"hook": {
'args': ['hookGroupId', 'hookId'],
'method': 'get',
'name': 'hook',
'output': 'v1/hook-definition.json#',
'route': '/hooks/<hookGroupId>/<hookId>',
'stability': 'stable',
},
"listHookGroups": {
'args': [],
'method': 'get',
'name': 'listHookGroups',
'output': 'v1/list-hook-groups-response.json#',
'route': '/hooks',
'stability': 'stable',
},
"listHooks": {
'args': ['hookGroupId'],
'method': 'get',
'name': 'listHooks',
'output': 'v1/list-hooks-response.json#',
'route': '/hooks/<hookGroupId>',
'stability': 'stable',
},
"ping": {
'args': [],
'method': 'get',
'name': 'ping',
'route': '/ping',
'stability': 'stable',
},
"removeHook": {
'args': ['hookGroupId', 'hookId'],
'method': 'delete',
'name': 'removeHook',
'route': '/hooks/<hookGroupId>/<hookId>',
'stability': 'stable',
},
"resetTriggerToken": {
'args': ['hookGroupId', 'hookId'],
'method': 'post',
'name': 'resetTriggerToken',
'output': 'v1/trigger-token-response.json#',
'route': '/hooks/<hookGroupId>/<hookId>/token',
'stability': 'stable',
},
"triggerHook": {
'args': ['hookGroupId', 'hookId'],
'input': 'v1/trigger-hook.json#',
'method': 'post',
'name': 'triggerHook',
'output': 'v1/task-status.json#',
'route': '/hooks/<hookGroupId>/<hookId>/trigger',
'stability': 'stable',
},
"triggerHookWithToken": {
'args': ['hookGroupId', 'hookId', 'token'],
'input': 'v1/trigger-hook.json#',
'method': 'post',
'name': 'triggerHookWithToken',
'output': 'v1/task-status.json#',
'route': '/hooks/<hookGroupId>/<hookId>/trigger/<token>',
'stability': 'stable',
},
"updateHook": {
'args': ['hookGroupId', 'hookId'],
'input': 'v1/create-hook-request.json#',
'method': 'post',
'name': 'updateHook',
'output': 'v1/hook-definition.json#',
'route': '/hooks/<hookGroupId>/<hookId>',
'stability': 'stable',
},
}
__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Hooks']
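A usage sketch (placeholder identifiers; requires a reachable hooks service, and the trigger payload must satisfy the hook's triggerSchema):
# Sketch: inspect, then trigger, a hook.
from taskcluster.hooks import Hooks

hooks = Hooks()
print(hooks.getHookStatus('my-group', 'my-hook'))
status = hooks.triggerHook('my-group', 'my-hook', {})
# Assuming the task-status response shape, the new task's id is at:
print(status['status']['taskId'])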

View File

@ -0,0 +1,277 @@
# coding=utf-8
#####################################################
# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
#####################################################
# noqa: E128,E201
from .client import BaseClient
from .client import createApiClient
from .client import config
from .client import createTemporaryCredentials
from .client import createSession
_defaultConfig = config
class Index(BaseClient):
"""
The task index, typically available at `index.taskcluster.net`, is
responsible for indexing tasks. The service ensures that tasks can be
located by recency and/or arbitrary strings. Common use-cases include:
* Locate tasks by git or mercurial `<revision>`, or
* Locate latest task from given `<branch>`, such as a release.
**Index hierarchy**, tasks are indexed in a dot (`.`) separated hierarchy
called a namespace. For example a task could be indexed with the index path
`some-app.<revision>.linux-64.release-build`. In this case the following
namespaces are created:
1. `some-app`,
2. `some-app.<revision>`, and
3. `some-app.<revision>.linux-64`
Inside the namespace `some-app.<revision>` you can find the namespace
`some-app.<revision>.linux-64` inside which you can find the indexed task
`some-app.<revision>.linux-64.release-build`. This is an example of indexing
builds for a given platform and revision.
**Task Rank**, when a task is indexed, it is assigned a `rank` (defaults
to `0`). If another task is already indexed in the same namespace with
lower or equal `rank`, the index for that task will be overwritten. For example
consider index path `mozilla-central.linux-64.release-build`. In
this case one might choose to use a UNIX timestamp or mercurial revision
number as `rank`. This way the latest completed linux 64 bit release
build is always available at `mozilla-central.linux-64.release-build`.
Note that this does mean index paths are not immutable: the same path may
point to a different task now than it did a moment ago.
**Indexed Data**, when a task is retrieved from the index the result includes
a `taskId` and an additional user-defined JSON blob that was indexed with
the task.
**Entry Expiration**, all indexed entries must have an expiration date.
Typically this defaults to one year, if not specified. If you are
indexing tasks to make it easy to find artifacts, consider using the
artifact's expiration date.
**Valid Characters**, all keys in a namespace `<key1>.<key2>` must be
in the form `/[a-zA-Z0-9_!~*'()%-]+/`. Observe that this is URL-safe and
that if you really need another character you can URL-encode it.
**Indexing Routes**, tasks can be indexed using the API below, but the
most common way to index tasks is adding a custom route to `task.routes` of the
form `index.<namespace>`. In order to add this route to a task you'll
need the scope `queue:route:index.<namespace>`. When a task has
this route, it will be indexed when the task is **completed successfully**.
The task will be indexed with `rank`, `data` and `expires` as specified
in `task.extra.index`. See the example below:
```
{
payload: { /* ... */ },
routes: [
// index.<namespace> prefixed routes, tasks CC'ed such a route will
// be indexed under the given <namespace>
"index.mozilla-central.linux-64.release-build",
"index.<revision>.linux-64.release-build"
],
extra: {
// Optional details for indexing service
index: {
// Ordering: this taskId will overwrite anything that has
// rank <= 4000 (defaults to zero)
rank: 4000,
// Specify when the entries expire (Defaults to 1 year)
expires: new Date().toJSON(),
// A little informal data to store along with taskId
// (less than 16 kB when encoded as JSON)
data: {
hgRevision: "...",
commitMessage: "...",
whatever...
}
},
// Extra properties for other services...
}
// Other task properties...
}
```
**Remark**, when indexing tasks using custom routes, it's also possible
to listen for messages about these tasks. For
example one could bind to `route.index.some-app.*.release-build`,
and pick up all messages about release builds. Hence, it is a
good idea to document task index hierarchies, as these make up extension
points in their own right.
"""
classOptions = {
"baseUrl": "https://index.taskcluster.net/v1/"
}
def ping(self, *args, **kwargs):
"""
Ping Server
Respond without doing anything.
This endpoint is used to check that the service is up.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
def findTask(self, *args, **kwargs):
"""
Find Indexed Task
Find a task by index path, returning the highest-rank task with that path. If no
task exists for the given path, this API end-point will respond with a 404 status.
This method gives output: ``v1/indexed-task-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["findTask"], *args, **kwargs)
def listNamespaces(self, *args, **kwargs):
"""
List Namespaces
List the namespaces immediately under a given namespace.
This endpoint
lists up to 1000 namespaces. If more namespaces are present, a
`continuationToken` will be returned, which can be given in the next
request. For the initial request, the payload should be an empty JSON
object.
This method gives output: ``v1/list-namespaces-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["listNamespaces"], *args, **kwargs)
def listTasks(self, *args, **kwargs):
"""
List Tasks
List the tasks immediately under a given namespace.
This endpoint
lists up to 1000 tasks. If more tasks are present, a
`continuationToken` will be returned, which can be given in the next
request. For the initial request, the payload should be an empty JSON
object.
**Remark**, this end-point is designed for humans browsing for tasks, not
services, as that makes little sense.
This method gives output: ``v1/list-tasks-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["listTasks"], *args, **kwargs)
def insertTask(self, *args, **kwargs):
"""
Insert Task into Index
Insert a task into the index. If the new rank is less than the existing rank
at the given index path, the task is not indexed but the response is still 200 OK.
Please see the introduction above for information
about indexing successfully completed tasks automatically using custom routes.
This method takes input: ``v1/insert-task-request.json#``
This method gives output: ``v1/indexed-task-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["insertTask"], *args, **kwargs)
def findArtifactFromTask(self, *args, **kwargs):
"""
Get Artifact From Indexed Task
Find a task by index path and redirect to the artifact on the most recent
run with the given `name`.
Note that multiple calls to this endpoint may return artifacts from different tasks
if a new task is inserted into the index between calls. Avoid using this method as
a stable link to multiple, connected files if the index path does not contain a
unique identifier. For example, the following two links may return unrelated files:
* https://index.taskcluster.net/task/some-app.win64.latest.installer/artifacts/public/installer.exe
* https://index.taskcluster.net/task/some-app.win64.latest.installer/artifacts/public/debug-symbols.zip
This problem can be remedied by including the revision in the index path or by bundling both
installer and debug symbols into a single artifact.
If no task exists for the given index path, this API end-point responds with 404.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["findArtifactFromTask"], *args, **kwargs)
funcinfo = {
"findArtifactFromTask": {
'args': ['indexPath', 'name'],
'method': 'get',
'name': 'findArtifactFromTask',
'route': '/task/<indexPath>/artifacts/<name>',
'stability': 'stable',
},
"findTask": {
'args': ['indexPath'],
'method': 'get',
'name': 'findTask',
'output': 'v1/indexed-task-response.json#',
'route': '/task/<indexPath>',
'stability': 'stable',
},
"insertTask": {
'args': ['namespace'],
'input': 'v1/insert-task-request.json#',
'method': 'put',
'name': 'insertTask',
'output': 'v1/indexed-task-response.json#',
'route': '/task/<namespace>',
'stability': 'stable',
},
"listNamespaces": {
'args': ['namespace'],
'method': 'get',
'name': 'listNamespaces',
'output': 'v1/list-namespaces-response.json#',
'query': ['continuationToken', 'limit'],
'route': '/namespaces/<namespace>',
'stability': 'stable',
},
"listTasks": {
'args': ['namespace'],
'method': 'get',
'name': 'listTasks',
'output': 'v1/list-tasks-response.json#',
'query': ['continuationToken', 'limit'],
'route': '/tasks/<namespace>',
'stability': 'stable',
},
"ping": {
'args': [],
'method': 'get',
'name': 'ping',
'route': '/ping',
'stability': 'stable',
},
}
__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Index']

View File

@ -0,0 +1,88 @@
# coding=utf-8
#####################################################
# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
#####################################################
# noqa: E128,E201
from .client import BaseClient
from .client import createApiClient
from .client import config
from .client import createTemporaryCredentials
from .client import createSession
_defaultConfig = config
class Login(BaseClient):
"""
The Login service serves as the interface between external authentication
systems and Taskcluster credentials.
"""
classOptions = {
"baseUrl": "https://login.taskcluster.net/v1"
}
def oidcCredentials(self, *args, **kwargs):
"""
Get Taskcluster credentials given a suitable `access_token`
Given an OIDC `access_token` from a trusted OpenID provider, return a
set of Taskcluster credentials for use on behalf of the identified
user.
This method is typically not called with a Taskcluster client library
and does not accept Hawk credentials. The `access_token` should be
given in an `Authorization` header:
```
Authorization: Bearer abc.xyz
```
The `access_token` is first verified against the named
`provider`, then passed to the provider's API to retrieve a user
profile. That profile is then used to generate Taskcluster credentials
appropriate to the user. Note that the resulting credentials may or may
not include a `certificate` property. Callers should be prepared for either
alternative.
The given credentials will expire in a relatively short time. Callers should
monitor this expiration and refresh the credentials, by calling this
endpoint again, once they have expired.
This method gives output: ``http://schemas.taskcluster.net/login/v1/oidc-credentials-response.json``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["oidcCredentials"], *args, **kwargs)
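# A sketch of calling this endpoint directly with `requests`, since it takes
# an OIDC Bearer token rather than Hawk credentials; the provider name and
# token below are placeholders.
import requests

resp = requests.get(
    'https://login.taskcluster.net/v1/oidc-credentials/mozilla-auth0',
    headers={'Authorization': 'Bearer abc.xyz'},
)
resp.raise_for_status()
credentials = resp.json()  # clientId/accessToken (and maybe certificate), per the schema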
def ping(self, *args, **kwargs):
"""
Ping Server
Respond without doing anything.
This endpoint is used to check that the service is up.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
funcinfo = {
"oidcCredentials": {
'args': ['provider'],
'method': 'get',
'name': 'oidcCredentials',
'output': 'http://schemas.taskcluster.net/login/v1/oidc-credentials-response.json',
'route': '/oidc-credentials/<provider>',
'stability': 'experimental',
},
"ping": {
'args': [],
'method': 'get',
'name': 'ping',
'route': '/ping',
'stability': 'stable',
},
}
__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Login']

View File

@ -0,0 +1,124 @@
# coding=utf-8
#####################################################
# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
#####################################################
# noqa: E128,E201
from .client import BaseClient
from .client import createApiClient
from .client import config
from .client import createTemporaryCredentials
from .client import createSession
_defaultConfig = config
class Notify(BaseClient):
"""
The notification service, typically available at `notify.taskcluster.net`,
listens for tasks with associated notifications and handles requests to
send emails and post pulse messages.
"""
classOptions = {
"baseUrl": "https://notify.taskcluster.net/v1/"
}
def ping(self, *args, **kwargs):
"""
Ping Server
Respond without doing anything.
This endpoint is used to check that the service is up.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
def email(self, *args, **kwargs):
"""
Send an Email
Send an email to `address`. The content is markdown and will be rendered
to HTML, but both the HTML and raw markdown text will be sent in the
email. If a link is included, it will be rendered to a nice button in the
HTML version of the email.
This method takes input: ``v1/email-request.json#``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["email"], *args, **kwargs)
def pulse(self, *args, **kwargs):
"""
Publish a Pulse Message
Publish a message on pulse with the given `routingKey`.
This method takes input: ``v1/pulse-request.json#``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["pulse"], *args, **kwargs)
def irc(self, *args, **kwargs):
"""
Post IRC Message
Post a message on IRC to a specific channel or user, or a specific user
on a specific channel.
Success of this API method does not imply the message was successfully
posted. This API method merely inserts the IRC message into a queue
that will be processed by a background process.
This allows us to re-send the message in the face of connection issues.
However, if the user isn't online, the message will be dropped without
error. We may improve this behavior in the future. For now, just keep
in mind that IRC is a best-effort service.
This method takes input: ``v1/irc-request.json#``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["irc"], *args, **kwargs)
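# A minimal sketch of requesting an email notification, assuming the caller
# holds the relevant notify scopes; the address, content, and link are
# placeholders following the email-request schema referenced above.
import taskcluster

notify = taskcluster.Notify()
notify.email({
    'address': 'dev@example.com',
    'subject': 'Nightly build finished',
    'content': 'The build completed. This *markdown* is rendered to HTML.',
    'link': {'text': 'Inspect', 'href': 'https://tools.taskcluster.net'},
})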
funcinfo = {
"email": {
'args': [],
'input': 'v1/email-request.json#',
'method': 'post',
'name': 'email',
'route': '/email',
'stability': 'experimental',
},
"irc": {
'args': [],
'input': 'v1/irc-request.json#',
'method': 'post',
'name': 'irc',
'route': '/irc',
'stability': 'experimental',
},
"ping": {
'args': [],
'method': 'get',
'name': 'ping',
'route': '/ping',
'stability': 'stable',
},
"pulse": {
'args': [],
'input': 'v1/pulse-request.json#',
'method': 'post',
'name': 'pulse',
'route': '/pulse',
'stability': 'experimental',
},
}
__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Notify']

View File

@ -0,0 +1,151 @@
# coding=utf-8
#####################################################
# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
#####################################################
# noqa: E128,E201
from .client import BaseClient
from .client import createApiClient
from .client import config
from .client import createTemporaryCredentials
from .client import createSession
_defaultConfig = config
class Pulse(BaseClient):
"""
The taskcluster-pulse service, typically available at `pulse.taskcluster.net`,
manages pulse credentials for taskcluster users.
A service to manage Pulse credentials for anything using
Taskcluster credentials. This allows for self-service pulse
access and greater control within the Taskcluster project.
"""
classOptions = {
"baseUrl": "https://pulse.taskcluster.net/v1"
}
def overview(self, *args, **kwargs):
"""
Rabbit Overview
Get an overview of the Rabbit cluster.
This method gives output: ``http://schemas.taskcluster.net/pulse/v1/rabbit-overview.json``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["overview"], *args, **kwargs)
def listNamespaces(self, *args, **kwargs):
"""
List Namespaces
List the namespaces managed by this service.
This will list up to 1000 namespaces. If more namespaces are present, a
`continuationToken` will be returned, which can be given in the next
request. For the initial request, do not provide a continuation token.
This method gives output: ``http://schemas.taskcluster.net/pulse/v1/list-namespaces-response.json``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["listNamespaces"], *args, **kwargs)
def namespace(self, *args, **kwargs):
"""
Get a namespace
Get public information about a single namespace. This is the same information
as returned by `listNamespaces`.
This method gives output: ``http://schemas.taskcluster.net/pulse/v1/namespace.json``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["namespace"], *args, **kwargs)
def claimNamespace(self, *args, **kwargs):
"""
Claim a namespace
Claim a namespace, returning a username and password with access to that
namespace good for a short time. Clients should call this endpoint again
at the re-claim time given in the response, as the password will be rotated
soon after that time. The namespace will expire, and any associated queues
and exchanges will be deleted, at the given expiration time.
The `expires` and `contact` properties can be updated at any time in a reclaim
operation.
This method takes input: ``http://schemas.taskcluster.net/pulse/v1/namespace-request.json``
This method gives output: ``http://schemas.taskcluster.net/pulse/v1/namespace-response.json``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["claimNamespace"], *args, **kwargs)
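# A claim/re-claim sketch; the namespace is a placeholder and the payload
# fields are assumptions based on the namespace-request schema above.
import taskcluster

pulse = taskcluster.Pulse()
res = pulse.claimNamespace('sample-namespace', {
    'expires': taskcluster.fromNowJSON('1 day'),
    'contact': 'dev@example.com',  # assumed field shape; consult the schema
})
# Re-claim around the time given in the response, since the password is
# rotated soon after that time.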
def ping(self, *args, **kwargs):
"""
Ping Server
Respond without doing anything.
This endpoint is used to check that the service is up.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
funcinfo = {
"claimNamespace": {
'args': ['namespace'],
'input': 'http://schemas.taskcluster.net/pulse/v1/namespace-request.json',
'method': 'post',
'name': 'claimNamespace',
'output': 'http://schemas.taskcluster.net/pulse/v1/namespace-response.json',
'route': '/namespace/<namespace>',
'stability': 'experimental',
},
"listNamespaces": {
'args': [],
'method': 'get',
'name': 'listNamespaces',
'output': 'http://schemas.taskcluster.net/pulse/v1/list-namespaces-response.json',
'query': ['limit', 'continuation'],
'route': '/namespaces',
'stability': 'experimental',
},
"namespace": {
'args': ['namespace'],
'method': 'get',
'name': 'namespace',
'output': 'http://schemas.taskcluster.net/pulse/v1/namespace.json',
'route': '/namespace/<namespace>',
'stability': 'experimental',
},
"overview": {
'args': [],
'method': 'get',
'name': 'overview',
'output': 'http://schemas.taskcluster.net/pulse/v1/rabbit-overview.json',
'route': '/overview',
'stability': 'experimental',
},
"ping": {
'args': [],
'method': 'get',
'name': 'ping',
'route': '/ping',
'stability': 'stable',
},
}
__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Pulse']

View File

@ -0,0 +1,124 @@
# coding=utf-8
#####################################################
# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
#####################################################
# noqa: E128,E201
from .client import BaseClient
from .client import createApiClient
from .client import config
from .client import createTemporaryCredentials
from .client import createSession
_defaultConfig = config
class PurgeCache(BaseClient):
"""
The purge-cache service, typically available at
`purge-cache.taskcluster.net`, is responsible for publishing a pulse
message for workers, so they can purge cache upon request.
This document describes the API end-point for publishing the pulse
message. This is mainly intended to be used by tools.
"""
classOptions = {
"baseUrl": "https://purge-cache.taskcluster.net/v1/"
}
def ping(self, *args, **kwargs):
"""
Ping Server
Respond without doing anything.
This endpoint is used to check that the service is up.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
def purgeCache(self, *args, **kwargs):
"""
Purge Worker Cache
Publish a purge-cache message to purge caches named `cacheName` with
`provisionerId` and `workerType` in the routing-key. Workers should
be listening for this message and purge caches when they see it.
This method takes input: ``v1/purge-cache-request.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["purgeCache"], *args, **kwargs)
def allPurgeRequests(self, *args, **kwargs):
"""
All Open Purge Requests
This is useful mostly for administrators to view
the set of open purge requests. It should not
be used by workers; they should use the purgeRequests
endpoint that is specific to their workerType and
provisionerId.
This method gives output: ``v1/all-purge-cache-request-list.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["allPurgeRequests"], *args, **kwargs)
def purgeRequests(self, *args, **kwargs):
"""
Open Purge Requests for a provisionerId/workerType pair
Lists the caches that should be purged if they are from before
a certain time. This is safe to use in automation from
workers.
This method gives output: ``v1/purge-cache-request-list.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["purgeRequests"], *args, **kwargs)
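# A sketch of both sides of the protocol described above; the provisioner,
# worker type, and cache names are hypothetical.
import taskcluster

purge = taskcluster.PurgeCache()
# A tool asks workers of one worker type to drop a named cache:
purge.purgeCache('aws-provisioner-v1', 'gecko-t-linux',
                 {'cacheName': 'level-3-checkouts'})
# A worker polls for the purge requests that apply to it:
open_requests = purge.purgeRequests('aws-provisioner-v1', 'gecko-t-linux')
for request in open_requests.get('requests', []):
    print(request['cacheName'], request['before'])  # assumed response fields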
funcinfo = {
"allPurgeRequests": {
'args': [],
'method': 'get',
'name': 'allPurgeRequests',
'output': 'v1/all-purge-cache-request-list.json#',
'query': ['continuationToken', 'limit'],
'route': '/purge-cache/list',
'stability': 'stable',
},
"ping": {
'args': [],
'method': 'get',
'name': 'ping',
'route': '/ping',
'stability': 'stable',
},
"purgeCache": {
'args': ['provisionerId', 'workerType'],
'input': 'v1/purge-cache-request.json#',
'method': 'post',
'name': 'purgeCache',
'route': '/purge-cache/<provisionerId>/<workerType>',
'stability': 'stable',
},
"purgeRequests": {
'args': ['provisionerId', 'workerType'],
'method': 'get',
'name': 'purgeRequests',
'output': 'v1/purge-cache-request-list.json#',
'query': ['since'],
'route': '/purge-cache/<provisionerId>/<workerType>',
'stability': 'stable',
},
}
__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'PurgeCache']

View File

@ -0,0 +1,71 @@
# coding=utf-8
#####################################################
# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
#####################################################
# noqa: E128,E201
from .client import BaseClient
from .client import createApiClient
from .client import config
from .client import createTemporaryCredentials
from .client import createSession
_defaultConfig = config
class PurgeCacheEvents(BaseClient):
"""
The purge-cache service, typically available at
`purge-cache.taskcluster.net`, is responsible for publishing a pulse
message for workers, so they can purge cache upon request.
This document describes the exchange offered for workers by the
cache-purge service.
"""
classOptions = {
"exchangePrefix": "exchange/taskcluster-purge-cache/v1/"
}
def purgeCache(self, *args, **kwargs):
"""
Purge Cache Messages
When a cache purge is requested, a message will be posted on this
exchange with the designated `provisionerId` and `workerType` in the
routing-key and the name of the `cacheFolder` as the payload.
This exchange outputs: ``v1/purge-cache-message.json#``This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* provisionerId: `provisionerId` under which to purge cache. (required)
* workerType: `workerType` for which to purge cache. (required)
"""
ref = {
'exchange': 'purge-cache',
'name': 'purgeCache',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'provisionerId',
},
{
'multipleWords': False,
'name': 'workerType',
},
],
'schema': 'v1/purge-cache-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
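# A sketch of turning the exchange definition into an AMQP binding; the
# routing-key values are hypothetical, and the returned exchange and
# routingKeyPattern keys are assumed from the client's topic-exchange helper.
import taskcluster

events = taskcluster.PurgeCacheEvents()
binding = events.purgeCache(provisionerId='aws-provisioner-v1',
                            workerType='gecko-t-linux')
print(binding['exchange'], binding['routingKeyPattern'])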
funcinfo = {
}
__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'PurgeCacheEvents']

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,716 @@
# coding=utf-8
#####################################################
# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
#####################################################
# noqa: E128,E201
from .client import BaseClient
from .client import createApiClient
from .client import config
from .client import createTemporaryCredentials
from .client import createSession
_defaultConfig = config
class QueueEvents(BaseClient):
"""
The queue, typically available at `queue.taskcluster.net`, is responsible
for accepting tasks and tracking their state as they are executed by
workers, in order to ensure they are eventually resolved.
This document describes the AMQP exchanges offered by the queue, which allow
third-party listeners to monitor tasks as they progress to resolution.
These exchanges target the following audiences:
 * Schedulers, who take action after tasks are completed,
 * Workers, who want to listen for new or canceled tasks (optional),
 * Tools, that want to update their view as tasks progress.
You'll notice that all the exchanges in this document share the same
routing key pattern. This makes it very easy to bind to all messages
about a certain kind of task.
**Task specific routes**, a task can define a task-specific route using
the `task.routes` property. See the task creation documentation for details
on the permissions required to provide task-specific routes. If a task has
the entry `'notify.by-email'` as a task-specific route in
`task.routes`, all messages about this task will be CC'ed with the
routing-key `'route.notify.by-email'`.
These routes will always be prefixed `route.`, so they cannot interfere
with the _primary_ routing key as documented here. Notice that the
_primary_ routing key is always prefixed `primary.`. This is ensured
in the routing key reference, so API clients will do this automatically.
Please note that, due to the way RabbitMQ works, the message will only arrive
in your queue once, even though you may have bound to the exchange with
multiple routing key patterns that match more than one of the CC'ed
routing keys.
**Delivery guarantees**, most operations on the queue are idempotent,
which means that if repeated with the same arguments then the requests
will ensure completion of the operation and return the same response.
This is useful if the server crashes or the TCP connection breaks, but
when re-executing an idempotent operation, the queue will also resend
any related AMQP messages. Hence, messages may be repeated.
This shouldn't be much of a problem, as the best you can achieve using
confirm messages with AMQP is at-least-once delivery semantics. Hence,
this only prevents you from obtaining at-most-once delivery semantics.
**Remark**, some messages generated by timeouts may be dropped if the
server crashes at the wrong time. Ideally, we'll address this in the
future. For now we suggest you ignore this corner case, and notify us
if this corner case is of concern to you.
"""
classOptions = {
"exchangePrefix": "exchange/taskcluster-queue/v1/"
}
def taskDefined(self, *args, **kwargs):
"""
Task Defined Messages
When a task is created or just defined, a message is posted to this
exchange.
This message exchange is mainly useful when tasks are scheduled by a
scheduler that uses `defineTask`, as this does not make the task
`pending`. Thus, no `taskPending` message is published.
Please note that messages are also published on this exchange if the task
is defined using `createTask`.
This exchange outputs: ``v1/task-defined-message.json#``This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* taskId: `taskId` for the task this message concerns (required)
 * runId: `runId` of the latest run for the task, `_` if no run exists for the task.
 * workerGroup: `workerGroup` of the latest run for the task, `_` if no run exists for the task.
 * workerId: `workerId` of the latest run for the task, `_` if no run exists for the task.
* provisionerId: `provisionerId` this task is targeted at. (required)
* workerType: `workerType` this task must run on. (required)
* schedulerId: `schedulerId` this task was created by. (required)
* taskGroupId: `taskGroupId` this task was created in. (required)
* reserved: Space reserved for future routing-key entries, you should always match this entry with `#`. As automatically done by our tooling, if not specified.
"""
ref = {
'exchange': 'task-defined',
'name': 'taskDefined',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'taskId',
},
{
'multipleWords': False,
'name': 'runId',
},
{
'multipleWords': False,
'name': 'workerGroup',
},
{
'multipleWords': False,
'name': 'workerId',
},
{
'multipleWords': False,
'name': 'provisionerId',
},
{
'multipleWords': False,
'name': 'workerType',
},
{
'multipleWords': False,
'name': 'schedulerId',
},
{
'multipleWords': False,
'name': 'taskGroupId',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/task-defined-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def taskPending(self, *args, **kwargs):
"""
Task Pending Messages
When a task becomes `pending`, a message is posted to this exchange.
This is useful for workers who don't want to constantly poll the queue
for new tasks. The queue will also be the authority for task states and
claims. But using this exchange workers should be able to distribute work
efficiently, and they would be able to reduce their polling interval
significantly without affecting general responsiveness.
This exchange outputs: ``v1/task-pending-message.json#``This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* taskId: `taskId` for the task this message concerns (required)
 * runId: `runId` of the latest run for the task, `_` if no run exists for the task. (required)
 * workerGroup: `workerGroup` of the latest run for the task, `_` if no run exists for the task.
 * workerId: `workerId` of the latest run for the task, `_` if no run exists for the task.
* provisionerId: `provisionerId` this task is targeted at. (required)
* workerType: `workerType` this task must run on. (required)
* schedulerId: `schedulerId` this task was created by. (required)
* taskGroupId: `taskGroupId` this task was created in. (required)
* reserved: Space reserved for future routing-key entries, you should always match this entry with `#`. As automatically done by our tooling, if not specified.
"""
ref = {
'exchange': 'task-pending',
'name': 'taskPending',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'taskId',
},
{
'multipleWords': False,
'name': 'runId',
},
{
'multipleWords': False,
'name': 'workerGroup',
},
{
'multipleWords': False,
'name': 'workerId',
},
{
'multipleWords': False,
'name': 'provisionerId',
},
{
'multipleWords': False,
'name': 'workerType',
},
{
'multipleWords': False,
'name': 'schedulerId',
},
{
'multipleWords': False,
'name': 'taskGroupId',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/task-pending-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def taskRunning(self, *args, **kwargs):
"""
Task Running Messages
Whenever a task is claimed by a worker, a run is started on the worker,
and a message is posted on this exchange.
This exchange outputs: ``v1/task-running-message.json#``This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* taskId: `taskId` for the task this message concerns (required)
 * runId: `runId` of the latest run for the task, `_` if no run exists for the task. (required)
 * workerGroup: `workerGroup` of the latest run for the task, `_` if no run exists for the task. (required)
 * workerId: `workerId` of the latest run for the task, `_` if no run exists for the task. (required)
* provisionerId: `provisionerId` this task is targeted at. (required)
* workerType: `workerType` this task must run on. (required)
* schedulerId: `schedulerId` this task was created by. (required)
* taskGroupId: `taskGroupId` this task was created in. (required)
* reserved: Space reserved for future routing-key entries, you should always match this entry with `#`. As automatically done by our tooling, if not specified.
"""
ref = {
'exchange': 'task-running',
'name': 'taskRunning',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'taskId',
},
{
'multipleWords': False,
'name': 'runId',
},
{
'multipleWords': False,
'name': 'workerGroup',
},
{
'multipleWords': False,
'name': 'workerId',
},
{
'multipleWords': False,
'name': 'provisionerId',
},
{
'multipleWords': False,
'name': 'workerType',
},
{
'multipleWords': False,
'name': 'schedulerId',
},
{
'multipleWords': False,
'name': 'taskGroupId',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/task-running-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def artifactCreated(self, *args, **kwargs):
"""
Artifact Creation Messages
Whenever the `createArtifact` end-point is called, the queue will create
a record of the artifact and post a message on this exchange. All of this
happens before the queue returns a signed URL for the caller to upload
the actual artifact with (depending on `storageType`).
This means that the actual artifact is rarely available when this message
is posted. But it is not unreasonable to assume that the artifact will
become available at some point later. Most signatures will expire in
30 minutes or so, forcing the uploader to call `createArtifact` with
the same payload again in order to continue uploading the artifact.
However, in most cases (especially for small artifacts) it's very
reasonable to assume the artifact will be available within a few minutes.
This property means that this exchange is mostly useful for tools
monitoring task evaluation. One could also use it to count the number of
artifacts per task, or to _index_ artifacts, though in most cases it'll be
smarter to index artifacts after the task in question has completed
successfully.
This exchange outputs: ``v1/artifact-created-message.json#``This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* taskId: `taskId` for the task this message concerns (required)
 * runId: `runId` of the latest run for the task, `_` if no run exists for the task. (required)
 * workerGroup: `workerGroup` of the latest run for the task, `_` if no run exists for the task. (required)
 * workerId: `workerId` of the latest run for the task, `_` if no run exists for the task. (required)
* provisionerId: `provisionerId` this task is targeted at. (required)
* workerType: `workerType` this task must run on. (required)
* schedulerId: `schedulerId` this task was created by. (required)
* taskGroupId: `taskGroupId` this task was created in. (required)
* reserved: Space reserved for future routing-key entries, you should always match this entry with `#`. As automatically done by our tooling, if not specified.
"""
ref = {
'exchange': 'artifact-created',
'name': 'artifactCreated',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'taskId',
},
{
'multipleWords': False,
'name': 'runId',
},
{
'multipleWords': False,
'name': 'workerGroup',
},
{
'multipleWords': False,
'name': 'workerId',
},
{
'multipleWords': False,
'name': 'provisionerId',
},
{
'multipleWords': False,
'name': 'workerType',
},
{
'multipleWords': False,
'name': 'schedulerId',
},
{
'multipleWords': False,
'name': 'taskGroupId',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/artifact-created-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def taskCompleted(self, *args, **kwargs):
"""
Task Completed Messages
When a task is successfully completed by a worker, a message is posted to
this exchange.
This message is routed using the `runId`, `workerGroup` and `workerId`
that completed the task. But information about additional runs is also
available from the task status structure.
This exchange outputs: ``v1/task-completed-message.json#``This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* taskId: `taskId` for the task this message concerns (required)
 * runId: `runId` of the latest run for the task, `_` if no run exists for the task. (required)
 * workerGroup: `workerGroup` of the latest run for the task, `_` if no run exists for the task. (required)
 * workerId: `workerId` of the latest run for the task, `_` if no run exists for the task. (required)
* provisionerId: `provisionerId` this task is targeted at. (required)
* workerType: `workerType` this task must run on. (required)
* schedulerId: `schedulerId` this task was created by. (required)
* taskGroupId: `taskGroupId` this task was created in. (required)
* reserved: Space reserved for future routing-key entries, you should always match this entry with `#`. As automatically done by our tooling, if not specified.
"""
ref = {
'exchange': 'task-completed',
'name': 'taskCompleted',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'taskId',
},
{
'multipleWords': False,
'name': 'runId',
},
{
'multipleWords': False,
'name': 'workerGroup',
},
{
'multipleWords': False,
'name': 'workerId',
},
{
'multipleWords': False,
'name': 'provisionerId',
},
{
'multipleWords': False,
'name': 'workerType',
},
{
'multipleWords': False,
'name': 'schedulerId',
},
{
'multipleWords': False,
'name': 'taskGroupId',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/task-completed-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
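# A sketch of a binding that matches every completion in one task group;
# unspecified keys default to `*` and the trailing `reserved` key to `#`,
# as noted above. The taskGroupId is a placeholder, and the returned dict
# shape is assumed from the client's topic-exchange helper.
import taskcluster

qe = taskcluster.QueueEvents()
binding = qe.taskCompleted(taskGroupId='fN1SbArXTPSVFNUvaOlinQ')
print(binding['exchange'], binding['routingKeyPattern'])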
def taskFailed(self, *args, **kwargs):
"""
Task Failed Messages
When a task ran, but failed to complete successfully, a message is posted
to this exchange. This means the worker ran the task-specific code, but
that code exited non-zero.
This exchange outputs: ``v1/task-failed-message.json#``This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* taskId: `taskId` for the task this message concerns (required)
 * runId: `runId` of the latest run for the task, `_` if no run exists for the task.
 * workerGroup: `workerGroup` of the latest run for the task, `_` if no run exists for the task.
 * workerId: `workerId` of the latest run for the task, `_` if no run exists for the task.
* provisionerId: `provisionerId` this task is targeted at. (required)
* workerType: `workerType` this task must run on. (required)
* schedulerId: `schedulerId` this task was created by. (required)
* taskGroupId: `taskGroupId` this task was created in. (required)
* reserved: Space reserved for future routing-key entries, you should always match this entry with `#`. As automatically done by our tooling, if not specified.
"""
ref = {
'exchange': 'task-failed',
'name': 'taskFailed',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'taskId',
},
{
'multipleWords': False,
'name': 'runId',
},
{
'multipleWords': False,
'name': 'workerGroup',
},
{
'multipleWords': False,
'name': 'workerId',
},
{
'multipleWords': False,
'name': 'provisionerId',
},
{
'multipleWords': False,
'name': 'workerType',
},
{
'multipleWords': False,
'name': 'schedulerId',
},
{
'multipleWords': False,
'name': 'taskGroupId',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/task-failed-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def taskException(self, *args, **kwargs):
"""
Task Exception Messages
Whenever Taskcluster fails to run a task, a message is posted to this
exchange. This happens if the task isn't completed before its `deadline`,
all retries failed (i.e. workers stopped responding), the task was
canceled by another entity, or the task carried a malformed payload.
The specific _reason_ is evident from the task status structure; refer
to the `reasonResolved` property for the last run.
This exchange outputs: ``v1/task-exception-message.json#``This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* taskId: `taskId` for the task this message concerns (required)
 * runId: `runId` of the latest run for the task, `_` if no run exists for the task.
 * workerGroup: `workerGroup` of the latest run for the task, `_` if no run exists for the task.
 * workerId: `workerId` of the latest run for the task, `_` if no run exists for the task.
* provisionerId: `provisionerId` this task is targeted at. (required)
* workerType: `workerType` this task must run on. (required)
* schedulerId: `schedulerId` this task was created by. (required)
* taskGroupId: `taskGroupId` this task was created in. (required)
* reserved: Space reserved for future routing-key entries, you should always match this entry with `#`. As automatically done by our tooling, if not specified.
"""
ref = {
'exchange': 'task-exception',
'name': 'taskException',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'taskId',
},
{
'multipleWords': False,
'name': 'runId',
},
{
'multipleWords': False,
'name': 'workerGroup',
},
{
'multipleWords': False,
'name': 'workerId',
},
{
'multipleWords': False,
'name': 'provisionerId',
},
{
'multipleWords': False,
'name': 'workerType',
},
{
'multipleWords': False,
'name': 'schedulerId',
},
{
'multipleWords': False,
'name': 'taskGroupId',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/task-exception-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def taskGroupResolved(self, *args, **kwargs):
"""
Task Group Resolved Messages
A message is published on task-group-resolved whenever all submitted
tasks (whether scheduled or unscheduled) for a given task group have
been resolved, regardless of whether they resolved as successful or
not. A task group may be resolved multiple times, since new tasks may
be submitted against an already resolved task group.
This exchange outputs: ``v1/task-group-resolved.json#``This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
* taskGroupId: `taskGroupId` for the task-group this message concerns (required)
* schedulerId: `schedulerId` for the task-group this message concerns (required)
* reserved: Space reserved for future routing-key entries, you should always match this entry with `#`. As automatically done by our tooling, if not specified.
"""
ref = {
'exchange': 'task-group-resolved',
'name': 'taskGroupResolved',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'taskGroupId',
},
{
'multipleWords': False,
'name': 'schedulerId',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/task-group-resolved.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
funcinfo = {
}
__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'QueueEvents']

View File

@ -0,0 +1,148 @@
# coding=utf-8
#####################################################
# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
#####################################################
# noqa: E128,E201
from .client import BaseClient
from .client import createApiClient
from .client import config
from .client import createTemporaryCredentials
from .client import createSession
_defaultConfig = config
class Secrets(BaseClient):
"""
The secrets service provides a simple key/value store for small bits of secret
data. Access is limited by scopes, so values can be considered secret from
those who do not have the relevant scopes.
Secrets also have an expiration date, and once a secret has expired it can no
longer be read. This is useful for short-term secrets such as a temporary
service credential or a one-time signing key.
"""
classOptions = {
"baseUrl": "https://secrets.taskcluster.net/v1/"
}
def ping(self, *args, **kwargs):
"""
Ping Server
Respond without doing anything.
This endpoint is used to check that the service is up.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
def set(self, *args, **kwargs):
"""
Set Secret
Set the secret associated with some key. If the secret already exists, it is
updated instead.
This method takes input: ``v1/secret.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["set"], *args, **kwargs)
def remove(self, *args, **kwargs):
"""
Delete Secret
Delete the secret associated with some key.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["remove"], *args, **kwargs)
def get(self, *args, **kwargs):
"""
Read Secret
Read the secret associated with some key. If the secret has recently
expired, the response code 410 is returned. If the caller lacks the
scope necessary to get the secret, the call will fail with a 403 code
regardless of whether the secret exists.
This method gives output: ``v1/secret.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["get"], *args, **kwargs)
def list(self, *args, **kwargs):
"""
List Secrets
List the names of all secrets.
By default this end-point will try to return up to 1000 secret names in one
request. But it **may return fewer**, even if more secrets are available.
It may also return a `continuationToken` even though there are no more
results. However, you can only be sure to have seen all results if you
keep calling `list` with the last `continuationToken` until you
get a result without a `continuationToken`.
If you are not interested in listing all the secrets at once, you may
use the query-string option `limit` to return fewer.
This method gives output: ``v1/secret-list.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["list"], *args, **kwargs)
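# A set/get/remove round trip, assuming the caller holds the matching
# secrets:* scopes; the secret name and value are placeholders following
# the secret.json schema above.
import taskcluster

secrets = taskcluster.Secrets()
name = 'garbage/example/secret'
secrets.set(name, {
    'secret': {'api-key': 'hunter2'},
    'expires': taskcluster.fromNowJSON('1 hour'),
})
print(secrets.get(name)['secret'])
secrets.remove(name)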
funcinfo = {
"get": {
'args': ['name'],
'method': 'get',
'name': 'get',
'output': 'v1/secret.json#',
'route': '/secret/<name>',
'stability': 'stable',
},
"list": {
'args': [],
'method': 'get',
'name': 'list',
'output': 'v1/secret-list.json#',
'query': ['continuationToken', 'limit'],
'route': '/secrets',
'stability': 'stable',
},
"ping": {
'args': [],
'method': 'get',
'name': 'ping',
'route': '/ping',
'stability': 'stable',
},
"remove": {
'args': ['name'],
'method': 'delete',
'name': 'remove',
'route': '/secret/<name>',
'stability': 'stable',
},
"set": {
'args': ['name'],
'input': 'v1/secret.json#',
'method': 'put',
'name': 'set',
'route': '/secret/<name>',
'stability': 'stable',
},
}
__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Secrets']

View File

@ -0,0 +1,70 @@
# coding=utf-8
#####################################################
# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
#####################################################
# noqa: E128,E201
from .client import BaseClient
from .client import createApiClient
from .client import config
from .client import createTemporaryCredentials
from .client import createSession
_defaultConfig = config
class TreeherderEvents(BaseClient):
"""
The taskcluster-treeherder service is responsible for processing
task events published by TaskCluster Queue and producing job messages
that are consumable by Treeherder.
This exchange allows job messages to be consumed by any queue that is
attached to the exchange. This could be a production Treeherder instance,
a local development environment, or a custom dashboard.
"""
classOptions = {
"exchangePrefix": "exchange/taskcluster-treeherder/v1/"
}
def jobs(self, *args, **kwargs):
"""
Job Messages
When a task run is scheduled or resolved, a message is posted to
this exchange in a Treeherder consumable format.
This exchange outputs: ``http://schemas.taskcluster.net/taskcluster-treeherder/v1/pulse-job.json#``This exchange takes the following keys:
* destination: destination (required)
* project: project (required)
* reserved: Space reserved for future routing-key entries, you should always match this entry with `#`. As automatically done by our tooling, if not specified.
"""
ref = {
'exchange': 'jobs',
'name': 'jobs',
'routingKey': [
{
'multipleWords': False,
'name': 'destination',
},
{
'multipleWords': False,
'name': 'project',
},
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'http://schemas.taskcluster.net/taskcluster-treeherder/v1/pulse-job.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
funcinfo = {
}
__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'TreeherderEvents']

View File

@ -0,0 +1,407 @@
from __future__ import absolute_import, division, print_function
import re
import json
import datetime
import base64
import logging
import os
import requests
import requests.exceptions
import slugid
import time
import six
import sys
import random
from . import exceptions
MAX_RETRIES = 5
DELAY_FACTOR = 0.1
RANDOMIZATION_FACTOR = 0.25
MAX_DELAY = 30
log = logging.getLogger(__name__)
# Regular expression matching: X days Y hours Z minutes
# todo: support hr, wk, yr
# Raw strings keep the backslash escapes intact and avoid invalid
# escape-sequence warnings on newer Pythons.
r = re.compile(''.join([
    r'^(\s*(?P<years>\d+)\s*y(ears?)?)?',
    r'(\s*(?P<months>\d+)\s*mo(nths?)?)?',
    r'(\s*(?P<weeks>\d+)\s*w(eeks?)?)?',
    r'(\s*(?P<days>\d+)\s*d(ays?)?)?',
    r'(\s*(?P<hours>\d+)\s*h(ours?)?)?',
    r'(\s*(?P<minutes>\d+)\s*m(in(utes?)?)?)?\s*',
    r'(\s*(?P<seconds>\d+)\s*s(ec(onds?)?)?)?\s*$',
]))
def calculateSleepTime(attempt):
""" From the go client
https://github.com/taskcluster/go-got/blob/031f55c/backoff.go#L24-L29
"""
if attempt <= 0:
return 0
# We subtract one to get exponents: 0, 1, 2, 3, 4, ..
delay = float(2 ** (attempt - 1)) * float(DELAY_FACTOR)
# Apply randomization factor
delay = delay * (RANDOMIZATION_FACTOR * (random.random() * 2 - 1) + 1)
# Always limit with a maximum delay
return min(delay, MAX_DELAY)
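# Ignoring the randomization factor, the resulting backoff is roughly
# 0.1, 0.2, 0.4, 0.8, 1.6 seconds for attempts 1-5, capped at MAX_DELAY:
for attempt in range(1, 6):
    print(attempt, calculateSleepTime(attempt))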
def toStr(obj, encoding='utf-8'):
if six.PY3 and isinstance(obj, six.binary_type):
obj = obj.decode(encoding)
else:
obj = str(obj)
return obj
def fromNow(offset, dateObj=None):
"""
Generate a `datetime.datetime` instance which is offset using a string.
See the README.md for a full example, but offset could be '1 day' for
a datetime object one day in the future
"""
# We want to handle past dates as well as future
future = True
offset = offset.lstrip()
if offset.startswith('-'):
future = False
offset = offset[1:].lstrip()
if offset.startswith('+'):
offset = offset[1:].lstrip()
# Parse offset
m = r.match(offset)
if m is None:
raise ValueError("offset string: '%s' does not parse" % offset)
# In order to support years and months we convert them to days, since
# timedelta only goes as high as weeks
days = 0
hours = 0
minutes = 0
seconds = 0
if m.group('years'):
years = int(m.group('years'))
days += 365 * years
if m.group('months'):
months = int(m.group('months'))
days += 30 * months
days += int(m.group('days') or 0)
hours += int(m.group('hours') or 0)
minutes += int(m.group('minutes') or 0)
seconds += int(m.group('seconds') or 0)
# Offset datetime from utc
delta = datetime.timedelta(
weeks=int(m.group('weeks') or 0),
days=days,
hours=hours,
minutes=minutes,
seconds=seconds,
)
if not dateObj:
dateObj = datetime.datetime.utcnow()
return dateObj + delta if future else dateObj - delta
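# For example, with a fixed base date (months and years are approximated as
# 30 and 365 days by the calculation above):
import datetime
base_date = datetime.datetime(2018, 10, 30)
print(fromNow('2 days 3 hours', base_date))  # 2018-11-01 03:00:00
print(fromNow('-1 hour', base_date))         # 2018-10-29 23:00:00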
def fromNowJSON(offset):
"""
Like fromNow() but returns in a taskcluster-json compatible way
"""
return stringDate(fromNow(offset))
def dumpJson(obj, **kwargs):
    """ Match JS's JSON.stringify. When using the default separators,
base64 encoding JSON results in \n sequences in the output. Hawk
barfs in your face if you have that in the text"""
def handleDateAndBinaryForJs(x):
if six.PY3 and isinstance(x, six.binary_type):
x = x.decode()
if isinstance(x, datetime.datetime) or isinstance(x, datetime.date):
return stringDate(x)
else:
return x
d = json.dumps(obj, separators=(',', ':'), default=handleDateAndBinaryForJs, **kwargs)
assert '\n' not in d
return d
def stringDate(date):
# Convert to isoFormat
string = date.isoformat()
# If there is no timezone and no Z added, we'll add one at the end.
# This is just to be fully compliant with:
# https://tools.ietf.org/html/rfc3339#section-5.6
if string.endswith('+00:00'):
return string[:-6] + 'Z'
if date.utcoffset() is None and string[-1] != 'Z':
return string + 'Z'
return string
def makeB64UrlSafe(b64str):
""" Make a base64 string URL Safe """
if isinstance(b64str, six.text_type):
b64str = b64str.encode()
# see RFC 4648, sec. 5
return b64str.replace(b'+', b'-').replace(b'/', b'_')
def makeB64UrlUnsafe(b64str):
""" Make a base64 string URL Unsafe """
if isinstance(b64str, six.text_type):
b64str = b64str.encode()
# see RFC 4648, sec. 5
return b64str.replace(b'-', b'+').replace(b'_', b'/')
def encodeStringForB64Header(s):
    """ HTTP headers can't have newlines in them, so strip them out """
if isinstance(s, six.text_type):
s = s.encode()
return base64.encodestring(s).strip().replace(b'\n', b'')
def slugId():
""" Generate a taskcluster slugid. This is a V4 UUID encoded into
URL-Safe Base64 (RFC 4648, sec 5) with '=' padding removed """
return slugid.nice()
def stableSlugId():
"""Returns a closure which can be used to generate stable slugIds.
Stable slugIds can be used in a graph to specify task IDs in multiple
places without regenerating them, e.g. taskId, requires, etc.
"""
_cache = {}
def closure(name):
if name not in _cache:
_cache[name] = slugId()
return _cache[name]
return closure
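# For example, the same name always maps to the same slugId within one closure:
gen = stableSlugId()
build_id = gen('build')
assert gen('build') == build_id  # stable for repeated lookups
assert gen('test') != build_id   # distinct names get distinct slugIds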
def scopeMatch(assumedScopes, requiredScopeSets):
"""
Take a list of assumed scopes, and a list of required scope sets in
disjunctive normal form, and check if any of the required scope sets are
satisfied.
Example:
requiredScopeSets = [
["scopeA", "scopeB"],
["scopeC"]
]
In this case assumedScopes must contain either
"scopeA" AND "scopeB", or just "scopeC".
"""
for scopeSet in requiredScopeSets:
for requiredScope in scopeSet:
for scope in assumedScopes:
if scope == requiredScope:
# requiredScope satisfied, no need to check more scopes
break
if scope.endswith("*") and requiredScope.startswith(scope[:-1]):
# requiredScope satisfied, no need to check more scopes
break
else:
# requiredScope not satisfied, stop checking scopeSet
break
else:
# scopeSet satisfied, so we're happy
return True
# none of the requiredScopeSets were satisfied
return False
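# For example, with an assumed star scope (prefix matching, as implemented above):
assumed = ['scopeA', 'queue:*']
print(scopeMatch(assumed, [['scopeA', 'scopeB'], ['queue:create-task']]))  # True
print(scopeMatch(assumed, [['scopeB']]))                                   # False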
def scope_match(assumed_scopes, required_scope_sets):
    """ This is a deprecated form of scopeMatch(assumedScopes, requiredScopeSets).
    That form should be used instead.
"""
import warnings
warnings.warn('NOTE: scope_match is deprecated. Use scopeMatch')
return scopeMatch(assumed_scopes, required_scope_sets)
def makeHttpRequest(method, url, payload, headers, retries=MAX_RETRIES, session=None):
    """ Make an HTTP request and retry it until success; returns the response """
retry = -1
response = None
while retry < retries:
retry += 1
# if this isn't the first retry then we sleep
if retry > 0:
snooze = float(retry * retry) / 10.0
log.info('Sleeping %0.2f seconds for exponential backoff', snooze)
time.sleep(snooze)
# Seek payload to start, if it is a file
if hasattr(payload, 'seek'):
payload.seek(0)
log.debug('Making attempt %d', retry)
try:
response = makeSingleHttpRequest(method, url, payload, headers, session)
except requests.exceptions.RequestException as rerr:
if retry < retries:
log.warn('Retrying because of: %s' % rerr)
continue
# raise a connection exception
raise rerr
# Handle non 2xx status code and retry if possible
try:
response.raise_for_status()
except requests.exceptions.RequestException as rerr:
pass
status = response.status_code
if 500 <= status < 600 and retry < retries:
if retry < retries:
log.warn('Retrying because of: %d status' % status)
continue
else:
raise exceptions.TaskclusterRestFailure("Unknown Server Error", superExc=None)
return response
# This code-path should be unreachable
assert False, "Error from last retry should have been raised!"
def makeSingleHttpRequest(method, url, payload, headers, session=None):
method = method.upper()
log.debug('Making a %s request to %s', method, url)
log.debug('HTTP Headers: %s' % str(headers))
log.debug('HTTP Payload: %s (limit 100 char)' % str(payload)[:100])
obj = session if session else requests
response = obj.request(method.upper(), url, data=payload, headers=headers)
log.debug('Received HTTP Status: %s' % response.status_code)
log.debug('Received HTTP Headers: %s' % str(response.headers))
return response
def putFile(filename, url, contentType):
with open(filename, 'rb') as f:
contentLength = os.fstat(f.fileno()).st_size
return makeHttpRequest('put', url, f, headers={
'Content-Length': contentLength,
'Content-Type': contentType,
})
def encryptEnvVar(taskId, startTime, endTime, name, value, keyFile):
raise Exception("Encrypted environment variables are no longer supported")
def decryptMessage(message, privateKey):
raise Exception("Decryption is no longer supported")
def isExpired(certificate):
""" Check if certificate is expired """
if isinstance(certificate, six.string_types):
certificate = json.loads(certificate)
expiry = certificate.get('expiry', 0)
return expiry < int(time.time() * 1000) + 20 * 60
def authenticate(description=None):
"""
Open a web-browser to login.taskcluster.net and listen on localhost for
a callback with credentials in query-string.
The description will be shown on login.taskcluster.net; if not provided,
a default message with the script path will be displayed.
"""
# Importing here to avoid loading these 'obscure' modules before they're
# needed. Most clients won't use this feature, so we don't want issues with
# these modules to affect the library. Maybe they don't work in some environments.
import webbrowser
from six.moves import urllib
from six.moves.urllib.parse import quote
from six.moves import BaseHTTPServer  # works on both Python 2 and 3
if not description:
script = '[interpreter/unknown]'
main = sys.modules.get('__main__', None)
if main and hasattr(main, '__file__'):
script = os.path.abspath(main.__file__)
description = (
"Python script: `%s`\n\nWould like some temporary credentials."
% script
)
creds = [None]
class AuthCallBackRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def log_message(self, format, *args):
pass
def do_GET(self):
url = urllib.parse.urlparse(self.path)
query = urllib.parse.parse_qs(url.query)
clientId = query.get('clientId', [None])[0]
accessToken = query.get('accessToken', [None])[0]
certificate = query.get('certificate', [None])[0]
hasCreds = clientId and accessToken and certificate
if hasCreds:
creds[0] = {
"clientId": clientId,
"accessToken": accessToken,
"certificate": certificate
}
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
if hasCreds:
self.wfile.write(b"""
<h1>Credentials transferred successfully</h1>
<i>You can close this window now.</i>
<script>window.close();</script>
""")
else:
self.wfile.write(b"""
<h1>Transfer of credentials failed!</h1>
<p>Something went wrong, you can navigate back and try again...</p>
""")
return
# Create server on localhost at random port
retries = 5
while retries > 0:
    try:
        server = BaseHTTPServer.HTTPServer(('', 0), AuthCallBackRequestHandler)
        break
    except Exception:
        retries -= 1
port = server.server_address[1]
query = "?target=" + quote('http://localhost:' + str(port), '')
query += "&description=" + quote(description, '')
webbrowser.open('https://login.taskcluster.net' + query, 1, True)
print("")
print("-------------------------------------------------------")
print(" Opening browser window to login.taskcluster.net")
print(" Asking you to grant temporary credentials to:")
print(" http://localhost:" + str(port))
print("-------------------------------------------------------")
print("")
while not creds[0]:
server.handle_request()
return creds[0]

View File

@ -0,0 +1,61 @@
from __future__ import division, print_function, absolute_import
import unittest
import datetime
import os
import asyncio
import base
import taskcluster.aio.auth as subjectAsync
@unittest.skipIf(os.environ.get('NO_TESTS_OVER_WIRE'), "Skipping tests over wire")
class TestAuthenticationAsync(base.TCTest):
def test_async_works_with_permanent_credentials(self):
"""we can call methods which require authentication with valid
permacreds"""
loop = asyncio.get_event_loop()
async def x():
async with subjectAsync.createSession(loop=loop) as session:
client = subjectAsync.Auth({
'credentials': {
'clientId': 'tester',
'accessToken': 'no-secret',
},
}, session=session)
result = await client.testAuthenticate({
'clientScopes': ['test:a'],
'requiredScopes': ['test:a'],
})
self.assertEqual(result, {'scopes': ['test:a'], 'clientId': 'tester'})
loop.run_until_complete(x())
def test_async_works_with_temporary_credentials(self):
"""we can call methods which require authentication with temporary
credentials generated by python client"""
loop = asyncio.get_event_loop()
async def x():
async with subjectAsync.createSession(loop=loop) as session:
tempCred = subjectAsync.createTemporaryCredentials(
'tester',
'no-secret',
datetime.datetime.utcnow(),
datetime.datetime.utcnow() + datetime.timedelta(hours=1),
['test:xyz'],
)
client = subjectAsync.Auth({
'credentials': tempCred,
}, session=session)
result = await client.testAuthenticate({
'clientScopes': ['test:*'],
'requiredScopes': ['test:xyz'],
})
self.assertEqual(result, {'scopes': ['test:xyz'], 'clientId': 'tester'})
loop.run_until_complete(x())

View File

@ -0,0 +1,918 @@
from __future__ import division, print_function
import types
import unittest
import time
import datetime
from six.moves import urllib
import os
import re
import json
import mock
import httmock
import requests
import base
import taskcluster.auth as subject
import taskcluster.exceptions as exc
import taskcluster.utils as utils
class ClientTest(base.TCTest):
realTimeSleep = time.sleep
def setUp(self):
subject.config['credentials'] = {
'clientId': 'clientId',
'accessToken': 'accessToken',
}
keys = [
base.createTopicExchangeKey('primary_key', constant='primary'),
base.createTopicExchangeKey('norm1'),
base.createTopicExchangeKey('norm2'),
base.createTopicExchangeKey('norm3'),
base.createTopicExchangeKey('multi_key', multipleWords=True),
]
topicEntry = base.createApiEntryTopicExchange('topicName', 'topicExchange', routingKey=keys)
entries = [
base.createApiEntryFunction('no_args_no_input', 0, False),
base.createApiEntryFunction('two_args_no_input', 2, False),
base.createApiEntryFunction('no_args_with_input', 0, True),
base.createApiEntryFunction('two_args_with_input', 2, True),
base.createApiEntryFunction('NEVER_CALL_ME', 0, False),
topicEntry
]
self.apiRef = base.createApiRef(entries=entries)
self.clientClass = subject.createApiClient('testApi', self.apiRef)
self.client = self.clientClass()
# Patch time.sleep so that we don't delay tests
sleepPatcher = mock.patch('time.sleep')
sleepSleep = sleepPatcher.start()
sleepSleep.return_value = None
self.addCleanup(sleepPatcher.stop)
def tearDown(self):
time.sleep = self.realTimeSleep
class TestSubArgsInRoute(ClientTest):
def test_valid_no_subs(self):
provided = {'route': '/no/args/here', 'name': 'test'}
expected = 'no/args/here'
result = self.client._subArgsInRoute(provided, {})
self.assertEqual(expected, result)
def test_valid_one_sub(self):
provided = {'route': '/one/<argToSub>/here', 'name': 'test'}
expected = 'one/value/here'
arguments = {'argToSub': 'value'}
result = self.client._subArgsInRoute(provided, arguments)
self.assertEqual(expected, result)
def test_invalid_one_sub(self):
with self.assertRaises(exc.TaskclusterFailure):
self.client._subArgsInRoute({
'route': '/one/<argToSub>/here',
'name': 'test'
}, {'unused': 'value'})
def test_invalid_route_no_sub(self):
with self.assertRaises(exc.TaskclusterFailure):
self.client._subArgsInRoute({
'route': 'askldjflkasdf',
'name': 'test'
}, {'should': 'fail'})
def test_invalid_route_no_arg(self):
with self.assertRaises(exc.TaskclusterFailure):
self.client._subArgsInRoute({
'route': 'askldjflkasdf',
'name': 'test'
}, {'should': 'fail'})
class TestProcessArgs(ClientTest):
def test_no_args(self):
self.assertEqual(({}, None, {}, None, None), self.client._processArgs({'args': [], 'name': 'test'}))
def test_finds_payload(self):
expected = ({}, {'a': 123}, {}, None, None)
actual = self.client._processArgs({'args': [], 'name': 'test', 'input': True}, {'a': 123})
self.assertEqual(expected, actual)
def test_positional_args_only(self):
expected = {'test': 'works', 'test2': 'still works'}
entry = {'args': ['test', 'test2'], 'name': 'test'}
actual = self.client._processArgs(entry, 'works', 'still works')
self.assertEqual((expected, None, {}, None, None), actual)
def test_keyword_args_only(self):
expected = {'test': 'works', 'test2': 'still works'}
entry = {'args': ['test', 'test2'], 'name': 'test'}
actual = self.client._processArgs(entry, test2='still works', test='works')
self.assertEqual((expected, None, {}, None, None), actual)
def test_int_args(self):
expected = {'test': 'works', 'test2': 42}
entry = {'args': ['test', 'test2'], 'name': 'test'}
actual = self.client._processArgs(entry, 'works', 42)
self.assertEqual((expected, None, {}, None, None), actual)
def test_keyword_and_positional(self):
entry = {'args': ['test'], 'name': 'test'}
with self.assertRaises(exc.TaskclusterFailure):
self.client._processArgs(entry, ['broken'], test='works')
def test_invalid_not_enough_args(self):
with self.assertRaises(exc.TaskclusterFailure):
self.client._processArgs({'args': ['test'], 'name': 'test'})
def test_invalid_too_many_positional_args(self):
with self.assertRaises(exc.TaskclusterFailure):
self.client._processArgs({'args': ['test'], 'name': 'test'}, 'enough', 'one too many')
def test_invalid_too_many_keyword_args(self):
with self.assertRaises(exc.TaskclusterFailure):
self.client._processArgs({
'args': ['test'],
'name': 'test'
}, test='enough', test2='one too many')
def test_invalid_missing_arg_positional(self):
with self.assertRaises(exc.TaskclusterFailure):
self.client._processArgs({'args': ['test', 'test2'], 'name': 'test'}, 'enough')
def test_invalid_not_enough_args_because_of_overwriting(self):
with self.assertRaises(exc.TaskclusterFailure):
self.client._processArgs({
'args': ['test', 'test2'],
'name': 'test'
}, 'enough', test='enough')
def test_invalid_positional_not_string_empty_dict(self):
with self.assertRaises(exc.TaskclusterFailure):
self.client._processArgs({'args': ['test'], 'name': 'test'}, {})
def test_invalid_positional_not_string_non_empty_dict(self):
with self.assertRaises(exc.TaskclusterFailure):
self.client._processArgs({'args': ['test'], 'name': 'test'}, {'john': 'ford'})
def test_calling_convention_1_without_payload(self):
params, payload, query, _, _ = self.client._processArgs({'args': ['k1', 'k2'], 'name': 'test'}, 1, 2)
self.assertEqual(params, {'k1': 1, 'k2': 2})
self.assertEqual(payload, None)
self.assertEqual(query, {})
def test_calling_convention_1_with_payload(self):
params, payload, query, _, _ = self.client._processArgs(
{'args': ['k1', 'k2'], 'name': 'test', 'input': True},
1,
2,
{'A': 123}
)
self.assertEqual(params, {'k1': 1, 'k2': 2})
self.assertEqual(payload, {'A': 123})
self.assertEqual(query, {})
def test_calling_convention_2_without_payload(self):
params, payload, query, _, _ = self.client._processArgs({'args': ['k1', 'k2'], 'name': 'test'}, k1=1, k2=2)
self.assertEqual(params, {'k1': 1, 'k2': 2})
self.assertEqual(payload, None)
self.assertEqual(query, {})
def test_calling_convention_2_with_payload(self):
params, payload, query, _, _ = self.client._processArgs(
{'args': ['k1', 'k2'], 'name': 'test', 'input': True},
{'A': 123}, k1=1, k2=2
)
self.assertEqual(params, {'k1': 1, 'k2': 2})
self.assertEqual(payload, {'A': 123})
self.assertEqual(query, {})
def test_calling_convention_3_without_payload_without_query(self):
params, payload, query, _, _ = self.client._processArgs(
{'args': ['k1', 'k2'], 'name': 'test'},
params={'k1': 1, 'k2': 2}
)
self.assertEqual(params, {'k1': 1, 'k2': 2})
self.assertEqual(payload, None)
self.assertEqual(query, {})
def test_calling_convention_3_with_payload_without_query(self):
params, payload, query, _, _ = self.client._processArgs(
{'args': ['k1', 'k2'], 'name': 'test'},
params={'k1': 1, 'k2': 2},
payload={'A': 123}
)
self.assertEqual(params, {'k1': 1, 'k2': 2})
self.assertEqual(payload, {'A': 123})
self.assertEqual(query, {})
def test_calling_convention_3_with_payload_with_query(self):
params, payload, query, _, _ = self.client._processArgs(
{'args': ['k1', 'k2'], 'name': 'test'},
params={'k1': 1, 'k2': 2},
payload={'A': 123},
query={'B': 456}
)
self.assertEqual(params, {'k1': 1, 'k2': 2})
self.assertEqual(payload, {'A': 123})
self.assertEqual(query, {'B': 456})
def test_calling_convention_3_without_payload_with_query(self):
params, payload, query, _, _ = self.client._processArgs(
{'args': ['k1', 'k2'], 'name': 'test'},
params={'k1': 1, 'k2': 2},
query={'B': 456}
)
self.assertEqual(params, {'k1': 1, 'k2': 2})
self.assertEqual(payload, None)
self.assertEqual(query, {'B': 456})
def test_calling_convention_3_with_positional_arguments_with_payload_with_query(self):
params, payload, query, _, _ = self.client._processArgs(
{'args': ['k1', 'k2'], 'name': 'test'},
1,
2,
query={'B': 456},
payload={'A': 123}
)
self.assertEqual(params, {'k1': 1, 'k2': 2})
self.assertEqual(payload, {'A': 123})
self.assertEqual(query, {'B': 456})
def test_calling_convention_3_with_pagination(self):
def a(x):
return x
_, _, _, ph, _ = self.client._processArgs({
'args': ['k1', 'k2'],
'name': 'test',
'query': ['continuationToken', 'limit'],
}, 1, 2, paginationHandler=a)
self.assertIs(ph, a)
def test_calling_convention_3_with_pos_args_same_as_param_kwarg_dict_vals_with_payload_with_query(self):
with self.assertRaises(exc.TaskclusterFailure):
params, payload, query, _, _ = self.client._processArgs(
{'args': ['k1', 'k2'], 'name': 'test'},
1,
2,
params={'k1': 1, 'k2': 2},
query={'B': 456},
payload={'A': 123}
)
# This could probably be done better with Mock
class ObjWithDotJson(object):
def __init__(self, status_code, x):
self.status_code = status_code
self.x = x
def json(self):
return self.x
def raise_for_status(self):
if self.status_code >= 300 or self.status_code < 200:
raise requests.exceptions.HTTPError()
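# _makeHttpRequest retries 5xx responses and connection errors up to
# options['maxRetries'] times before raising; time.sleep is patched in setUp,
# so the backoff between retries doesn't slow the tests down.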
class TestMakeHttpRequest(ClientTest):
def setUp(self):
ClientTest.setUp(self)
def test_success_first_try(self):
with mock.patch.object(utils, 'makeSingleHttpRequest') as p:
expected = {'test': 'works'}
p.return_value = ObjWithDotJson(200, expected)
v = self.client._makeHttpRequest('GET', 'http://www.example.com', None)
p.assert_called_once_with('GET', 'http://www.example.com', None, mock.ANY)
self.assertEqual(expected, v)
def test_success_first_try_payload(self):
with mock.patch.object(utils, 'makeSingleHttpRequest') as p:
expected = {'test': 'works'}
p.return_value = ObjWithDotJson(200, expected)
v = self.client._makeHttpRequest('GET', 'http://www.example.com', {'payload': 2})
p.assert_called_once_with('GET', 'http://www.example.com',
utils.dumpJson({'payload': 2}), mock.ANY)
self.assertEqual(expected, v)
def test_success_fifth_try_status_code(self):
with mock.patch.object(utils, 'makeSingleHttpRequest') as p:
expected = {'test': 'works'}
sideEffect = [
ObjWithDotJson(500, None),
ObjWithDotJson(500, None),
ObjWithDotJson(500, None),
ObjWithDotJson(500, None),
ObjWithDotJson(200, expected)
]
p.side_effect = sideEffect
expectedCalls = [mock.call('GET', 'http://www.example.com', None, mock.ANY)
for x in range(self.client.options['maxRetries'])]
v = self.client._makeHttpRequest('GET', 'http://www.example.com', None)
p.assert_has_calls(expectedCalls)
self.assertEqual(expected, v)
def test_exhaust_retries_try_status_code(self):
with mock.patch.object(utils, 'makeSingleHttpRequest') as p:
msg = {'message': 'msg', 'test': 'works'}
sideEffect = [
ObjWithDotJson(500, msg),
ObjWithDotJson(500, msg),
ObjWithDotJson(500, msg),
ObjWithDotJson(500, msg),
ObjWithDotJson(500, msg), # exhaust retries
ObjWithDotJson(500, msg),
ObjWithDotJson(500, msg),
ObjWithDotJson(500, msg),
ObjWithDotJson(500, msg),
ObjWithDotJson(500, msg),
ObjWithDotJson(500, msg),
ObjWithDotJson(200, {'got this': 'wrong'})
]
p.side_effect = sideEffect
expectedCalls = [mock.call('GET', 'http://www.example.com', None, mock.ANY)
for x in range(self.client.options['maxRetries'] + 1)]
with self.assertRaises(exc.TaskclusterRestFailure):
try:
self.client._makeHttpRequest('GET', 'http://www.example.com', None)
except exc.TaskclusterRestFailure as err:
self.assertEqual('msg', str(err))
self.assertEqual(500, err.status_code)
self.assertEqual(msg, err.body)
raise err
p.assert_has_calls(expectedCalls)
def test_success_fifth_try_connection_errors(self):
with mock.patch.object(utils, 'makeSingleHttpRequest') as p:
expected = {'test': 'works'}
sideEffect = [
requests.exceptions.RequestException,
requests.exceptions.RequestException,
requests.exceptions.RequestException,
requests.exceptions.RequestException,
ObjWithDotJson(200, expected)
]
p.side_effect = sideEffect
expectedCalls = [mock.call('GET', 'http://www.example.com', None, mock.ANY)
for x in range(self.client.options['maxRetries'])]
v = self.client._makeHttpRequest('GET', 'http://www.example.com', None)
p.assert_has_calls(expectedCalls)
self.assertEqual(expected, v)
def test_failure_status_code(self):
with mock.patch.object(utils, 'makeSingleHttpRequest') as p:
p.return_value = ObjWithDotJson(500, None)
expectedCalls = [mock.call('GET', 'http://www.example.com', None, mock.ANY)
for x in range(self.client.options['maxRetries'])]
with self.assertRaises(exc.TaskclusterRestFailure):
self.client._makeHttpRequest('GET', 'http://www.example.com', None)
p.assert_has_calls(expectedCalls)
def test_failure_connection_errors(self):
with mock.patch.object(utils, 'makeSingleHttpRequest') as p:
p.side_effect = requests.exceptions.RequestException
expectedCalls = [mock.call('GET', 'http://www.example.com', None, mock.ANY)
for x in range(self.client.options['maxRetries'])]
with self.assertRaises(exc.TaskclusterConnectionError):
self.client._makeHttpRequest('GET', 'http://www.example.com', None)
p.assert_has_calls(expectedCalls)
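# Options passed to a client instance should override the defaults without
# mutating them for other instances.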
class TestOptions(ClientTest):
def setUp(self):
ClientTest.setUp(self)
self.clientClass2 = subject.createApiClient('testApi', base.createApiRef())
self.client2 = self.clientClass2({'baseUrl': 'http://notlocalhost:5888/v2'})
def test_defaults_should_work(self):
self.assertEqual(self.client.options['baseUrl'], 'https://fake.taskcluster.net/v1')
self.assertEqual(self.client2.options['baseUrl'], 'http://notlocalhost:5888/v2')
def test_change_default_doesnt_change_previous_instances(self):
prevMaxRetries = subject._defaultConfig['maxRetries']
with mock.patch.dict(subject._defaultConfig, {'maxRetries': prevMaxRetries + 1}):
self.assertEqual(self.client.options['maxRetries'], prevMaxRetries)
def test_credentials_which_cannot_be_encoded_in_unicode_work(self):
badCredentials = {
'accessToken': u"\U0001F4A9",
'clientId': u"\U0001F4A9",
}
with self.assertRaises(exc.TaskclusterAuthFailure):
subject.Auth({'credentials': badCredentials})
class TestMakeApiCall(ClientTest):
""" This class covers both the _makeApiCall function logic as well as the
logic involved in setting up the api member functions since these are very
related things"""
def setUp(self):
ClientTest.setUp(self)
patcher = mock.patch.object(self.client, 'NEVER_CALL_ME')
never_call = patcher.start()
never_call.side_effect = AssertionError
self.addCleanup(never_call.stop)
def test_creates_methods(self):
self.assertIsInstance(self.client.no_args_no_input, types.MethodType)
def test_methods_setup_correctly(self):
        # Because of how closure scoping works, the last API entry dict could
        # end up being used for all entries, which would be wrong. This makes
        # sure the scoping isn't broken.
self.assertIsNot(self.client.NEVER_CALL_ME, self.client.no_args_no_input)
def test_hits_no_args_no_input(self):
expected = 'works'
with mock.patch.object(self.client, '_makeHttpRequest') as patcher:
patcher.return_value = expected
actual = self.client.no_args_no_input()
self.assertEqual(expected, actual)
patcher.assert_called_once_with('get', 'no_args_no_input', None)
def test_hits_two_args_no_input(self):
expected = 'works'
with mock.patch.object(self.client, '_makeHttpRequest') as patcher:
patcher.return_value = expected
actual = self.client.two_args_no_input('argone', 'argtwo')
self.assertEqual(expected, actual)
patcher.assert_called_once_with('get', 'two_args_no_input/argone/argtwo', None)
def test_hits_no_args_with_input(self):
expected = 'works'
with mock.patch.object(self.client, '_makeHttpRequest') as patcher:
patcher.return_value = expected
actual = self.client.no_args_with_input({})
self.assertEqual(expected, actual)
patcher.assert_called_once_with('get', 'no_args_with_input', {})
def test_hits_two_args_with_input(self):
expected = 'works'
with mock.patch.object(self.client, '_makeHttpRequest') as patcher:
patcher.return_value = expected
actual = self.client.two_args_with_input('argone', 'argtwo', {})
self.assertEqual(expected, actual)
patcher.assert_called_once_with('get', 'two_args_with_input/argone/argtwo', {})
    def test_input_is_processed(self):
expected = 'works'
expected_input = {'test': 'does work'}
with mock.patch.object(self.client, '_makeHttpRequest') as patcher:
patcher.return_value = expected
actual = self.client.no_args_with_input(expected_input)
self.assertEqual(expected, actual)
patcher.assert_called_once_with('get', 'no_args_with_input', expected_input)
def test_kwargs(self):
expected = 'works'
with mock.patch.object(self.client, '_makeHttpRequest') as patcher:
patcher.return_value = expected
actual = self.client.two_args_with_input({}, arg0='argone', arg1='argtwo')
self.assertEqual(expected, actual)
patcher.assert_called_once_with('get', 'two_args_with_input/argone/argtwo', {})
def test_mixing_kw_and_positional_fails(self):
with self.assertRaises(exc.TaskclusterFailure):
self.client.two_args_no_input('arg1', arg2='arg2')
def test_missing_input_raises(self):
with self.assertRaises(exc.TaskclusterFailure):
self.client.no_args_with_input()
# TODO: I should run the same things through the node client and compare the output
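# topicName builds an AMQP binding: a string is passed through as the routing
# key pattern, while a dict fills in routing-key fields, defaulting single-word
# keys to '*' and the trailing multi-word key to '#'.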
class TestTopicExchange(ClientTest):
def test_string_pass_through(self):
expected = 'johnwrotethis'
actual = self.client.topicName(expected)
self.assertEqual(expected, actual['routingKeyPattern'])
def test_exchange(self):
expected = 'test/v1/topicExchange'
actual = self.client.topicName('')
self.assertEqual(expected, actual['exchange'])
def test_exchange_trailing_slash(self):
self.client.options['exchangePrefix'] = 'test/v1/'
expected = 'test/v1/topicExchange'
actual = self.client.topicName('')
self.assertEqual(expected, actual['exchange'])
def test_constant(self):
expected = 'primary.*.*.*.#'
actual = self.client.topicName({})
self.assertEqual(expected, actual['routingKeyPattern'])
def test_does_insertion(self):
expected = 'primary.*.value2.*.#'
actual = self.client.topicName({'norm2': 'value2'})
self.assertEqual(expected, actual['routingKeyPattern'])
def test_too_many_star_args(self):
with self.assertRaises(exc.TaskclusterTopicExchangeFailure):
self.client.topicName({'taskId': '123'}, 'another')
def test_both_args_and_kwargs(self):
with self.assertRaises(exc.TaskclusterTopicExchangeFailure):
self.client.topicName({'taskId': '123'}, taskId='123')
def test_no_args_no_kwargs(self):
expected = 'primary.*.*.*.#'
actual = self.client.topicName()
self.assertEqual(expected, actual['routingKeyPattern'])
actual = self.client.topicName({})
self.assertEqual(expected, actual['routingKeyPattern'])
class TestBuildUrl(ClientTest):
def test_build_url_positional(self):
expected = 'https://fake.taskcluster.net/v1/two_args_no_input/arg0/arg1'
actual = self.client.buildUrl('two_args_no_input', 'arg0', 'arg1')
self.assertEqual(expected, actual)
def test_build_url_keyword(self):
expected = 'https://fake.taskcluster.net/v1/two_args_no_input/arg0/arg1'
actual = self.client.buildUrl('two_args_no_input', arg0='arg0', arg1='arg1')
self.assertEqual(expected, actual)
def test_build_url_query_string(self):
expected = 'https://fake.taskcluster.net/v1/two_args_no_input/arg0/arg1?qs0=1'
actual = self.client.buildUrl(
'two_args_no_input',
params={
'arg0': 'arg0',
'arg1': 'arg1'
},
query={'qs0': 1}
)
self.assertEqual(expected, actual)
def test_fails_to_build_url_for_missing_method(self):
with self.assertRaises(exc.TaskclusterFailure):
self.client.buildUrl('non-existing')
def test_fails_to_build_not_enough_args(self):
with self.assertRaises(exc.TaskclusterFailure):
self.client.buildUrl('two_args_no_input', 'not-enough-args')
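# Signed URLs append a Hawk bewit to the query string; the bewit is
# time-dependent, so these tests replace it with a fixed token before
# comparing.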
class TestBuildSignedUrl(ClientTest):
def test_builds_surl_positional(self):
expected = 'https://fake.taskcluster.net/v1/two_args_no_input/arg0/arg1?bewit=X'
actual = self.client.buildSignedUrl('two_args_no_input', 'arg0', 'arg1')
actual = re.sub('bewit=[^&]*', 'bewit=X', actual)
self.assertEqual(expected, actual)
def test_builds_surl_keyword(self):
expected = 'https://fake.taskcluster.net/v1/two_args_no_input/arg0/arg1?bewit=X'
actual = self.client.buildSignedUrl('two_args_no_input', arg0='arg0', arg1='arg1')
actual = re.sub('bewit=[^&]*', 'bewit=X', actual)
self.assertEqual(expected, actual)
class TestMockHttpCalls(ClientTest):
"""Test entire calls down to the requests layer, ensuring they have
well-formed URLs and handle request and response bodies properly. This
verifies that we can call real methods with both position and keyword
args"""
def setUp(self):
ClientTest.setUp(self)
self.fakeResponse = ''
def fakeSite(url, request):
self.gotUrl = urllib.parse.urlunsplit(url)
self.gotRequest = request
return self.fakeResponse
self.fakeSite = fakeSite
def test_no_args_no_input(self):
with httmock.HTTMock(self.fakeSite):
self.client.no_args_no_input()
self.assertEqual(self.gotUrl, 'https://fake.taskcluster.net/v1/no_args_no_input')
def test_two_args_no_input(self):
with httmock.HTTMock(self.fakeSite):
self.client.two_args_no_input('1', '2')
self.assertEqual(self.gotUrl, 'https://fake.taskcluster.net/v1/two_args_no_input/1/2')
def test_no_args_with_input(self):
with httmock.HTTMock(self.fakeSite):
self.client.no_args_with_input({'x': 1})
self.assertEqual(self.gotUrl, 'https://fake.taskcluster.net/v1/no_args_with_input')
self.assertEqual(json.loads(self.gotRequest.body), {"x": 1})
def test_no_args_with_empty_input(self):
with httmock.HTTMock(self.fakeSite):
self.client.no_args_with_input({})
self.assertEqual(self.gotUrl, 'https://fake.taskcluster.net/v1/no_args_with_input')
self.assertEqual(json.loads(self.gotRequest.body), {})
def test_two_args_with_input(self):
with httmock.HTTMock(self.fakeSite):
self.client.two_args_with_input('a', 'b', {'x': 1})
self.assertEqual(self.gotUrl,
'https://fake.taskcluster.net/v1/two_args_with_input/a/b')
self.assertEqual(json.loads(self.gotRequest.body), {"x": 1})
def test_kwargs(self):
with httmock.HTTMock(self.fakeSite):
self.client.two_args_with_input(
{'x': 1}, arg0='a', arg1='b')
self.assertEqual(self.gotUrl,
'https://fake.taskcluster.net/v1/two_args_with_input/a/b')
self.assertEqual(json.loads(self.gotRequest.body), {"x": 1})
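# The tests below talk to the real Taskcluster services unless
# NO_TESTS_OVER_WIRE is set; the first one mocks its single request and needs
# no credentials.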
@unittest.skipIf(os.environ.get('NO_TESTS_OVER_WIRE'), "Skipping tests over wire")
class TestAuthentication(base.TCTest):
def test_no_creds_needed(self):
"""we can call methods which require no scopes with an unauthenticated
client"""
# mock this request so we don't depend on the existence of a client
@httmock.all_requests
def auth_response(url, request):
self.assertEqual(urllib.parse.urlunsplit(url),
'https://auth.taskcluster.net/v1/clients/abc')
            self.assertNotIn('Authorization', request.headers)
headers = {'content-type': 'application/json'}
content = {"clientId": "abc"}
return httmock.response(200, content, headers, None, 5, request)
with httmock.HTTMock(auth_response):
client = subject.Auth({"credentials": {}})
result = client.client('abc')
self.assertEqual(result, {"clientId": "abc"})
def test_permacred_simple(self):
"""we can call methods which require authentication with valid
permacreds"""
client = subject.Auth({
'credentials': {
'clientId': 'tester',
'accessToken': 'no-secret',
}
})
result = client.testAuthenticate({
'clientScopes': ['test:a'],
'requiredScopes': ['test:a'],
})
self.assertEqual(result, {'scopes': ['test:a'], 'clientId': 'tester'})
def test_permacred_simple_authorizedScopes(self):
client = subject.Auth({
'credentials': {
'clientId': 'tester',
'accessToken': 'no-secret',
},
'authorizedScopes': ['test:a', 'test:b'],
})
result = client.testAuthenticate({
'clientScopes': ['test:*'],
'requiredScopes': ['test:a'],
})
self.assertEqual(result, {'scopes': ['test:a', 'test:b'],
'clientId': 'tester'})
def test_unicode_permacred_simple(self):
"""Unicode strings that encode to ASCII in credentials do not cause issues"""
client = subject.Auth({
'credentials': {
'clientId': u'tester',
'accessToken': u'no-secret',
}
})
result = client.testAuthenticate({
'clientScopes': ['test:a'],
'requiredScopes': ['test:a'],
})
self.assertEqual(result, {'scopes': ['test:a'], 'clientId': 'tester'})
def test_invalid_unicode_permacred_simple(self):
"""Unicode strings that do not encode to ASCII in credentials cause issues"""
with self.assertRaises(exc.TaskclusterAuthFailure):
subject.Auth({
'credentials': {
'clientId': u"\U0001F4A9",
'accessToken': u"\U0001F4A9",
}
})
def test_permacred_insufficient_scopes(self):
"""A call with insufficient scopes results in an error"""
client = subject.Auth({
'credentials': {
'clientId': 'tester',
'accessToken': 'no-secret',
}
})
        # TODO: this should be TaskclusterAuthFailure; most likely the client
        # is expecting AuthorizationFailure instead of AuthenticationFailure
with self.assertRaises(exc.TaskclusterRestFailure):
client.testAuthenticate({
'clientScopes': ['test:*'],
'requiredScopes': ['something-more'],
})
def test_temporary_credentials(self):
"""we can call methods which require authentication with temporary
credentials generated by python client"""
tempCred = subject.createTemporaryCredentials(
'tester',
'no-secret',
datetime.datetime.utcnow(),
datetime.datetime.utcnow() + datetime.timedelta(hours=1),
['test:xyz'],
)
client = subject.Auth({
'credentials': tempCred,
})
result = client.testAuthenticate({
'clientScopes': ['test:*'],
'requiredScopes': ['test:xyz'],
})
self.assertEqual(result, {'scopes': ['test:xyz'], 'clientId': 'tester'})
def test_named_temporary_credentials(self):
tempCred = subject.createTemporaryCredentials(
'tester',
'no-secret',
datetime.datetime.utcnow(),
datetime.datetime.utcnow() + datetime.timedelta(hours=1),
['test:xyz'],
name='credName'
)
client = subject.Auth({
'credentials': tempCred,
})
result = client.testAuthenticate({
'clientScopes': ['test:*', 'auth:create-client:credName'],
'requiredScopes': ['test:xyz'],
})
self.assertEqual(result, {'scopes': ['test:xyz'], 'clientId': 'credName'})
def test_temporary_credentials_authorizedScopes(self):
tempCred = subject.createTemporaryCredentials(
'tester',
'no-secret',
datetime.datetime.utcnow(),
datetime.datetime.utcnow() + datetime.timedelta(hours=1),
['test:xyz:*'],
)
client = subject.Auth({
'credentials': tempCred,
'authorizedScopes': ['test:xyz:abc'],
})
result = client.testAuthenticate({
'clientScopes': ['test:*'],
'requiredScopes': ['test:xyz:abc'],
})
self.assertEqual(result, {'scopes': ['test:xyz:abc'],
'clientId': 'tester'})
def test_named_temporary_credentials_authorizedScopes(self):
tempCred = subject.createTemporaryCredentials(
'tester',
'no-secret',
datetime.datetime.utcnow(),
datetime.datetime.utcnow() + datetime.timedelta(hours=1),
['test:xyz:*'],
name='credName'
)
client = subject.Auth({
'credentials': tempCred,
'authorizedScopes': ['test:xyz:abc'],
})
result = client.testAuthenticate({
'clientScopes': ['test:*', 'auth:create-client:credName'],
'requiredScopes': ['test:xyz:abc'],
})
self.assertEqual(result, {'scopes': ['test:xyz:abc'],
'clientId': 'credName'})
def test_signed_url(self):
"""we can use a signed url built with the python client"""
client = subject.Auth({
'credentials': {
'clientId': 'tester',
'accessToken': 'no-secret',
}
})
signedUrl = client.buildSignedUrl('testAuthenticateGet')
response = requests.get(signedUrl)
response.raise_for_status()
response = response.json()
response['scopes'].sort()
self.assertEqual(response, {
'scopes': sorted(['test:*', u'auth:create-client:test:*']),
'clientId': 'tester',
})
def test_signed_url_bad_credentials(self):
client = subject.Auth({
'credentials': {
'clientId': 'tester',
'accessToken': 'wrong-secret',
}
})
signedUrl = client.buildSignedUrl('testAuthenticateGet')
response = requests.get(signedUrl)
with self.assertRaises(requests.exceptions.RequestException):
response.raise_for_status()
self.assertEqual(401, response.status_code)
def test_temp_credentials_signed_url(self):
tempCred = subject.createTemporaryCredentials(
'tester',
'no-secret',
datetime.datetime.utcnow(),
datetime.datetime.utcnow() + datetime.timedelta(hours=1),
['test:*'],
)
client = subject.Auth({
'credentials': tempCred,
})
signedUrl = client.buildSignedUrl('testAuthenticateGet')
response = requests.get(signedUrl)
response.raise_for_status()
response = response.json()
self.assertEqual(response, {
'scopes': ['test:*'],
'clientId': 'tester',
})
def test_signed_url_authorizedScopes(self):
client = subject.Auth({
'credentials': {
'clientId': 'tester',
'accessToken': 'no-secret',
},
'authorizedScopes': ['test:authenticate-get'],
})
signedUrl = client.buildSignedUrl('testAuthenticateGet')
response = requests.get(signedUrl)
response.raise_for_status()
response = response.json()
self.assertEqual(response, {
'scopes': ['test:authenticate-get'],
'clientId': 'tester',
})
def test_temp_credentials_signed_url_authorizedScopes(self):
tempCred = subject.createTemporaryCredentials(
'tester',
'no-secret',
datetime.datetime.utcnow(),
datetime.datetime.utcnow() + datetime.timedelta(hours=1),
['test:*'],
)
client = subject.Auth({
'credentials': tempCred,
'authorizedScopes': ['test:authenticate-get'],
})
signedUrl = client.buildSignedUrl('testAuthenticateGet')
response = requests.get(signedUrl)
response.raise_for_status()
response = response.json()
self.assertEqual(response, {
'scopes': ['test:authenticate-get'],
'clientId': 'tester',
})

View File

@@ -0,0 +1,340 @@
import datetime
import uuid
import taskcluster.utils as subject
import dateutil.parser
import httmock
import mock
import requests
import base
from unittest import TestCase
from hypothesis import given
import hypothesis.strategies as st
# https://docs.python.org/2/library/datetime.html#tzinfo-objects
class UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return datetime.timedelta(0)
def tzname(self, dt):
return 'UTC'
def dst(self, dt):
return datetime.timedelta(0)
utc = UTC()
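# stringDate formats naive and UTC-aware datetimes alike as ISO 8601 with a
# trailing 'Z'.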
class StringDateTests(base.TCTest):
def test_naive(self):
dateObj = datetime.datetime(
year=2000,
month=1,
day=1,
hour=1,
minute=1,
second=1
)
expected = '2000-01-01T01:01:01Z'
actual = subject.stringDate(dateObj)
self.assertEqual(expected, actual)
def test_aware(self):
dateObj = datetime.datetime(
year=2000,
month=1,
day=1,
hour=1,
minute=1,
second=1,
tzinfo=utc
)
expected = '2000-01-01T01:01:01Z'
actual = subject.stringDate(dateObj)
self.assertEqual(expected, actual)
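# dumpJson produces compact JSON (no separator whitespace) and serializes
# datetimes in the same Zulu format as stringDate.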
class DumpJsonTests(base.TCTest):
def test_has_no_spaces(self):
expected = [
'{"test":"works","doesit":"yes"}',
'{"doesit":"yes","test":"works"}'
]
actual = subject.dumpJson({'test': 'works', 'doesit': 'yes'})
        self.assertIn(actual, expected)
def test_serializes_naive_date(self):
dateObj = datetime.datetime(
year=2000,
month=1,
day=1,
hour=1,
minute=1,
second=1
)
expected = '{"date":"2000-01-01T01:01:01Z"}'
actual = subject.dumpJson({'date': dateObj})
self.assertEqual(expected, actual)
def test_serializes_aware_date(self):
dateObj = datetime.datetime(
year=2000,
month=1,
day=1,
hour=1,
minute=1,
second=1,
tzinfo=utc
)
expected = '{"date":"2000-01-01T01:01:01Z"}'
actual = subject.dumpJson({'date': dateObj})
self.assertEqual(expected, actual)
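# Base64 helpers: header-safe encoding without inserted newlines, plus
# conversion between the standard and URL-safe alphabets ('+/' <-> '-_').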
class TestBase64Utils(base.TCTest):
def test_encode_string_for_b64_header(self):
        # Really long strings trigger newlines every 72 characters
expected = 'YWJjZGVm' * 500
expected = expected.encode('ascii')
actual = subject.encodeStringForB64Header('abcdef' * 500)
self.assertEqual(expected, actual)
def test_makeb64urlsafe(self):
expected = b'-_'
actual = subject.makeB64UrlSafe('+/')
self.assertEqual(expected, actual)
def test_makeb64urlunsafe(self):
expected = b'+/'
actual = subject.makeB64UrlUnsafe('-_')
self.assertEqual(expected, actual)
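# "Nice" slugids clear the leading bit of the underlying v4 UUID, so the
# base64url-encoded slug never starts with '-'.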
class TestSlugId(base.TCTest):
def test_slug_id_is_always_nice(self):
with mock.patch('uuid.uuid4') as p:
# first bit of uuid set, which should get unset
p.return_value = uuid.UUID('bed97923-7616-4ec8-85ed-4b695f67ac2e')
expected = b'Ptl5I3YWTsiF7UtpX2esLg'
actual = subject.slugId()
self.assertEqual(expected, actual)
def test_slug_id_nice_stays_nice(self):
with mock.patch('uuid.uuid4') as p:
# first bit of uuid unset, should remain unset
p.return_value = uuid.UUID('3ed97923-7616-4ec8-85ed-4b695f67ac2e')
expected = b'Ptl5I3YWTsiF7UtpX2esLg'
actual = subject.slugId()
self.assertEqual(expected, actual)
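# makeSingleHttpRequest performs exactly one request (no retries) and returns
# the raw response object.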
class TestMakeSingleHttpRequest(base.TCTest):
def test_success_no_payload(self):
@httmock.all_requests
def response_content(url, request):
return {'status_code': 200, 'content': {}}
with httmock.HTTMock(response_content):
d = subject.makeSingleHttpRequest('GET', 'http://www.example.com', {}, {})
self.assertEqual(d.json(), {})
self.assertEqual(d.status_code, 200)
d.raise_for_status()
def test_success_payload(self):
@httmock.all_requests
def response_content(url, request):
self.assertEqual(request.body, 'i=j')
return {'status_code': 200, 'content': {'k': 'l'}}
with httmock.HTTMock(response_content):
d = subject.makeSingleHttpRequest('GET', 'http://www.example.com', {'i': 'j'}, {})
self.assertEqual(d.json(), {'k': 'l'})
self.assertEqual(d.status_code, 200)
d.raise_for_status()
def test_failure(self):
@httmock.all_requests
        def response_content(url, request):
return {'status_code': 404}
with httmock.HTTMock(response_content):
d = subject.makeSingleHttpRequest('GET', 'http://www.example.com', {}, {})
with self.assertRaises(requests.exceptions.RequestException):
d.raise_for_status()
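# putFile uploads a local file to the given URL with a single PUT request.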
class TestPutfile(base.TCTest):
def test_success_put_file(self):
with mock.patch.object(subject, 'makeSingleHttpRequest') as p:
class FakeResp:
status_code = 200
def raise_for_status(self):
pass
p.return_value = FakeResp()
subject.putFile('setup.py', 'http://www.example.com', 'text/plain')
p.assert_called_once_with('put', 'http://www.example.com', mock.ANY, mock.ANY, mock.ANY)
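# stableSlugId returns a closure that maps each name to a deterministic slug;
# distinct closures yield distinct mappings for the same name.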
class TestStableSlugIdClosure(TestCase):
@given(st.text())
def test_repeat(self, text):
s = subject.stableSlugId()
self.assertEqual(s(text), s(text))
def test_not_equal(self):
s = subject.stableSlugId()
self.assertNotEqual(s("first"), s("second"))
@given(st.text())
def test_invalidate(self, text):
s1 = subject.stableSlugId()
s2 = subject.stableSlugId()
self.assertNotEqual(s1(text), s2(text))
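# fromNow parses relative time expressions such as '1 hour', '3h', or
# '-30 mo' and offsets the given (or current) datetime by that amount.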
class TestFromNow(TestCase):
examples = [
{"expr": '1 hour', "from": '2017-01-19T16:27:20.974Z', "result": '2017-01-19T17:27:20.974Z'},
{"expr": '3h', "from": '2017-01-19T16:27:20.974Z', "result": '2017-01-19T19:27:20.974Z'},
{"expr": '1 hours', "from": '2017-01-19T16:27:20.974Z', "result": '2017-01-19T17:27:20.974Z'},
{"expr": '-1 hour', "from": '2017-01-19T16:27:20.974Z', "result": '2017-01-19T15:27:20.974Z'},
{"expr": '1 m', "from": '2017-01-19T16:27:20.974Z', "result": '2017-01-19T16:28:20.974Z'},
{"expr": '1m', "from": '2017-01-19T16:27:20.974Z', "result": '2017-01-19T16:28:20.974Z'},
{"expr": '12 min', "from": '2017-01-19T16:27:20.974Z', "result": '2017-01-19T16:39:20.974Z'},
{"expr": '12min', "from": '2017-01-19T16:27:20.974Z', "result": '2017-01-19T16:39:20.974Z'},
{"expr": '11m', "from": '2017-01-19T16:27:20.974Z', "result": '2017-01-19T16:38:20.974Z'},
{"expr": '11 m', "from": '2017-01-19T16:27:20.974Z', "result": '2017-01-19T16:38:20.974Z'},
{"expr": '1 day', "from": '2017-01-19T16:27:20.974Z', "result": '2017-01-20T16:27:20.974Z'},
{"expr": '2 days', "from": '2017-01-19T16:27:20.974Z', "result": '2017-01-21T16:27:20.974Z'},
{"expr": '1 second', "from": '2017-01-19T16:27:20.974Z', "result": '2017-01-19T16:27:21.974Z'},
{"expr": '1 week', "from": '2017-01-19T16:27:20.974Z', "result": '2017-01-26T16:27:20.974Z'},
{"expr": '1 month', "from": '2017-01-19T16:27:20.974Z', "result": '2017-02-18T16:27:20.974Z'},
{"expr": '30 mo', "from": '2017-01-19T16:27:20.974Z', "result": '2019-07-08T16:27:20.974Z'},
{"expr": '-30 mo', "from": '2017-01-19T16:27:20.974Z', "result": '2014-08-03T16:27:20.974Z'},
{"expr": '1 year', "from": '2017-01-19T16:27:20.974Z', "result": '2018-01-19T16:27:20.974Z'},
]
def test_examples(self):
for example in self.examples:
from_ = dateutil.parser.parse(example['from'])
res = dateutil.parser.parse(example['result'])
self.assertEqual(subject.fromNow(example['expr'], from_), res)
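# scopeMatch(assumedScopes, requiredScopeSets) treats requiredScopeSets as a
# disjunction of conjunctions (a list of scope lists); '*' is only a wildcard
# at the end of an assumed scope, and anything that isn't a list of lists of
# strings raises.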
class TestScopeMatch(TestCase):
def assertScopeMatch(self, assumed, requiredScopeSets, expected):
try:
result = subject.scopeMatch(assumed, requiredScopeSets)
self.assertEqual(result, expected)
        except Exception:
if expected != 'exception':
raise
def test_single_exact_match_string_except_1(self):
self.assertScopeMatch(["foo:bar"], "foo:bar", "exception")
def test_single_exact_match_string_except_2(self):
self.assertScopeMatch(["foo:bar"], ["foo:bar"], "exception")
def test_single_exact_match_string(self):
self.assertScopeMatch(["foo:bar"], [["foo:bar"]], True)
def test_empty_string_in_scopesets_except_1(self):
self.assertScopeMatch(["foo:bar"], "", "exception")
def test_empty_string_in_scopesets_except_2(self):
self.assertScopeMatch(["foo:bar"], [""], "exception")
def test_empty_string_in_scopesets(self):
self.assertScopeMatch(["foo:bar"], [[""]], False)
def test_prefix(self):
self.assertScopeMatch(["foo:*"], [["foo:bar"]], True)
def test_star_not_at_end(self):
self.assertScopeMatch(["foo:*:bing"], [["foo:bar:bing"]], False)
    def test_star_at_beginning(self):
self.assertScopeMatch(["*:bar"], [["foo:bar"]], False)
def test_prefix_with_no_star(self):
self.assertScopeMatch(["foo:"], [["foo:bar"]], False)
def test_star_but_not_prefix_1(self):
self.assertScopeMatch(["foo:bar:*"], [["bar:bing"]], False)
def test_star_but_not_prefix_2(self):
self.assertScopeMatch(["bar:*"], [["foo:bar:bing"]], False)
def test_disjunction_strings_except(self):
self.assertScopeMatch(["bar:*"], ["foo:x", "bar:x"], "exception")
def test_disjunction_strings_2(self):
self.assertScopeMatch(["bar:*"], [["foo:x"], ["bar:x"]], True)
def test_conjunction(self):
self.assertScopeMatch(["bar:*", "foo:x"], [["foo:x", "bar:y"]], True)
def test_empty_pattern(self):
self.assertScopeMatch([""], [["foo:bar"]], False)
def test_empty_patterns(self):
self.assertScopeMatch([], [["foo:bar"]], False)
def test_bare_star(self):
self.assertScopeMatch(["*"], [["foo:bar", "bar:bing"]], True)
def test_empty_conjunction_in_scopesets(self):
self.assertScopeMatch(["foo:bar"], [[]], True)
def test_non_string_scopesets(self):
self.assertScopeMatch(["foo:bar"], {}, "exception")
def test_non_string_scopeset(self):
self.assertScopeMatch(["foo:bar"], [{}], "exception")
def test_non_string_scope(self):
self.assertScopeMatch(["foo:bar"], [[{}]], "exception")
def test_empty_disjunction_in_scopesets(self):
self.assertScopeMatch(["foo:bar"], [], False)
class TestIsExpired(TestCase):
def test_not_expired(self):
isExpired = subject.isExpired("""
{
"version":1,
"scopes":["*"],
"start":1450740520182,
"expiry":2451000620182,
"seed":"90PyTwYxS96-lBPc0f_MqQGV-hHCUsTYWpXZilv6EqDg",
"signature":"HocA2IiCoGzjUQZbrbLSwKMXZSYWCu/hfMPCa/ovggQ="
}
""")
self.assertEqual(isExpired, False)
def test_expired(self):
        # We have to test with "expiry": 0, since the mocking machinery
        # interferes with time.time() and a real timestamp won't work.
isExpired = subject.isExpired("""
{
"version":1,
"scopes":["*"],
"start":1450740520182,
"expiry":0,
"seed":"90PyTwYxS96-lBPc0f_MqQGV-hHCUsTYWpXZilv6EqDg",
"signature":"HocA2IiCoGzjUQZbrbLSwKMXZSYWCu/hfMPCa/ovggQ="
}
""")
self.assertEqual(isExpired, True)