Bug 1322193 - Verify taskgraph implementations against documentation, with proper regex. Updated doc verification to skip fake test values for kinds, parameters, etc., and optimized the regex. r=Callek

MozReview-Commit-ID: 56ZEJECbtK5

--HG--
extra : rebase_source : ad1efed52363e95040a9907c6387b4cd57bdaa49
This commit is contained in:
Hammad Akhtar 2016-12-06 12:03:36 +05:30
parent c080ecbf37
commit 079365ac41
4 changed files with 52 additions and 40 deletions

View File

@ -16,7 +16,6 @@ from .generator import TaskGraphGenerator
from .create import create_tasks
from .parameters import Parameters
from .taskgraph import TaskGraph
from .util.verifydoc import verify_docs
from taskgraph.util.templates import Templates
from taskgraph.util.time import (
@ -76,7 +75,6 @@ def taskgraph_decision(options):
"""
parameters = get_decision_parameters(options)
verify_parameters(parameters)
# create a TaskGraphGenerator instance
tgg = TaskGraphGenerator(
root_dir=options['root'],
@ -187,12 +185,3 @@ def get_action_yml(parameters):
"now": current_json_time()
})
return templates.load('action.yml', action_parameters)
def verify_parameters(parameters):
    """Verify that every decision parameter is documented.

    Checks that each key of *parameters* appears as an inline literal
    (````key````) in taskcluster/docs/parameters.rst; ``verify_docs``
    raises an Exception for any key missing from the doc file.
    """
    # Copy into a plain dict so the parameter names can be enumerated
    # regardless of the concrete Parameters type.
    parameters_dict = dict(**parameters)
    verify_docs(
        filename="parameters.rst",
        identifiers=parameters_dict.keys(),
        appearing_as="inline-literal"
    )

View File

@ -62,6 +62,8 @@ class TaskGraphGenerator(object):
self.root_dir = root_dir
self.parameters = parameters
self.verify_parameters(self.parameters)
filters = parameters.get('filters', [])
# Always add legacy target tasks method until we deprecate that API.
@ -237,6 +239,14 @@ class TaskGraphGenerator(object):
self._run_results[k] = v
return self._run_results[name]
def verify_parameters(self, parameters):
    """Verify that every decision parameter is documented.

    Each key of *parameters* must appear as an inline literal
    (````key````) in taskcluster/docs/parameters.rst, or ``verify_docs``
    raises an Exception.  Keys starting with ``_`` are skipped by
    ``verify_docs`` so tests can use fake parameter names.
    """
    # Copy into a plain dict so the parameter names can be enumerated
    # regardless of the concrete Parameters type.
    parameters_dict = dict(**parameters)
    verify_docs(
        filename="parameters.rst",
        identifiers=parameters_dict.keys(),
        appearing_as="inline-literal"
    )
def verify_kinds(self, kinds):
verify_docs(
filename="kinds.rst",

View File

@ -22,7 +22,7 @@ class FakeTask(base.Task):
def load_tasks(cls, kind, path, config, parameters, loaded_tasks):
return [cls(kind=kind,
label='{}-t-{}'.format(kind, i),
attributes={'tasknum': str(i)},
attributes={'_tasknum': str(i)},
task={},
i=i)
for i in range(3)]
@ -51,7 +51,7 @@ class FakeKind(Kind):
class WithFakeKind(TaskGraphGenerator):
def _load_kinds(self):
for kind_name, deps in self.parameters['kinds']:
for kind_name, deps in self.parameters['_kinds']:
yield FakeKind(
kind_name, '/fake',
{'kind-dependencies': deps} if deps else {})
@ -59,7 +59,7 @@ class WithFakeKind(TaskGraphGenerator):
class TestGenerator(unittest.TestCase):
def maketgg(self, target_tasks=None, kinds=[('fake', [])]):
def maketgg(self, target_tasks=None, kinds=[('_fake', [])]):
FakeKind.loaded_kinds = []
self.target_tasks = target_tasks or []
@ -69,7 +69,7 @@ class TestGenerator(unittest.TestCase):
target_tasks_mod._target_task_methods['test_method'] = target_tasks_method
parameters = {
'kinds': kinds,
'_kinds': kinds,
'target_tasks_method': 'test_method',
}
@ -78,59 +78,59 @@ class TestGenerator(unittest.TestCase):
def test_kind_ordering(self):
"When task kinds depend on each other, they are loaded in postorder"
self.tgg = self.maketgg(kinds=[
('fake3', ['fake2', 'fake1']),
('fake2', ['fake1']),
('fake1', []),
('_fake3', ['_fake2', '_fake1']),
('_fake2', ['_fake1']),
('_fake1', []),
])
self.tgg._run_until('full_task_set')
self.assertEqual(FakeKind.loaded_kinds, ['fake1', 'fake2', 'fake3'])
self.assertEqual(FakeKind.loaded_kinds, ['_fake1', '_fake2', '_fake3'])
def test_full_task_set(self):
"The full_task_set property has all tasks"
self.tgg = self.maketgg()
self.assertEqual(self.tgg.full_task_set.graph,
graph.Graph({'fake-t-0', 'fake-t-1', 'fake-t-2'}, set()))
graph.Graph({'_fake-t-0', '_fake-t-1', '_fake-t-2'}, set()))
self.assertEqual(sorted(self.tgg.full_task_set.tasks.keys()),
sorted(['fake-t-0', 'fake-t-1', 'fake-t-2']))
sorted(['_fake-t-0', '_fake-t-1', '_fake-t-2']))
def test_full_task_graph(self):
"The full_task_graph property has all tasks, and links"
self.tgg = self.maketgg()
self.assertEqual(self.tgg.full_task_graph.graph,
graph.Graph({'fake-t-0', 'fake-t-1', 'fake-t-2'},
graph.Graph({'_fake-t-0', '_fake-t-1', '_fake-t-2'},
{
('fake-t-1', 'fake-t-0', 'prev'),
('fake-t-2', 'fake-t-1', 'prev'),
('_fake-t-1', '_fake-t-0', 'prev'),
('_fake-t-2', '_fake-t-1', 'prev'),
}))
self.assertEqual(sorted(self.tgg.full_task_graph.tasks.keys()),
sorted(['fake-t-0', 'fake-t-1', 'fake-t-2']))
sorted(['_fake-t-0', '_fake-t-1', '_fake-t-2']))
def test_target_task_set(self):
"The target_task_set property has the targeted tasks"
self.tgg = self.maketgg(['fake-t-1'])
self.tgg = self.maketgg(['_fake-t-1'])
self.assertEqual(self.tgg.target_task_set.graph,
graph.Graph({'fake-t-1'}, set()))
graph.Graph({'_fake-t-1'}, set()))
self.assertEqual(self.tgg.target_task_set.tasks.keys(),
['fake-t-1'])
['_fake-t-1'])
def test_target_task_graph(self):
"The target_task_graph property has the targeted tasks and deps"
self.tgg = self.maketgg(['fake-t-1'])
self.tgg = self.maketgg(['_fake-t-1'])
self.assertEqual(self.tgg.target_task_graph.graph,
graph.Graph({'fake-t-0', 'fake-t-1'},
{('fake-t-1', 'fake-t-0', 'prev')}))
graph.Graph({'_fake-t-0', '_fake-t-1'},
{('_fake-t-1', '_fake-t-0', 'prev')}))
self.assertEqual(sorted(self.tgg.target_task_graph.tasks.keys()),
sorted(['fake-t-0', 'fake-t-1']))
sorted(['_fake-t-0', '_fake-t-1']))
def test_optimized_task_graph(self):
"The optimized task graph contains task ids"
self.tgg = self.maketgg(['fake-t-2'])
self.tgg = self.maketgg(['_fake-t-2'])
tid = self.tgg.label_to_taskid
self.assertEqual(
self.tgg.optimized_task_graph.graph,
graph.Graph({tid['fake-t-0'], tid['fake-t-1'], tid['fake-t-2']}, {
(tid['fake-t-1'], tid['fake-t-0'], 'prev'),
(tid['fake-t-2'], tid['fake-t-1'], 'prev'),
graph.Graph({tid['_fake-t-0'], tid['_fake-t-1'], tid['_fake-t-2']}, {
(tid['_fake-t-1'], tid['_fake-t-0'], 'prev'),
(tid['_fake-t-2'], tid['_fake-t-1'], 'prev'),
}))
if __name__ == '__main__':

View File

@ -10,18 +10,31 @@ base_path = os.path.join(os.getcwd(), "taskcluster/docs/")
def verify_docs(filename, identifiers, appearing_as):
# We ignore identifiers starting with '_' for the sake of tests.
# Strings starting with "_" are ignored for doc verification
# hence they can be used for faking test values
with open(os.path.join(base_path, filename)) as fileObject:
doctext = "".join(fileObject.readlines())
if appearing_as == "inline-literal":
expression_list = ["``" + identifier + "``" for identifier in identifiers]
expression_list = [
"``" + identifier + "``"
for identifier in identifiers
if not identifier.startswith("_")
]
elif appearing_as == "heading":
expression_list = [identifier + "\n[-+\n*]+|[.+\n*]+" for identifier in identifiers]
expression_list = [
identifier + "\n(?:(?:(?:-+\n)+)|(?:(?:.+\n)+))"
for identifier in identifiers
if not identifier.startswith("_")
]
else:
raise Exception("appearing_as = {} not defined".format(appearing_as))
raise Exception("appearing_as = `{}` not defined".format(appearing_as))
for expression, identifier in zip(expression_list, identifiers):
match_group = re.search(expression, doctext)
if not match_group:
raise Exception(
"{}: {} missing from doc file: {}".format(appearing_as, identifier, filename)
"{}: `{}` missing from doc file: `{}`"
.format(appearing_as, identifier, filename)
)