Bug 1357001 - Part 1 - Unify validation errors in probe parsing scripts. r=chutten, f=gps

Georg Fritzsche 2017-04-18 18:21:36 +02:00
parent d76d15db1e
commit 8e4e44f5d9
10 changed files with 124 additions and 85 deletions
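
Every file in this commit follows the same pattern: the parsers raise a single ParserError type for invalid probe definitions (replacing the previous mix of ValueError, KeyError, TypeError, and BaseException), and each gen-*.py entry point catches it, prints a readable message, and exits non-zero instead of letting a raw traceback escape into the build output. A minimal standalone sketch of that pattern — the load_probes helper and the Scalars.yaml path are illustrative only, not part of the commit:

from __future__ import print_function
import sys


class ParserError(Exception):
    """Thrown by the different probe parsers."""
    pass


def load_probes(filename):
    # Hypothetical stand-in: a real parser would open and validate the
    # definition file here; this one always fails to show the error path.
    raise ParserError(filename + ' - missing required fields: description')


def main():
    try:
        probes = load_probes('Scalars.yaml')
    except ParserError as ex:
        # One readable message, one non-zero exit - no traceback.
        print("\nError processing probes:\n" + str(ex) + "\n")
        sys.exit(1)


if __name__ == '__main__':
    main()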

toolkit/components/telemetry/gen-event-data.py

@@ -6,7 +6,7 @@
 # in a file provided as a command-line argument.
 
 from __future__ import print_function
-from shared_telemetry_utils import StringTable, static_assert
+from shared_telemetry_utils import StringTable, static_assert, ParserError
 
 import parse_events
 import sys
@@ -114,7 +114,11 @@ def main(output, *filenames):
     # Load the event data.
     if len(filenames) > 1:
         raise Exception('We don\'t support loading from more than one file.')
-    events = parse_events.load_events(filenames[0])
+    try:
+        events = parse_events.load_events(filenames[0])
+    except ParserError as ex:
+        print("\nError processing events:\n" + str(ex) + "\n")
+        sys.exit(1)
 
     # Write the scalar data file.
     print(banner, file=output)

toolkit/components/telemetry/gen-event-enum.py

@@ -7,6 +7,7 @@
 # The events are defined in files provided as command-line arguments.
 
 from __future__ import print_function
+from shared_telemetry_utils import ParserError
 
 import sys
 import parse_events
@@ -34,7 +35,12 @@ def main(output, *filenames):
 
     # Load the events first.
     if len(filenames) > 1:
         raise Exception('We don\'t support loading from more than one file.')
-    events = parse_events.load_events(filenames[0])
+    try:
+        events = parse_events.load_events(filenames[0])
+    except ParserError as ex:
+        print("\nError processing events:\n" + str(ex) + "\n")
+        sys.exit(1)
+
     grouped = dict()
     index = 0

toolkit/components/telemetry/gen-histogram-data.py

@@ -6,7 +6,7 @@
 # in a file provided as a command-line argument.
 
 from __future__ import print_function
-from shared_telemetry_utils import StringTable, static_assert
+from shared_telemetry_utils import StringTable, static_assert, ParserError
 
 import sys
 import histogram_tools
@@ -178,7 +178,11 @@ def write_debug_histogram_ranges(output, histograms):
 
 
 def main(output, *filenames):
-    histograms = list(histogram_tools.from_files(filenames))
+    try:
+        histograms = list(histogram_tools.from_files(filenames))
+    except ParserError as ex:
+        print("\nError processing histograms:\n" + str(ex) + "\n")
+        sys.exit(1)
 
     print(banner, file=output)
     write_histogram_table(output, histograms)

toolkit/components/telemetry/gen-histogram-enum.py

@@ -13,11 +13,13 @@
 # The histograms are defined in files provided as command-line arguments.
 
 from __future__ import print_function
+from shared_telemetry_utils import ParserError
+
 import histogram_tools
 import itertools
 import sys
 
 banner = """/* This file is auto-generated, see gen-histogram-enum.py. */
 """
@@ -43,7 +45,12 @@ def main(output, *filenames):
     print(header, file=output)
 
     # Load the histograms.
-    all_histograms = list(histogram_tools.from_files(filenames))
+    try:
+        all_histograms = list(histogram_tools.from_files(filenames))
+    except ParserError as ex:
+        print("\nError processing histograms:\n" + str(ex) + "\n")
+        sys.exit(1)
+
     groups = itertools.groupby(all_histograms,
                                lambda h: h.name().startswith("USE_COUNTER2_"))

toolkit/components/telemetry/gen-scalar-data.py

@@ -6,7 +6,7 @@
 # in a file provided as a command-line argument.
 
 from __future__ import print_function
-from shared_telemetry_utils import StringTable, static_assert
+from shared_telemetry_utils import StringTable, static_assert, ParserError
 
 import parse_scalars
 import sys
@@ -82,7 +82,12 @@ def main(output, *filenames):
 
     # Load the scalars first.
     if len(filenames) > 1:
         raise Exception('We don\'t support loading from more than one file.')
-    scalars = parse_scalars.load_scalars(filenames[0])
+    try:
+        scalars = parse_scalars.load_scalars(filenames[0])
+    except ParserError as ex:
+        print("\nError processing scalars:\n" + str(ex) + "\n")
+        sys.exit(1)
+
     # Write the scalar data file.
     print(banner, file=output)

toolkit/components/telemetry/gen-scalar-enum.py

@@ -8,6 +8,7 @@
 # The scalars are defined in files provided as command-line arguments.
 
 from __future__ import print_function
+from shared_telemetry_utils import ParserError
 
 import sys
 import parse_scalars
@@ -35,7 +36,12 @@ def main(output, *filenames):
 
     # Load the scalars first.
     if len(filenames) > 1:
         raise Exception('We don\'t support loading from more than one file.')
-    scalars = parse_scalars.load_scalars(filenames[0])
+    try:
+        scalars = parse_scalars.load_scalars(filenames[0])
+    except ParserError as ex:
+        print("\nError processing scalars:\n" + str(ex) + "\n")
+        sys.exit(1)
+
     # Write the enum file.
     print(banner, file=output)

toolkit/components/telemetry/histogram_tools.py

@@ -10,6 +10,8 @@ import os
 import re
 import sys
 
+from shared_telemetry_utils import ParserError
+
 # Constants.
 MAX_LABEL_LENGTH = 20
 MAX_LABEL_COUNT = 100
@@ -39,11 +41,7 @@ def table_dispatch(kind, table, body):
     if kind in table:
         return body(table[kind])
     else:
-        raise BaseException("don't know how to handle a histogram of kind %s" % kind)
-
-
-class DefinitionException(BaseException):
-    pass
+        raise ParserError("don't know how to handle a histogram of kind %s" % kind)
 
 
 def linear_buckets(dmin, dmax, n_buckets):
@@ -87,7 +85,7 @@ try:
     for name, whitelist in whitelists.iteritems():
         whitelists[name] = set(whitelist)
 except ValueError, e:
-    raise BaseException('error parsing whitelist (%s)' % whitelist_path)
+    raise Exception('error parsing whitelist (%s)' % whitelist_path)
 except IOError:
     whitelists = None
     print 'Unable to parse whitelist (%s). Assuming all histograms are acceptable.' % whitelist_path
@@ -135,7 +133,7 @@ symbol that should guard C/C++ definitions associated with the histogram."""
                     'opt-out': 'DATASET_RELEASE_CHANNEL_OPTOUT'}
         value = definition.get('releaseChannelCollection', 'opt-in')
         if value not in datasets:
-            raise DefinitionException("unknown release channel collection policy for " + name)
+            raise ParserError("unknown release channel collection policy for " + name)
         self._dataset = "nsITelemetry::" + datasets[value]
 
     def name(self):
@@ -250,11 +248,11 @@ associated with the histogram. Returns None if no guarding is necessary."""
 
     def check_name(self, name):
         if '#' in name:
-            raise ValueError('"#" not permitted for %s' % (name))
+            raise ParserError('"#" not permitted for %s' % (name))
 
         # Avoid C++ identifier conflicts between histogram enums and label enum names.
        if name.startswith("LABELS_"):
-            raise ValueError("Histogram name '%s' can not start with LABELS_" % (name))
+            raise ParserError("Histogram name '%s' can not start with LABELS_" % (name))
 
         # To make it easier to generate C++ identifiers from this etc., we restrict
         # the histogram names to a strict pattern.
@@ -262,7 +260,7 @@ associated with the histogram. Returns None if no guarding is necessary."""
         if self._strict_type_checks:
             pattern = '^[a-z][a-z0-9_]+[a-z0-9]$'
             if not re.match(pattern, name, re.IGNORECASE):
-                raise ValueError("Histogram name '%s' doesn't conform to '%s'" % (name, pattern))
+                raise ParserError("Histogram name '%s' doesn't conform to '%s'" % (name, pattern))
 
     def check_expiration(self, name, definition):
         field = 'expires_in_version'
@@ -274,7 +272,7 @@ associated with the histogram. Returns None if no guarding is necessary."""
         # We forbid new probes from using "expires_in_version" : "default" field/value pair.
         # Old ones that use this are added to the whitelist.
         if expiration == "default" and name not in whitelists['expiry_default']:
-            raise ValueError('New histogram "%s" cannot have "default" %s value.' % (name, field))
+            raise ParserError('New histogram "%s" cannot have "default" %s value.' % (name, field))
 
         if re.match(r'^[1-9][0-9]*$', expiration):
             expiration = expiration + ".0a1"
@@ -290,11 +288,11 @@ associated with the histogram. Returns None if no guarding is necessary."""
 
         invalid = filter(lambda l: len(l) > MAX_LABEL_LENGTH, labels)
         if len(invalid) > 0:
-            raise ValueError('Label values for %s exceed length limit of %d: %s' %
+            raise ParserError('Label values for %s exceed length limit of %d: %s' %
                              (name, MAX_LABEL_LENGTH, ', '.join(invalid)))
 
         if len(labels) > MAX_LABEL_COUNT:
-            raise ValueError('Label count for %s exceeds limit of %d' %
+            raise ParserError('Label count for %s exceeds limit of %d' %
                              (name, MAX_LABEL_COUNT))
 
         # To make it easier to generate C++ identifiers from this etc., we restrict
@@ -302,7 +300,7 @@ associated with the histogram. Returns None if no guarding is necessary."""
         pattern = '^[a-z][a-z0-9_]+[a-z0-9]$'
         invalid = filter(lambda l: not re.match(pattern, l, re.IGNORECASE), labels)
         if len(invalid) > 0:
-            raise ValueError('Label values for %s are not matching pattern "%s": %s' %
+            raise ParserError('Label values for %s are not matching pattern "%s": %s' %
                              (name, pattern, ', '.join(invalid)))
 
     def check_whitelisted_kind(self, name, definition):
@@ -323,11 +321,11 @@ associated with the histogram. Returns None if no guarding is necessary."""
         if not android_cpp_guard and \
            hist_kind in ["flag", "count"] and \
           name not in whitelists["kind"]:
-            raise KeyError(('New "%s" histograms are not supported on Desktop, you should'
-                            ' use scalars instead: %s'
-                            ' Are you trying to add a histogram on Android?'
-                            ' Add "cpp_guard": "ANDROID" to your histogram definition.')
-                           % (hist_kind, DOC_URL))
+            raise ParserError(('New "%s" histograms are not supported on Desktop, you should'
+                               ' use scalars instead: %s'
+                               ' Are you trying to add a histogram on Android?'
+                               ' Add "cpp_guard": "ANDROID" to your histogram definition.')
+                              % (hist_kind, DOC_URL))
 
     # Check for the presence of fields that old histograms are whitelisted for.
     def check_whitelistable_fields(self, name, definition):
@@ -343,10 +341,10 @@ associated with the histogram. Returns None if no guarding is necessary."""
 
         for field in ['alert_emails', 'bug_numbers']:
             if field not in definition and name not in whitelists[field]:
-                raise KeyError('New histogram "%s" must have a %s field.' % (name, field))
+                raise ParserError('New histogram "%s" must have a %s field.' % (name, field))
             if field in definition and name in whitelists[field]:
                 msg = 'Should remove histogram "%s" from the whitelist for "%s" in histogram-whitelists.json'
-                raise KeyError(msg % (name, field))
+                raise ParserError(msg % (name, field))
 
     def check_field_types(self, name, definition):
         # Define expected types for the histogram properties.
@@ -395,21 +393,21 @@ associated with the histogram. Returns None if no guarding is necessary."""
             if key not in definition:
                 continue
             if not isinstance(definition[key], key_type):
-                raise ValueError('value for key "{0}" in Histogram "{1}" should be {2}'
+                raise ParserError('value for key "{0}" in Histogram "{1}" should be {2}'
                                  .format(key, name, nice_type_name(key_type)))
 
         for key, key_type in type_checked_list_fields.iteritems():
             if key not in definition:
                 continue
             if not all(isinstance(x, key_type) for x in definition[key]):
-                raise ValueError('all values for list "{0}" in Histogram "{1}" should be {2}'
+                raise ParserError('all values for list "{0}" in Histogram "{1}" should be {2}'
                                  .format(key, name, nice_type_name(key_type)))
 
     @staticmethod
     def check_keys(name, definition, allowed_keys):
         for key in definition.iterkeys():
             if key not in allowed_keys:
-                raise KeyError('%s not permitted for %s' % (key, name))
+                raise ParserError('%s not permitted for %s' % (key, name))
 
     def set_bucket_parameters(self, low, high, n_buckets):
         self._low = low
@@ -417,7 +415,7 @@ associated with the histogram. Returns None if no guarding is necessary."""
         self._n_buckets = n_buckets
         if whitelists is not None and self._n_buckets > 100 and type(self._n_buckets) is int:
             if self._name not in whitelists['n_buckets']:
-                raise KeyError('New histogram "%s" is not permitted to have more than 100 buckets. '
+                raise ParserError('New histogram "%s" is not permitted to have more than 100 buckets. '
                                'Histograms with large numbers of buckets use disproportionately high amounts of resources. '
                                'Contact the Telemetry team (e.g. in #telemetry) if you think an exception ought to be made.' % self._name)
 
@@ -454,12 +452,12 @@ associated with the histogram. Returns None if no guarding is necessary."""
 
 
 # This hook function loads the histograms into an OrderedDict.
-# It will raise a ValueError if duplicate keys are found.
+# It will raise a ParserError if duplicate keys are found.
 def load_histograms_into_dict(ordered_pairs):
     d = collections.OrderedDict()
     for key, value in ordered_pairs:
         if key in d:
-            raise ValueError("Found duplicate key in Histograms file: %s" % key)
+            raise ParserError("Found duplicate key in Histograms file: %s" % key)
         d[key] = value
     return d
 
@@ -473,7 +471,7 @@ def from_Histograms_json(filename):
         try:
             histograms = json.load(f, object_pairs_hook=load_histograms_into_dict)
         except ValueError, e:
-            raise BaseException("error parsing histograms in %s: %s" % (filename, e.message))
+            raise ParserError("error parsing histograms in %s: %s" % (filename, e.message))
     return histograms
 
 
@@ -534,11 +532,11 @@ the histograms defined in filenames.
         # all_histograms stable, which makes ordering in generated files
         # stable, which makes builds more deterministic.
         if not isinstance(histograms, OrderedDict):
-            raise BaseException("histogram parser didn't provide an OrderedDict")
+            raise ParserError("histogram parser didn't provide an OrderedDict")
 
         for (name, definition) in histograms.iteritems():
             if name in all_histograms:
-                raise DefinitionException("duplicate histogram name %s" % name)
+                raise ParserError("duplicate histogram name %s" % name)
             all_histograms[name] = definition
 
     # We require that all USE_COUNTER2_* histograms be defined in a contiguous
@@ -550,7 +548,7 @@ the histograms defined in filenames.
         upper_bound = use_counter_indices[-1][0]
         n_counters = upper_bound - lower_bound + 1
         if n_counters != len(use_counter_indices):
-            raise DefinitionException("use counter histograms must be defined in a contiguous block")
+            raise ParserError("use counter histograms must be defined in a contiguous block")
 
     # Check that histograms that were removed from Histograms.json etc. are also removed from the whitelists.
     if whitelists is not None:
@@ -558,7 +556,7 @@ the histograms defined in filenames.
         orphaned = set(all_whitelist_entries) - set(all_histograms.keys())
         if len(orphaned) > 0:
             msg = 'The following entries are orphaned and should be removed from histogram-whitelists.json: %s'
-            raise BaseException(msg % (', '.join(sorted(orphaned))))
+            raise ParserError(msg % (', '.join(sorted(orphaned))))
 
     for (name, definition) in all_histograms.iteritems():
         yield Histogram(name, definition, strict_type_checks=True)

toolkit/components/telemetry/parse_events.py

@@ -9,6 +9,8 @@ import datetime
 import string
 import shared_telemetry_utils as utils
 
+from shared_telemetry_utils import ParserError
+
 MAX_CATEGORY_NAME_LENGTH = 30
 MAX_METHOD_NAME_LENGTH = 20
 MAX_OBJECT_NAME_LENGTH = 20
@@ -44,7 +46,7 @@ class AtomicTypeChecker:
 
     def check(self, identifier, key, value):
         if not isinstance(value, self.instance_type):
-            raise ValueError("%s: failed type check for %s - expected %s, got %s" %
+            raise ParserError("%s: failed type check for %s - expected %s, got %s" %
                              (identifier, key, nice_type_name(self.instance_type),
                               nice_type_name(type(value))))
 
@@ -53,13 +55,13 @@ class MultiTypeChecker:
     """Validate a simple value against a list of possible types"""
     def __init__(self, *instance_types):
         if not instance_types:
-            raise ValueError("At least one instance type is required")
+            raise ParserError("At least one instance type is required")
         self.instance_types = instance_types
 
     def check(self, identifier, key, value):
         if not any(isinstance(value, i) for i in self.instance_types):
-            raise ValueError("%s: failed type check for %s - got %s, expected one of: %s," %
+            raise ParserError("%s: failed type check for %s - got %s, expected one of: %s," %
                              (identifier, key,
                               nice_type_name(type(value)),
                               " or ".join(map(nice_type_name, self.instance_types))))
 
@@ -72,12 +74,12 @@ class ListTypeChecker:
 
     def check(self, identifier, key, value):
         if len(value) < 1:
-            raise ValueError("%s: failed check for %s - list should not be empty" %
+            raise ParserError("%s: failed check for %s - list should not be empty" %
                              (identifier, key))
 
         for x in value:
             if not isinstance(x, self.instance_type):
-                raise ValueError("%s: failed type check for %s - expected list value type %s, got"
+                raise ParserError("%s: failed type check for %s - expected list value type %s, got"
                                  " %s" % (identifier, key, nice_type_name(self.instance_type),
                                           nice_type_name(type(x))))
 
@@ -90,18 +92,18 @@ class DictTypeChecker:
     def check(self, identifier, key, value):
         if len(value.keys()) < 1:
-            raise ValueError("%s: failed check for %s - dict should not be empty" %
+            raise ParserError("%s: failed check for %s - dict should not be empty" %
                              (identifier, key))
 
         for x in value.iterkeys():
             if not isinstance(x, self.keys_instance_type):
-                raise ValueError("%s: failed dict type check for %s - expected key type %s, got "
+                raise ParserError("%s: failed dict type check for %s - expected key type %s, got "
                                  "%s" %
                                  (identifier, key,
                                   nice_type_name(self.keys_instance_type),
                                   nice_type_name(type(x))))
 
         for k, v in value.iteritems():
             if not isinstance(v, self.values_instance_type):
-                raise ValueError("%s: failed dict type check for %s - "
+                raise ParserError("%s: failed dict type check for %s - "
                                  "expected value type %s for key %s, got %s" %
                                  (identifier, key,
                                   nice_type_name(self.values_instance_type),
@@ -130,12 +132,12 @@ def type_check_event_fields(identifier, name, definition):
     # Check that all the required fields are available.
     missing_fields = [f for f in REQUIRED_FIELDS.keys() if f not in definition]
     if len(missing_fields) > 0:
-        raise KeyError(identifier + ' - missing required fields: ' + ', '.join(missing_fields))
+        raise ParserError(identifier + ' - missing required fields: ' + ', '.join(missing_fields))
 
     # Is there any unknown field?
     unknown_fields = [f for f in definition.keys() if f not in ALL_FIELDS]
     if len(unknown_fields) > 0:
-        raise KeyError(identifier + ' - unknown fields: ' + ', '.join(unknown_fields))
+        raise ParserError(identifier + ' - unknown fields: ' + ', '.join(unknown_fields))
 
     # Type-check fields.
     for k, v in definition.iteritems():
@@ -145,14 +147,14 @@ def type_check_event_fields(identifier, name, definition):
 
 def string_check(identifier, field, value, min_length=1, max_length=None, regex=None):
     # Length check.
     if len(value) < min_length:
-        raise ValueError("%s: value '%s' for field %s is less than minimum length of %d" %
+        raise ParserError("%s: value '%s' for field %s is less than minimum length of %d" %
                          (identifier, value, field, min_length))
     if max_length and len(value) > max_length:
-        raise ValueError("%s: value '%s' for field %s is greater than maximum length of %d" %
+        raise ParserError("%s: value '%s' for field %s is greater than maximum length of %d" %
                          (identifier, value, field, max_length))
     # Regex check.
     if regex and not re.match(regex, value):
-        raise ValueError('%s: string value "%s" for %s is not matching pattern "%s"' %
+        raise ParserError('%s: string value "%s" for %s is not matching pattern "%s"' %
                          (identifier, value, field, regex))
 
@@ -181,19 +183,19 @@ class EventData:
         rcc = definition.get(rcc_key, 'opt-in')
         allowed_rcc = ["opt-in", "opt-out"]
         if rcc not in allowed_rcc:
-            raise ValueError("%s: value for %s should be one of: %s" %
+            raise ParserError("%s: value for %s should be one of: %s" %
                              (self.identifier, rcc_key, ", ".join(allowed_rcc)))
 
         # Check record_in_processes.
         record_in_processes = definition.get('record_in_processes')
         for proc in record_in_processes:
             if not utils.is_valid_process_name(proc):
-                raise ValueError(self.identifier + ': unknown value in record_in_processes: ' + proc)
+                raise ParserError(self.identifier + ': unknown value in record_in_processes: ' + proc)
 
         # Check extra_keys.
         extra_keys = definition.get('extra_keys', {})
         if len(extra_keys.keys()) > MAX_EXTRA_KEYS_COUNT:
-            raise ValueError("%s: number of extra_keys exceeds limit %d" %
+            raise ParserError("%s: number of extra_keys exceeds limit %d" %
                              (self.identifier, MAX_EXTRA_KEYS_COUNT))
         for key in extra_keys.iterkeys():
             string_check(self.identifier, field='extra_keys', value=key,
@@ -202,12 +204,12 @@ class EventData:
 
         # Check expiry.
         if 'expiry_version' not in definition and 'expiry_date' not in definition:
-            raise KeyError("%s: event is missing an expiration - either expiry_version or expiry_date is required" %
+            raise ParserError("%s: event is missing an expiration - either expiry_version or expiry_date is required" %
                            (self.identifier))
         expiry_date = definition.get('expiry_date')
         if expiry_date and isinstance(expiry_date, basestring) and expiry_date != 'never':
             if not re.match(DATE_PATTERN, expiry_date):
-                raise ValueError("%s: event has invalid expiry_date, it should be either 'never' or match this format: %s" %
+                raise ParserError("%s: event has invalid expiry_date, it should be either 'never' or match this format: %s" %
                                  (self.identifier, DATE_PATTERN))
             # Parse into date.
             definition['expiry_date'] = datetime.datetime.strptime(expiry_date, '%Y-%m-%d')
@@ -298,7 +300,7 @@ def load_events(filename):
     """Parses a YAML file containing the event definitions.
 
     :param filename: the YAML file containing the event definitions.
-    :raises Exception: if the event file cannot be opened or parsed.
+    :raises ParserError: if the event file cannot be opened or parsed.
     """
 
     # Parse the event definitions from the YAML file.
@@ -307,9 +309,9 @@ def load_events(filename):
         with open(filename, 'r') as f:
             events = yaml.safe_load(f)
     except IOError, e:
-        raise Exception('Error opening ' + filename + ': ' + e.message)
-    except ValueError, e:
-        raise Exception('Error parsing events in ' + filename + ': ' + e.message)
+        raise ParserError('Error opening ' + filename + ': ' + e.message)
+    except ParserError, e:
+        raise ParserError('Error parsing events in ' + filename + ': ' + e.message)
 
     event_list = []
 
@@ -328,7 +330,7 @@ def load_events(filename):
 
         # Make sure that the category has at least one entry in it.
        if not category or len(category) == 0:
-            raise ValueError(category_name + ' must contain at least one entry')
+            raise ParserError(category_name + ' must contain at least one entry')
 
         for name, entry in category.iteritems():
             string_check(category_name, field='event name', value=name,

toolkit/components/telemetry/parse_scalars.py

@@ -6,6 +6,8 @@ import re
 import yaml
 import shared_telemetry_utils as utils
 
+from shared_telemetry_utils import ParserError
+
 # The map of containing the allowed scalar types and their mapping to
 # nsITelemetry::SCALAR_* type constants.
 
@@ -44,7 +46,7 @@ class ScalarType:
         :param group_name: the name of the group the probe is in.
         :param probe_name: the name of the scalar probe.
 
-        :raises ValueError: if the length of the names exceeds the limit or they don't
+        :raises ParserError: if the length of the names exceeds the limit or they don't
         conform our name specification.
         """
 
@@ -52,7 +54,7 @@ class ScalarType:
         MAX_NAME_LENGTH = 40
         for n in [group_name, probe_name]:
             if len(n) > MAX_NAME_LENGTH:
-                raise ValueError(("Name '{}' exceeds maximum name length of {} characters."
+                raise ParserError(("Name '{}' exceeds maximum name length of {} characters."
                                   " See: {}#the-yaml-definition-file")
                                  .format(n, MAX_NAME_LENGTH, BASE_DOC_URL))
 
@@ -60,12 +62,12 @@ class ScalarType:
         # Check if we only have the allowed characters.
         chars_regxp = r'^[a-zA-Z0-9' + allowed_char_regexp + r']+$'
         if not re.search(chars_regxp, name):
-            raise ValueError((error_msg_prefix + " name must be alpha-numeric. Got: '{}'. "
+            raise ParserError((error_msg_prefix + " name must be alpha-numeric. Got: '{}'. "
                               "See: {}#the-yaml-definition-file").format(name, BASE_DOC_URL))
 
         # Don't allow leading/trailing digits, '.' or '_'.
         if re.search(r'(^[\d\._])|([\d\._])$', name):
-            raise ValueError((error_msg_prefix + " name must not have a leading/trailing "
+            raise ParserError((error_msg_prefix + " name must not have a leading/trailing "
                               "digit, a dot or underscore. Got: '{}'."
                               " See: {}#the-yaml-definition-file").format(name, BASE_DOC_URL))
 
@@ -78,8 +80,8 @@ class ScalarType:
        - Checks that all the fields have the expected types.
 
         :param definition: the dictionary containing the scalar properties.
-        :raises TypeError: if a scalar definition field is of the wrong type.
-        :raise KeyError: if a required field is missing or unknown fields are present.
+        :raises ParserError: if a scalar definition field is of the wrong type.
+        :raises ParserError: if a required field is missing or unknown fields are present.
         """
 
         # The required and optional fields in a scalar type definition.
@@ -112,20 +114,20 @@ class ScalarType:
 
         # Checks that all the required fields are available.
         missing_fields = [f for f in REQUIRED_FIELDS.keys() if f not in definition]
         if len(missing_fields) > 0:
-            raise KeyError(self._name + ' - missing required fields: ' + ', '.join(missing_fields) +
+            raise ParserError(self._name + ' - missing required fields: ' + ', '.join(missing_fields) +
                            '. See: {}#required-fields'.format(BASE_DOC_URL))
 
         # Do we have any unknown field?
         unknown_fields = [f for f in definition.keys() if f not in ALL_FIELDS]
         if len(unknown_fields) > 0:
-            raise KeyError(self._name + ' - unknown fields: ' + ', '.join(unknown_fields) +
+            raise ParserError(self._name + ' - unknown fields: ' + ', '.join(unknown_fields) +
                            '. See: {}#required-fields'.format(BASE_DOC_URL))
 
         # Checks the type for all the fields.
         wrong_type_names = ['{} must be {}'.format(f, ALL_FIELDS[f].__name__)
                             for f in definition.keys() if not isinstance(definition[f], ALL_FIELDS[f])]
         if len(wrong_type_names) > 0:
-            raise TypeError(self._name + ' - ' + ', '.join(wrong_type_names) +
+            raise ParserError(self._name + ' - ' + ', '.join(wrong_type_names) +
                             '. See: {}#required-fields'.format(BASE_DOC_URL))
 
@@ -134,14 +136,14 @@ class ScalarType:
         for field in list_fields:
             # Check for empty lists.
             if len(definition[field]) == 0:
-                raise TypeError(("Field '{}' for probe '{}' must not be empty" +
+                raise ParserError(("Field '{}' for probe '{}' must not be empty" +
                                  ". See: {}#required-fields)")
                                 .format(field, self._name, BASE_DOC_URL))
             # Check the type of the list content.
             broken_types =\
                 [not isinstance(v, LIST_FIELDS_CONTENT[field]) for v in definition[field]]
             if any(broken_types):
-                raise TypeError(("Field '{}' for probe '{}' must only contain values of type {}"
+                raise ParserError(("Field '{}' for probe '{}' must only contain values of type {}"
                                  ". See: {}#the-yaml-definition-file)")
                                 .format(field, self._name, LIST_FIELDS_CONTENT[field].__name__,
                                         BASE_DOC_URL))
@@ -150,32 +152,32 @@ class ScalarType:
         """This function checks that the fields have the correct values.
 
         :param definition: the dictionary containing the scalar properties.
-        :raises ValueError: if a scalar definition field contains an unexpected value.
+        :raises ParserError: if a scalar definition field contains an unexpected value.
         """
 
         # Validate the scalar kind.
         scalar_kind = definition.get('kind')
         if scalar_kind not in SCALAR_TYPES_MAP.keys():
-            raise ValueError(self._name + ' - unknown scalar kind: ' + scalar_kind +
+            raise ParserError(self._name + ' - unknown scalar kind: ' + scalar_kind +
                              '. See: {}'.format(BASE_DOC_URL))
 
         # Validate the collection policy.
         collection_policy = definition.get('release_channel_collection', None)
         if collection_policy and collection_policy not in ['opt-in', 'opt-out']:
-            raise ValueError(self._name + ' - unknown collection policy: ' + collection_policy +
+            raise ParserError(self._name + ' - unknown collection policy: ' + collection_policy +
                              '. See: {}#optional-fields'.format(BASE_DOC_URL))
 
         # Validate the cpp_guard.
         cpp_guard = definition.get('cpp_guard')
         if cpp_guard and re.match(r'\W', cpp_guard):
-            raise ValueError(self._name + ' - invalid cpp_guard: ' + cpp_guard +
+            raise ParserError(self._name + ' - invalid cpp_guard: ' + cpp_guard +
                              '. See: {}#optional-fields'.format(BASE_DOC_URL))
 
         # Validate record_in_processes.
         record_in_processes = definition.get('record_in_processes', [])
         for proc in record_in_processes:
             if not utils.is_valid_process_name(proc):
-                raise ValueError(self._name + ' - unknown value in record_in_processes: ' + proc +
+                raise ParserError(self._name + ' - unknown value in record_in_processes: ' + proc +
                                  '. See: {}'.format(BASE_DOC_URL))
 
     @property
@@ -265,7 +267,7 @@ def load_scalars(filename):
     """Parses a YAML file containing the scalar definition.
 
     :param filename: the YAML file containing the scalars definition.
-    :raises Exception: if the scalar file cannot be opened or parsed.
+    :raises ParserError: if the scalar file cannot be opened or parsed.
     """
 
     # Parse the scalar definitions from the YAML file.
@@ -274,9 +276,9 @@ def load_scalars(filename):
         with open(filename, 'r') as f:
            scalars = yaml.safe_load(f)
     except IOError, e:
-        raise Exception('Error opening ' + filename + ': ' + e.message)
-    except ValueError, e:
-        raise Exception('Error parsing scalars in ' + filename + ': ' + e.message +
+        raise ParserError('Error opening ' + filename + ': ' + e.message)
+    except ParserError, e:
+        raise ParserError('Error parsing scalars in ' + filename + ': ' + e.message +
                         '. See: {}'.format(BASE_DOC_URL))
 
     scalar_list = []
@@ -289,7 +291,7 @@ def load_scalars(filename):
 
         # Make sure that the group has at least one probe in it.
         if not group or len(group) == 0:
-            raise ValueError(group_name + ' must have at least a probe in it' +
+            raise ParserError(group_name + ' must have at least a probe in it' +
                              '. See: {}'.format(BASE_DOC_URL))
 
         for probe_name in group:

toolkit/components/telemetry/shared_telemetry_utils.py

@@ -22,6 +22,11 @@ KNOWN_PROCESS_FLAGS = {
 
 PROCESS_ENUM_PREFIX = "mozilla::Telemetry::Common::RecordedProcessType::"
 
+
+# This is thrown by the different probe parsers.
+class ParserError(Exception):
+    pass
+
 
 def is_valid_process_name(name):
     return (name in KNOWN_PROCESS_FLAGS)
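
Deriving ParserError from Exception (rather than BaseException, which some of the removed raises used) matters for callers: a broad "except Exception" in driver code now reliably catches every probe-validation failure, while SystemExit and KeyboardInterrupt still propagate. A small sketch of that behaviour — check_name here is a simplified, hypothetical stand-in for the checks in the files above:

class ParserError(Exception):
    pass


def check_name(name):
    # Simplified version of the name validation in histogram_tools.py.
    if '#' in name:
        raise ParserError('"#" not permitted for %s' % name)


try:
    check_name('bad#name')
except Exception as ex:
    # Catches ParserError; would not swallow SystemExit or KeyboardInterrupt.
    print('validation failed: %s' % ex)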