Bug 1548845 - [raptor] Fix local import of filter module. r=perftest-reviewers,rwood

To avoid colliding with the built-in "filter" function, the local
filter module should be named filters.
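
For context, a minimal sketch of the collision being avoided (the only
assumption is a local module named filter.py on the import path): after the
import, the name "filter" is bound to the module object instead of the
built-in, so calling it fails::

    import filter  # the local module now shadows the built-in in this scope

    filter(lambda n: n % 2 == 0, range(10))
    # TypeError: 'module' object is not callable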

Differential Revision: https://phabricator.services.mozilla.com/D30532

--HG--
rename : testing/raptor/raptor/filter.py => testing/raptor/raptor/filters.py
extra : moz-landing-system : lando
Author: Henrik Skupin
Date:   2019-05-17 11:12:40 +00:00
parent 11d84bf86a
commit 992c3cdb7d
2 changed files with 27 additions and 27 deletions

--- a/testing/raptor/raptor/filter.py
+++ b/testing/raptor/raptor/filters.py

@@ -16,10 +16,10 @@ Each filter is a simple function, but it also has attached a special
`prepare` method that creates a tuple with one instance of a
:class:`Filter`; this allows one to write stuff like::
-    from raptor import filter
-    filters = filter.ignore_first.prepare(1) + filter.median.prepare()
+    from raptor import filters
+    filter_list = filters.ignore_first.prepare(1) + filters.median.prepare()
-    for filter in filters:
+    for filter in filter_list:
        data = filter(data)
    # data is filtered
"""

--- a/testing/raptor/raptor/output.py
+++ b/testing/raptor/raptor/output.py

@@ -8,7 +8,7 @@
"""output raptor test results"""
from __future__ import absolute_import
-import filter
+import filters
import json
import os
@@ -101,7 +101,7 @@ class Output(object):
# for warm page-load, ignore first value due to 1st pageload noise
LOG.info("ignoring the first %s value due to initial pageload noise"
% measurement_name)
-filtered_values = filter.ignore_first(new_subtest['replicates'], 1)
+filtered_values = filters.ignore_first(new_subtest['replicates'], 1)
else:
# for cold-load we want all the values
filtered_values = new_subtest['replicates']
@@ -111,7 +111,7 @@ class Output(object):
# cases where TTFI is not available, which is acceptable; however we don't want
# to include those '-1' TTFI values in our final results calculations
if measurement_name == "ttfi":
-filtered_values = filter.ignore_negative(filtered_values)
+filtered_values = filters.ignore_negative(filtered_values)
# we've already removed the first pageload value; if there aren't any more
# valid TTFI values available for this pageload just remove it from results
if len(filtered_values) < 1:
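
As a sketch of the semantics relied on here (assuming TTFI reports the
sentinel -1 when unavailable, per the comment above), ignore_negative simply
drops values below zero::

    def ignore_negative(values):
        # keep only non-negative replicates; -1 marks "not available"
        return [v for v in values if v >= 0]

    print(ignore_negative([840, -1, 912, -1]))  # [840, 912]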
@@ -125,7 +125,7 @@ class Output(object):
% measurement_name)
new_subtest['shouldAlert'] = True
-new_subtest['value'] = filter.median(filtered_values)
+new_subtest['value'] = filters.median(filtered_values)
vals.append([new_subtest['value'], new_subtest['name']])
subtests.append(new_subtest)
@@ -272,7 +272,7 @@ class Output(object):
vals = []
for next_sub in combined_suites[name]['subtests']:
# calculate sub-test results (i.e. each measurement type)
-next_sub['value'] = filter.median(next_sub['replicates'])
+next_sub['value'] = filters.median(next_sub['replicates'])
# add to vals; vals is used to calculate overall suite result i.e. the
# geomean of all of the subtests / measurement types
vals.append([next_sub['value'], next_sub['name']])
@@ -404,7 +404,7 @@ class Output(object):
names = _subtests.keys()
names.sort(reverse=True)
for name in names:
-_subtests[name]['value'] = filter.median(_subtests[name]['replicates'])
+_subtests[name]['value'] = filters.median(_subtests[name]['replicates'])
subtests.append(_subtests[name])
vals.append([_subtests[name]['value'], name])
@@ -441,7 +441,7 @@ class Output(object):
names = _subtests.keys()
names.sort(reverse=True)
for name in names:
-_subtests[name]['value'] = filter.median(_subtests[name]['replicates'])
+_subtests[name]['value'] = filters.median(_subtests[name]['replicates'])
subtests.append(_subtests[name])
vals.append([_subtests[name]['value'], name])
@@ -480,7 +480,7 @@ class Output(object):
names = _subtests.keys()
names.sort(reverse=True)
for name in names:
-_subtests[name]['value'] = filter.median(_subtests[name]['replicates'])
+_subtests[name]['value'] = filters.median(_subtests[name]['replicates'])
subtests.append(_subtests[name])
vals.append([_subtests[name]['value'], name])
@@ -527,7 +527,7 @@ class Output(object):
names = _subtests.keys()
names.sort(reverse=True)
for name in names:
-_subtests[name]['value'] = filter.median(_subtests[name]['replicates'])
+_subtests[name]['value'] = filters.median(_subtests[name]['replicates'])
subtests.append(_subtests[name])
vals.append([_subtests[name]['value'], name])
@@ -582,7 +582,7 @@ class Output(object):
names = _subtests.keys()
names.sort(reverse=True)
for name in names:
-_subtests[name]['value'] = filter.median(_subtests[name]['replicates'])
+_subtests[name]['value'] = filters.median(_subtests[name]['replicates'])
subtests.append(_subtests[name])
vals.append([_subtests[name]['value'], name])
@@ -609,7 +609,7 @@ class Output(object):
names = _subtests.keys()
names.sort(reverse=True)
for name in names:
-_subtests[name]['value'] = filter.mean(_subtests[name]['replicates'])
+_subtests[name]['value'] = filters.mean(_subtests[name]['replicates'])
subtests.append(_subtests[name])
vals.append([_subtests[name]['value'], name])
@@ -654,7 +654,7 @@ class Output(object):
names = _subtests.keys()
names.sort(reverse=True)
for name in names:
-_subtests[name]['value'] = filter.median(_subtests[name]['replicates'])
+_subtests[name]['value'] = filters.median(_subtests[name]['replicates'])
subtests.append(_subtests[name])
vals.append([_subtests[name]['value'], name])
@@ -693,7 +693,7 @@ class Output(object):
names = _subtests.keys()
names.sort(reverse=True)
for name in names:
-_subtests[name]['value'] = round(filter.median(_subtests[name]['replicates']), 2)
+_subtests[name]['value'] = round(filters.median(_subtests[name]['replicates']), 2)
subtests.append(_subtests[name])
# only use the 'total's to compute the overall result
if name == 'total':
@@ -830,7 +830,7 @@ class Output(object):
@classmethod
def v8_Metric(cls, val_list):
results = [i for i, j in val_list]
-score = 100 * filter.geometric_mean(results)
+score = 100 * filters.geometric_mean(results)
return score
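
A minimal sketch of a geometric mean, the aggregate these benchmark scores are
built on (illustrative; the actual filters implementation may differ)::

    import math

    def geometric_mean(values):
        # nth root of the product, computed in log space for stability
        return math.exp(sum(math.log(v) for v in values) / len(values))

    print(100 * geometric_mean([1.0, 4.0]))  # 200.0, since the geomean is 2.0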
@classmethod
@@ -853,7 +853,7 @@
raise Exception("Speedometer has 160 subtests, found: %s instead" % len(results))
results = results[9::10]
-score = 60 * 1000 / filter.geometric_mean(results) / correctionFactor
+score = 60 * 1000 / filters.geometric_mean(results) / correctionFactor
return score
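
The results[9::10] slice keeps every tenth entry starting at index 9; with the
160 Speedometer values checked above that yields 16 entries, presumably one
total per suite (an inference, not stated in the diff)::

    results = list(range(160))   # stand-in for 160 subtest values
    totals = results[9::10]      # indices 9, 19, ..., 159
    print(len(totals))           # 16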
@classmethod
@@ -862,7 +862,7 @@
benchmark_score: ares6/jetstream self reported as 'geomean'
"""
results = [i for i, j in val_list if j == 'geomean']
-return filter.mean(results)
+return filters.mean(results)
@classmethod
def webaudio_score(cls, val_list):
@@ -870,7 +870,7 @@
webaudio_score: self reported as 'Geometric Mean'
"""
results = [i for i, j in val_list if j == 'Geometric Mean']
-return filter.mean(results)
+return filters.mean(results)
@classmethod
def unity_webgl_score(cls, val_list):
@@ -878,7 +878,7 @@
unity_webgl_score: self reported as 'Geometric Mean'
"""
results = [i for i, j in val_list if j == 'Geometric Mean']
-return filter.mean(results)
+return filters.mean(results)
@classmethod
def wasm_misc_score(cls, val_list):
@@ -886,7 +886,7 @@
wasm_misc_score: self reported as '__total__'
"""
results = [i for i, j in val_list if j == '__total__']
-return filter.mean(results)
+return filters.mean(results)
@classmethod
def wasm_godot_score(cls, val_list):
@@ -894,7 +894,7 @@
wasm_godot_score: first-interactive mean
"""
results = [i for i, j in val_list if j == 'first-interactive']
-return filter.mean(results)
+return filters.mean(results)
@classmethod
def stylebench_score(cls, val_list):
@@ -940,7 +940,7 @@
raise Exception("StyleBench has 380 entries, found: %s instead" % len(results))
results = results[75::76]
-score = 60 * 1000 / filter.geometric_mean(results) / correctionFactor
+score = 60 * 1000 / filters.geometric_mean(results) / correctionFactor
return score
@classmethod
@@ -951,7 +951,7 @@
@classmethod
def assorted_dom_score(cls, val_list):
results = [i for i, j in val_list]
-return round(filter.geometric_mean(results), 2)
+return round(filters.geometric_mean(results), 2)
@classmethod
def supporting_data_total(cls, val_list):
@@ -984,6 +984,6 @@ class Output(object):
elif testname.startswith('supporting_data'):
return self.supporting_data_total(vals)
elif len(vals) > 1:
-return round(filter.geometric_mean([i for i, j in vals]), 2)
+return round(filters.geometric_mean([i for i, j in vals]), 2)
else:
-return round(filter.mean([i for i, j in vals]), 2)
+return round(filters.mean([i for i, j in vals]), 2)
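
As a quick consistency check on this final dispatch (a sketch; geometric_mean
and mean stand in for the filters functions), the single-value fallback to
mean agrees with the geometric mean, since both reduce to the value itself::

    import math

    def geometric_mean(values):
        return math.exp(sum(math.log(v) for v in values) / len(values))

    def mean(values):
        return float(sum(values)) / len(values)

    print(round(geometric_mean([42.0]), 2), round(mean([42.0]), 2))  # 42.0 42.0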