Bug 1472800 - [raptor] Add the unity-webgl benchmark r=rwood

This will run the benchmark in taskcluster as well as allow us to run it locally with:
./mach raptor-test -t raptor-unity-webgl

Depends on D2306.

Differential Revision: https://phabricator.services.mozilla.com/D2307

--HG--
extra : moz-landing-system : lando
This commit is contained in:
Andrew Halberstadt 2018-08-07 18:22:02 +00:00
parent a357cacbdb
commit 5df71be96e
4 changed files with 73 additions and 1 deletions

View File

@ -95,6 +95,8 @@ class Output(object):
subtests, vals = self.parseSunspiderOutput(test)
elif 'webaudio' in test.measurements:
subtests, vals = self.parseWebaudioOutput(test)
elif 'unity-webgl' in test.measurements:
subtests, vals = self.parseUnityWebGLOutput(test)
suite['subtests'] = subtests
else:
@ -296,6 +298,50 @@ class Output(object):
return subtests, vals
def parseUnityWebGLOutput(self, test):
    """Parse the self-reported results of a unity-webgl benchmark run.

    Example output (this is one page cycle):
    {'name': 'raptor-unity-webgl-firefox',
     'type': 'benchmark',
     'measurements': {
         'unity-webgl': [
             [
                 '[{"benchmark":"Mandelbrot GPU","result":1035361},...}]'
             ]
         ]
     },
     'lower_is_better': False,
     'unit': 'score'
    }

    :param test: a test result object; reads ``test.measurements``,
        ``test.unit``, ``test.alert_threshold`` and ``test.lower_is_better``.
    :returns: tuple ``(subtests, vals)`` where ``subtests`` is a list of
        per-benchmark dicts (with median ``value`` over all replicates) and
        ``vals`` is a list of ``[value, name]`` pairs, sorted by name in
        reverse order.
    """
    _subtests = {}
    for page_cycle in test.measurements['unity-webgl']:
        # Each page cycle reports a single JSON string holding a list of
        # {"benchmark": <name>, "result": <score>} entries.  NOTE: the
        # original implementation rebound the outer loop's iterable name
        # here ('data'); use a distinct name to avoid shadowing.
        for item in json.loads(page_cycle[0]):
            # for each pagecycle, build a list of subtests and append all
            # related replicates
            sub = item['benchmark']
            if sub not in _subtests:
                # subtest not added yet, first pagecycle, so add new one
                _subtests[sub] = {'unit': test.unit,
                                  'alertThreshold': float(test.alert_threshold),
                                  'lowerIsBetter': test.lower_is_better,
                                  'name': sub,
                                  'replicates': []}
            _subtests[sub]['replicates'].append(item['result'])

    vals = []
    subtests = []
    # dict.keys() returns a view in Python 3 which has no .sort(); the
    # original 'names.sort(reverse=True)' would raise AttributeError there.
    # sorted() works identically on both Python 2 and 3.
    for name in sorted(_subtests, reverse=True):
        _subtests[name]['value'] = filter.median(_subtests[name]['replicates'])
        subtests.append(_subtests[name])
        vals.append([_subtests[name]['value'], name])
    return subtests, vals
def output(self):
"""output to file and perfherder data json """
if self.summarized_results == {}:
@ -371,6 +417,14 @@ class Output(object):
results = [i for i, j in val_list if j == 'Geometric Mean']
return filter.mean(results)
@classmethod
def unity_webgl_score(cls, val_list):
    """
    unity_webgl_score: self reported as 'Geometric Mean'
    """
    # The benchmark reports its overall score as the subtest labelled
    # 'Geometric Mean'; average those entries across page cycles.
    scores = [score for score, label in val_list if label == 'Geometric Mean']
    return filter.mean(scores)
@classmethod
def stylebench_score(cls, val_list):
"""
@ -436,6 +490,8 @@ class Output(object):
return self.stylebench_score(vals)
elif testname.startswith('raptor-sunspider'):
return self.sunspider_score(vals)
elif testname.startswith('raptor-unity-webgl'):
return self.unity_webgl_score(vals)
elif testname.startswith('raptor-webaudio'):
return self.webaudio_score(vals)
elif len(vals) > 1:

View File

@ -5,5 +5,6 @@
[include:tests/raptor-sunspider.ini]
[include:tests/raptor-motionmark-htmlsuite.ini]
[include:tests/raptor-motionmark-animometer.ini]
[include:tests/raptor-unity-webgl.ini]
[include:tests/raptor-webaudio.ini]
[include:tests/raptor-gdocs.ini]

View File

@ -0,0 +1,14 @@
[DEFAULT]
type = benchmark
test_url = http://localhost:<port>/unity-webgl/index.html?raptor
page_cycles = 5
page_timeout = 900000
unit = score
lower_is_better = false
alert_threshold = 2.0
[raptor-unity-webgl-firefox]
apps = firefox
[raptor-unity-webgl-chrome]
apps = chrome

View File

@ -24,7 +24,8 @@
"*://*/StyleBench/*",
"*://*/MotionMark/*",
"*://*/SunSpider/*",
"*://*/webaudio/*"],
"*://*/webaudio/*",
"*://*/unity-webgl/index.html*"],
"js": ["benchmark-relay.js"]
}
],