Bug 1533423 - Raptor cold page-load support for Firefox android geckoview; r=jmaher

Differential Revision: https://phabricator.services.mozilla.com/D24464

--HG--
extra : moz-landing-system : lando
Rob Wood 2019-03-22 15:27:27 +00:00
parent 0274f5667c
commit 17dec601e9
12 changed files with 339 additions and 11 deletions

View File

@@ -322,6 +322,20 @@ raptor-tp6m-1-geckoview:
- --binary=org.mozilla.geckoview_example
- --activity=GeckoViewActivity
raptor-tp6m-1-geckoview-cold:
description: "Raptor tp6m-1 cold page-load on Geckoview"
try-name: raptor-tp6m-1-geckoview-cold
treeherder-symbol: Rap(tp6m-c-1)
run-on-projects: ['try', 'mozilla-central']
target: geckoview_example.apk
tier: 3
mozharness:
extra-options:
- --test=raptor-tp6m-cold-1
- --app=geckoview
- --binary=org.mozilla.geckoview_example
- --activity=GeckoViewActivity
raptor-tp6m-2-geckoview:
description: "Raptor tp6m-2 on Geckoview"
try-name: raptor-tp6m-2-geckoview

View File

@@ -435,6 +435,7 @@ android-hw-aarch64-raptor:
- raptor-tp6m-7-geckoview
- raptor-tp6m-8-geckoview
- raptor-tp6m-9-geckoview
- raptor-tp6m-1-geckoview-cold
android-hw-arm7-raptor-power:
- raptor-speedometer-geckoview-power

View File

@@ -126,6 +126,12 @@ class Raptor(TestingMixin, MercurialScript, CodeCoverageMixin, AndroidMixin):
"type": "int",
"help": "How long to wait (ms) for one page_cycle to complete, before timing out"
}],
[["--browser-cycles"], {
"dest": "browser_cycles",
"type": "int",
"help": "The number of times a cold load test is repeated (for cold load tests only, "
"where the browser is shutdown and restarted between test iterations)"
}],
[["--host"], {
"dest": "host",
"help": "Hostname from which to serve urls (default: 127.0.0.1). "

View File

@@ -86,6 +86,9 @@ def create_parser(mach_interface=False):
"for benchmark tests this is how many times the benchmark test will be run")
add_arg('--page-timeout', dest="page_timeout", type=int,
help="How long to wait (ms) for one page_cycle to complete, before timing out")
add_arg('--browser-cycles', dest="browser_cycles", type=int,
help="The number of times a cold load test is repeated (for cold load tests only, "
"where the browser is shutdown and restarted between test iterations)")
add_arg('--print-tests', action=_PrintTests,
help="Print all available Raptor tests")
add_arg('--debug-mode', dest="debug_mode", action="store_true",
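As a quick sanity check, a minimal standalone sketch (hypothetical values, using plain argparse rather than raptor's add_arg wrapper) of how the new flag parses:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--browser-cycles', dest='browser_cycles', type=int,
                    help='number of times a cold-load test is repeated')

args = parser.parse_args(['--browser-cycles', '10'])
assert args.browser_cycles == 10  # parsed as an int, ready for manifest.py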

View File

@@ -14,7 +14,8 @@ LOG = get_proxy_logger(component="raptor-gen-test-config")
def gen_test_config(browser, test, cs_port, post_startup_delay,
host='127.0.0.1', b_port=0, debug_mode=0):
host='127.0.0.1', b_port=0, debug_mode=0,
browser_cycle=1):
LOG.info("writing test settings into background js, so webext can get it")
data = """// this file is auto-generated by raptor, do not edit directly
@@ -26,10 +27,12 @@ function getTestConfig() {
"post_startup_delay": "%s",
"benchmark_port": "%d",
"host": "%s",
"debug_mode": "%d"};
"debug_mode": "%d",
"browser_cycle": "%d"};
}
""" % (browser, cs_port, test, host, cs_port, test, post_startup_delay, b_port, host, debug_mode)
""" % (browser, cs_port, test, host, cs_port, test, post_startup_delay, b_port, host, debug_mode,
browser_cycle)
webext_background_script = (os.path.join(webext_dir, "auto_gen_test_config.js"))
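For illustration, the slice of the generated config relevant to this change, written as a Python dict with hypothetical values (only the fields visible in the hunk above are shown; the webextension reads the same keys from getTestConfig()):

# hypothetical values; 'browser_cycle' is the newly added field
test_config = {
    "post_startup_delay": "30000",
    "benchmark_port": "0",
    "host": "127.0.0.1",
    "debug_mode": "0",
    "browser_cycle": "2",  # current browser restart iteration (1-based)
}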

View File

@@ -63,6 +63,9 @@ def validate_test_ini(test_details):
if setting == 'measure' and test_details['type'] == 'benchmark':
continue
if setting not in test_details:
# if page-cycles is not specified, it's ok as long as browser-cycles is there
if setting == "page-cycles" and test_details.get('browser_cycles') is not None:
continue
valid_settings = False
LOG.error("ERROR: setting '%s' is required but not found in %s"
% (setting, test_details['manifest']))
@@ -103,7 +106,9 @@ def write_test_settings_json(args, test_details, oskey):
test_settings = {
"raptor-options": {
"type": test_details['type'],
"cold": test_details['cold'],
"test_url": test_url,
"expected_browser_cycles": test_details['expected_browser_cycles'],
"page_cycles": int(test_details['page_cycles']),
"host": args.host,
}
@@ -223,7 +228,7 @@ def get_raptor_test_list(args, oskey):
# gecko-profiling enabled, or when --page-cycles cmd line arg was used (that overrides all)
for next_test in tests_to_run:
LOG.info("configuring settings for test %s" % next_test['name'])
max_page_cycles = next_test['page_cycles']
max_page_cycles = next_test.get('page_cycles', 1)
if args.gecko_profile is True:
next_test['gecko_profile'] = True
LOG.info("gecko-profiling enabled")
@@ -236,7 +241,7 @@ def get_raptor_test_list(args, oskey):
next_test['page_cycles'] = args.page_cycles
LOG.info("set page-cycles to %d as specified on cmd line" % args.page_cycles)
else:
if int(next_test['page_cycles']) > max_page_cycles:
if int(next_test.get('page_cycles', 1)) > max_page_cycles:
next_test['page_cycles'] = max_page_cycles
LOG.info("page-cycles set to %d" % next_test['page_cycles'])
# if --page-timeout was provided on the command line, use that instead of INI
@@ -244,6 +249,24 @@ def get_raptor_test_list(args, oskey):
LOG.info("setting page-timeout to %d as specified on cmd line" % args.page_timeout)
next_test['page_timeout'] = args.page_timeout
if next_test.get("cold", "false") == "true":
# when running in cold mode, take the number of browser restarts from the INI
# 'browser-cycles' value, as the browser is restarted between page-cycles; and set
# page-cycles to 1 as we only want a single page-load for every browser-cycle
next_test['cold'] = True
next_test['expected_browser_cycles'] = int(next_test['browser_cycles'])
next_test['page_cycles'] = 1
# also ensure '-cold' is in the test name so perfherder results indicate a cold page-load
if "-cold" not in next_test['name']:
next_test['name'] += "-cold"
else:
# when running in warm mode, just set expected browser-cycles to 1 and leave page-cycles as-is
next_test['cold'] = False
next_test['expected_browser_cycles'] = 1
# in either warm or cold mode, initialize the current 'browser-cycle' counter
next_test['browser_cycle'] = 1
if next_test.get('use_live_sites', "false") == "true":
# when using live sites we want to turn off playback
LOG.info("using live sites so turning playback off!")

View File

@@ -60,6 +60,13 @@ class Output(object):
'alertThreshold': float(test.alert_threshold)
}
# if cold load add that info to the suite result dict; this will be used later
# when combining the results from multiple browser cycles into one overall result
if test.cold is True:
suite['cold'] = True
suite['browser_cycle'] = int(test.browser_cycle)
suite['expected_browser_cycles'] = int(test.expected_browser_cycles)
suites.append(suite)
# process results for pageloader type of tests
@@ -86,10 +93,14 @@ class Output(object):
new_subtest['value'] = 0
new_subtest['unit'] = test.subtest_unit
# ignore first value due to 1st pageload noise
LOG.info("ignoring the first %s value due to initial pageload noise"
% measurement_name)
filtered_values = filter.ignore_first(new_subtest['replicates'], 1)
if test.cold is False:
# for warm page-load, ignore first value due to 1st pageload noise
LOG.info("ignoring the first %s value due to initial pageload noise"
% measurement_name)
filtered_values = filter.ignore_first(new_subtest['replicates'], 1)
else:
# for cold-load we want all the values
filtered_values = new_subtest['replicates']
# for pageload tests that measure TTFI: TTFI is not guaranteed to be available
# every time; the raptor measure.js webext will substitute a '-1' value in the
@@ -154,6 +165,119 @@ class Output(object):
self.summarized_results = test_results
def combine_browser_cycles(self):
'''
At this point the results have been summarized; however, there may have been multiple
browser cycles (i.e. cold load), in which case the results have one entry per test
per browser cycle. For each test we need to combine the results from all browser
cycles into one results entry.
For example, this is what the summarized results suites list looks like from a test that
was run with multiple (two) browser cycles:
[{'expected_browser_cycles': 2, 'extraOptions': [],
'name': u'raptor-tp6m-amazon-geckoview-cold', 'lowerIsBetter': True,
'alertThreshold': 2.0, 'value': 1776.94, 'browser_cycle': 1,
'subtests': [{'name': u'dcf', 'lowerIsBetter': True, 'alertThreshold': 2.0,
'value': 818, 'replicates': [818], 'unit': u'ms'}, {'name': u'fcp',
'lowerIsBetter': True, 'alertThreshold': 2.0, 'value': 1131, 'shouldAlert': True,
'replicates': [1131], 'unit': u'ms'}, {'name': u'fnbpaint', 'lowerIsBetter': True,
'alertThreshold': 2.0, 'value': 1056, 'replicates': [1056], 'unit': u'ms'},
{'name': u'ttfi', 'lowerIsBetter': True, 'alertThreshold': 2.0, 'value': 18074,
'replicates': [18074], 'unit': u'ms'}, {'name': u'loadtime', 'lowerIsBetter': True,
'alertThreshold': 2.0, 'value': 1002, 'shouldAlert': True, 'replicates': [1002],
'unit': u'ms'}],
'cold': True, 'type': u'pageload', 'unit': u'ms'},
{'expected_browser_cycles': 2, 'extraOptions': [],
'name': u'raptor-tp6m-amazon-geckoview-cold', 'lowerIsBetter': True,
'alertThreshold': 2.0, 'value': 840.25, 'browser_cycle': 2,
'subtests': [{'name': u'dcf', 'lowerIsBetter': True, 'alertThreshold': 2.0,
'value': 462, 'replicates': [462], 'unit': u'ms'}, {'name': u'fcp',
'lowerIsBetter': True, 'alertThreshold': 2.0, 'value': 718, 'shouldAlert': True,
'replicates': [718], 'unit': u'ms'}, {'name': u'fnbpaint', 'lowerIsBetter': True,
'alertThreshold': 2.0, 'value': 676, 'replicates': [676], 'unit': u'ms'},
{'name': u'ttfi', 'lowerIsBetter': True, 'alertThreshold': 2.0, 'value': 3084,
'replicates': [3084], 'unit': u'ms'}, {'name': u'loadtime', 'lowerIsBetter': True,
'alertThreshold': 2.0, 'value': 605, 'shouldAlert': True, 'replicates': [605],
'unit': u'ms'}],
'cold': True, 'type': u'pageload', 'unit': u'ms'}]
Need to combine those into a single entry.
'''
# first build a list of entries that need to be combined; as we do that, mark the
# original suite entry for deletion, so once combined we know which ones to delete.
# note that summarized results cover all tests that were run in the session, which
# could include cold and/or warm page-load and/or benchmarks combined
suites_to_be_combined = []
combined_suites = []
for _index, suite in enumerate(self.summarized_results['suites']):
if suite.get('cold') is None:
continue
if suite['expected_browser_cycles'] > 1:
_name = suite['name']
_details = suite.copy()
suites_to_be_combined.append({'name': _name, 'details': _details})
suite['to_be_deleted'] = True
# now create a new suite entry that will have all the results from
# all of the browser cycles, but in one result entry for each test
combined_suites = {}
for next_suite in suites_to_be_combined:
suite_name = next_suite['details']['name']
browser_cycle = next_suite['details']['browser_cycle']
LOG.info("combining results from browser cycle %d" % browser_cycle)
if browser_cycle == 1:
# first browser cycle so just take entire entry to start with
combined_suites[suite_name] = next_suite['details']
LOG.info("created new combined result with intial cycle replicates")
# remove the 'cold', 'browser_cycle', and 'expected_browser_cycles' info
# as we don't want that showing up in perfherder data output
del(combined_suites[suite_name]['cold'])
del(combined_suites[suite_name]['browser_cycle'])
del(combined_suites[suite_name]['expected_browser_cycles'])
else:
# subsequent browser cycles, already have an entry; just add subtest replicates
for next_subtest in next_suite['details']['subtests']:
# find the existing entry for that subtest in our new combined test entry
found_subtest = False
for combined_subtest in combined_suites[suite_name]['subtests']:
if combined_subtest['name'] == next_subtest['name']:
# add subtest (measurement type) replicates to the combined entry
LOG.info("adding replicates for %s" % next_subtest['name'])
combined_subtest['replicates'].extend(next_subtest['replicates'])
found_subtest = True
# the subtest / measurement type wasn't found in our existing combined
# result entry; if it is for the same suite name add it - this could happen
# as ttfi may not be available in every browser cycle
if not found_subtest:
LOG.info("adding replicates for %s" % next_subtest['name'])
combined_suites[next_suite['details']['name']]['subtests'] \
.append(next_subtest)
# now we have a single entry for each test; with all replicates from all browser cycles
for name in combined_suites:
vals = []
for next_sub in combined_suites[name]['subtests']:
# calculate sub-test results (i.e. each measurement type)
next_sub['value'] = filter.median(next_sub['replicates'])
# add to vals; vals is used to calculate overall suite result i.e. the
# geomean of all of the subtests / measurement types
vals.append([next_sub['value'], next_sub['name']])
# calculate overall suite result ('value') which is geomean of all measures
if len(combined_suites[name]['subtests']) > 1:
combined_suites[name]['value'] = self.construct_summary(vals, testname=name)
# now add the combined suite entry to our overall summarized results!
self.summarized_results['suites'].append(combined_suites[name])
# now it is safe to delete the original entries that were made by each cycle
self.summarized_results['suites'] = [item for item in self.summarized_results['suites']
if item.get('to_be_deleted') is not True]
def summarize_supporting_data(self):
'''
Supporting data was gathered outside of the main raptor test; it will be kept
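To make the combination step concrete, a minimal standalone sketch using the fcp values from the docstring example; median() and geomean() here are simplified stand-ins for raptor's filter.median and the geometric mean applied by construct_summary:

from math import exp, log

# per-cycle summarized entries for the same subtest (values from the example above)
cycle1 = {'name': 'fcp', 'replicates': [1131]}
cycle2 = {'name': 'fcp', 'replicates': [718]}

# cycle 1 seeds the combined entry; later cycles extend its replicates
combined = {'name': 'fcp',
            'replicates': cycle1['replicates'] + cycle2['replicates']}

def median(values):
    ordered = sorted(values)
    mid = len(ordered) // 2
    if len(ordered) % 2:
        return ordered[mid]
    return (ordered[mid - 1] + ordered[mid]) / 2.0

# the combined subtest value is the median of all replicates from all cycles
combined['value'] = median(combined['replicates'])  # 924.5

def geomean(values):
    return exp(sum(log(v) for v in values) / len(values))

# the overall suite value is the geometric mean of the subtest values
suite_value = geomean([combined['value']])  # trivially 924.5 with one subtest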

View File

@@ -13,7 +13,7 @@
# raptor pageload binast tests desktop
[include:tests/raptor-tp6-binast-1.ini]
# raptor pageload tests mobile
# raptor warm pageload tests mobile
[include:tests/raptor-tp6m-1.ini]
[include:tests/raptor-tp6m-2.ini]
[include:tests/raptor-tp6m-3.ini]
@@ -24,6 +24,9 @@
[include:tests/raptor-tp6m-8.ini]
[include:tests/raptor-tp6m-9.ini]
# raptor cold pageload tests mobile
[include:tests/raptor-tp6m-cold-1.ini]
# raptor benchmark tests
[include:tests/raptor-assorted-dom.ini]
[include:tests/raptor-motionmark-animometer.ini]

View File

@@ -131,7 +131,8 @@ class Raptor(object):
self.post_startup_delay,
host=self.config['host'],
b_port=self.benchmark_port,
debug_mode=1 if self.debug_mode else 0)
debug_mode=1 if self.debug_mode else 0,
browser_cycle=test['browser_cycle'])
self.install_raptor_webext()
@@ -604,7 +605,122 @@ class RaptorAndroid(Raptor):
self.control_server.device = self.device
self.control_server.app_name = self.config['binary']
def copy_cert_db(self, source_dir, target_dir):
# copy browser cert db (that was previously created via certutil) from source to target
cert_db_files = ['pkcs11.txt', 'key4.db', 'cert9.db']
for next_file in cert_db_files:
_source = os.path.join(source_dir, next_file)
_dest = os.path.join(target_dir, next_file)
if os.path.exists(_source):
self.log.info("copying %s to %s" % (_source, _dest))
shutil.copyfile(_source, _dest)
else:
self.log.critical("unable to find ssl cert db file: %s" % _source)
def run_test(self, test, timeout=None):
# tests will be run warm (i.e. NO browser restart between page-cycles)
# unless otherwise specified in the test INI by using 'cold = true'
if test.get('cold', False) is True:
self.run_test_cold(test, timeout)
else:
self.run_test_warm(test, timeout)
def run_test_cold(self, test, timeout=None):
'''
Run the Raptor test but restart the entire browser app between page-cycles.
Note: For page-load tests, playback is only started once, at the beginning of all
browser cycles, and stopped after all cycles are finished. The proxy is set via prefs
in the browser profile, so those prefs need to be set again in each new profile/cycle.
Note that instead of using the certutil tool each time to create a db and import the
mitmproxy SSL cert (it's done in mozbase/mozproxy) we will simply copy the existing
cert db from the first cycle's browser profile into the new clean profile; this way
we don't have to re-create the cert db on each browser cycle.
Since we're running in cold mode, before this point (in manifest.py) the
'expected-browser-cycles' value was already set from the INI 'browser-cycles' value,
and the 'page-cycles' value was set to 1 as we want to perform one page-cycle per
browser restart.
The 'browser-cycle' value is the current overall browser start iteration. The control
server will receive the current 'browser-cycle' and the 'expected-browser-cycles' in
each results set received; and will pass that on as part of the results so that the
results processing will know results for multiple browser cycles are being received.
The default is to run in warm mode, unless 'cold = true' is set in the test INI.
'''
self.log.info("test %s is running in cold mode; browser WILL be restarted between "
"page cycles" % test['name'])
if self.config['power_test']:
init_geckoview_power_test(self)
for test['browser_cycle'] in range(1, test['expected_browser_cycles'] + 1):
self.log.info("begin browser cycle %d of %d for test %s"
% (test['browser_cycle'], test['expected_browser_cycles'], test['name']))
self.run_test_setup(test)
if test['browser_cycle'] == 1:
self.create_raptor_sdcard_folder()
if test.get('playback', None) is not None:
self.start_playback(test)
# an ssl cert db has now been created in the profile; copy it out so we
# can use the same cert db in future test cycles / browser restarts
local_cert_db_dir = tempfile.mkdtemp()
self.log.info("backing up browser ssl cert db that was created via certutil")
self.copy_cert_db(self.config['local_profile_dir'], local_cert_db_dir)
if self.config['host'] not in ('localhost', '127.0.0.1'):
self.delete_proxy_settings_from_profile()
else:
# double-check to ensure app has been shutdown
self.device.stop_application(self.config['binary'])
# clear the android app data before the next app startup
self.clear_app_data()
# the initial browser profile was already created before run_test was called;
# for subsequent browser cycles we create a new one each time
self.create_browser_profile()
# get cert db from previous cycle profile and copy into new clean profile
# this saves us from having to start playback again / recreate cert db etc.
self.log.info("copying existing ssl cert db into new browser profile")
self.copy_cert_db(local_cert_db_dir, self.config['local_profile_dir'])
self.run_test_setup(test)
if test.get('playback', None) is not None:
self.turn_on_android_app_proxy()
self.copy_profile_onto_device()
# now start the browser/app under test
self.launch_firefox_android_app()
# set our control server flag to indicate we are running the browser/app
self.control_server._finished = False
self.wait_for_test_finish(test, timeout)
# in debug mode, and running locally, leave the browser running
if self.debug_mode and self.config['run_local']:
self.log.info("* debug-mode enabled - please shutdown the browser manually...")
self.runner.wait(timeout=None)
if self.config['power_test']:
finish_geckoview_power_test(self)
self.run_test_teardown()
def run_test_warm(self, test, timeout=None):
self.log.info("test %s is running in warm mode; browser will NOT be restarted between "
"page cycles" % test['name'])
if self.config['power_test']:
init_geckoview_power_test(self)
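The cert-db reuse described in the docstring reduces to a back-up-once, restore-per-cycle pattern; a standalone sketch with hypothetical profile paths:

import os
import shutil
import tempfile

CERT_DB_FILES = ['pkcs11.txt', 'key4.db', 'cert9.db']

def copy_cert_db(source_dir, target_dir):
    # copy the NSS cert db files if present (mirrors the method above)
    for name in CERT_DB_FILES:
        source = os.path.join(source_dir, name)
        if os.path.exists(source):
            shutil.copyfile(source, os.path.join(target_dir, name))

# browser cycle 1: playback/certutil create the db in the first profile,
# so back it up once to a temporary directory
backup_dir = tempfile.mkdtemp()
copy_cert_db('/tmp/profile-cycle-1', backup_dir)   # hypothetical path

# browser cycles 2..N: restore the backup into each fresh profile instead
# of re-running certutil
copy_cert_db(backup_dir, '/tmp/profile-cycle-2')   # hypothetical path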

View File

@@ -75,6 +75,9 @@ class RaptorResultsHandler():
LOG.info("summarizing raptor test results")
output = Output(self.results, self.supporting_data, test_config['subtest_alert_on'])
output.summarize(test_names)
# the summarized output has each browser cycle separate; check whether there were
# multiple browser cycles and, if so, combine results from all cycles into one overall result
output.combine_browser_cycles()
output.summarize_screenshots(self.images)
# only dump out supporting data (i.e. power) if actual Raptor test completed
if self.supporting_data is not None and len(self.results) != 0:

View File

@@ -0,0 +1,24 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# raptor tp6m-cold-1
[DEFAULT]
type = pageload
playback = mitmproxy-android
playback_binary_manifest = mitmproxy-rel-bin-{platform}.manifest
browser_cycles = 10
unit = ms
lower_is_better = true
alert_threshold = 2.0
page_timeout = 60000
alert_on = fcp, loadtime
cold = true
[raptor-tp6m-amazon-geckoview-cold]
apps = geckoview
test_url = https://www.amazon.com
playback_pageset_manifest = mitmproxy-recordings-raptor-tp6m-amazon.manifest
playback_recordings = android-amazon.mp
measure = fnbpaint, fcp, dcf, ttfi, loadtime
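In terms of replicate counts, a rough arithmetic sketch of what these settings imply:

# with 'cold = true', manifest.py forces page_cycles to 1 and takes the
# number of browser restarts from 'browser_cycles'
browser_cycles = 10
page_cycles_per_browser_cycle = 1
measures = ['fnbpaint', 'fcp', 'dcf', 'ttfi', 'loadtime']

# each measure collects one replicate per browser restart, so the combined
# result carries 10 replicates per measure (ttfi may occasionally be absent)
replicates_per_measure = browser_cycles * page_cycles_per_browser_cycle  # 10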

View File

@@ -33,6 +33,7 @@ var csPort = null;
var host = null;
var benchmarkPort = null;
var testType;
var browserCycle = 0;
var pageCycles = 0;
var pageCycle = 0;
var testURL;
@@ -63,6 +64,9 @@ var screenCapture = false;
var results = {"name": "",
"page": "",
"type": "",
"browser_cycle": 0,
"expected_browser_cycles": 0,
"cold": false,
"lower_is_better": true,
"alert_threshold": 2.0,
"measurements": {}};
@@ -99,6 +103,9 @@ function getTestSettings() {
results.page = testURL;
results.type = testType;
results.name = testName;
results.browser_cycle = browserCycle;
results.expected_browser_cycles = settings.expected_browser_cycles;
results.cold = settings.cold;
results.unit = settings.unit;
results.subtest_unit = settings.subtest_unit;
results.lower_is_better = settings.lower_is_better === true;
@@ -582,6 +589,7 @@ function raptorRunner() {
postStartupDelay = config.post_startup_delay;
host = config.host;
debugMode = config.debug_mode;
browserCycle = config.browser_cycle;
postToControlServer("status", "raptor runner.js is loaded!");
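For reference, a sketch (hypothetical values, written as the equivalent Python dict seen on the results-handler side) of the per-cycle fields the webextension now reports, which combine_browser_cycles() in output.py uses to merge cycles:

# hypothetical payload slice as received by the control server
results = {
    "name": "raptor-tp6m-amazon-geckoview-cold",
    "type": "pageload",
    "cold": True,
    "browser_cycle": 2,              # restart iteration that produced this run
    "expected_browser_cycles": 10,   # total restarts the harness will perform
    "measurements": {"fcp": [718]},  # hypothetical single replicate
}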