Backed out 5 changesets (bug 1392391) for frequently failing jsreftests on Android. a=backout

Backed out changeset 50df56a0cebf (bug 1392391)
Backed out changeset 406806a088d5 (bug 1392391)
Backed out changeset 6bd3abc55ea8 (bug 1392391)
Backed out changeset 846d50ea8403 (bug 1392391)
Backed out changeset b9e9374af9a1 (bug 1392391)
This commit is contained in:
Gurzau Raul 2018-02-07 02:20:38 +02:00
parent 4a583a2b25
commit 9c39f70c0f
7 changed files with 201 additions and 302 deletions

View File

@ -142,7 +142,6 @@ for (let [key, val] of Object.entries({
startAfter: undefined,
suiteStarted: false,
manageSuite: false,
// The enabled-state of the test-plugins, stored so they can be reset later
testPluginEnabledStates: null,

View File

@ -5,7 +5,7 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
"use strict";
this.EXPORTED_SYMBOLS = ["ReadTopManifest", "CreateUrls"];
this.EXPORTED_SYMBOLS = ["ReadTopManifest"];
var CC = Components.classes;
const CI = Components.interfaces;
@ -296,15 +296,25 @@ function ReadManifest(aURL, aFilter)
CI.nsIScriptSecurityManager.DISALLOW_SCRIPT);
ReadManifest(incURI, aFilter);
}
} else if (items[0] == TYPE_LOAD || items[0] == TYPE_SCRIPT) {
} else if (items[0] == TYPE_LOAD) {
if (items.length != 2)
throw "Error in manifest file " + aURL.spec + " line " + lineNo + ": incorrect number of arguments to " + items[0];
if (items[0] == TYPE_LOAD && expected_status != EXPECTED_PASS && expected_status != EXPECTED_DEATH)
throw "Error in manifest file " + aURL.spec + " line " + lineNo + ": incorrect number of arguments to load";
if (expected_status != EXPECTED_PASS &&
expected_status != EXPECTED_DEATH)
throw "Error in manifest file " + aURL.spec + " line " + lineNo + ": incorrect known failure type for load test";
var [testURI] = runHttp
? ServeFiles(principal, httpDepth,
listURL, [items[1]])
: [g.ioService.newURI(items[1], null, listURL)];
var prettyPath = runHttp
? g.ioService.newURI(items[1], null, listURL).spec
: testURI.spec;
secMan.checkLoadURIWithPrincipal(principal, testURI,
CI.nsIScriptSecurityManager.DISALLOW_SCRIPT);
AddTestItem({ type: TYPE_LOAD,
expected: expected_status,
manifest: aURL.spec,
allowSilentFail: allow_silent_fail,
prettyPath: prettyPath,
minAsserts: minAsserts,
maxAsserts: maxAsserts,
needsFocus: needs_focus,
@ -315,9 +325,36 @@ function ReadManifest(aURL, aFilter)
fuzzyMaxDelta: fuzzy_delta.max,
fuzzyMinPixels: fuzzy_pixels.min,
fuzzyMaxPixels: fuzzy_pixels.max,
runHttp: runHttp,
httpDepth: httpDepth,
url1: items[1],
url1: testURI,
url2: null,
chaosMode: chaosMode }, aFilter);
} else if (items[0] == TYPE_SCRIPT) {
if (items.length != 2)
throw "Error in manifest file " + aURL.spec + " line " + lineNo + ": incorrect number of arguments to script";
var [testURI] = runHttp
? ServeFiles(principal, httpDepth,
listURL, [items[1]])
: [g.ioService.newURI(items[1], null, listURL)];
var prettyPath = runHttp
? g.ioService.newURI(items[1], null, listURL).spec
: testURI.spec;
secMan.checkLoadURIWithPrincipal(principal, testURI,
CI.nsIScriptSecurityManager.DISALLOW_SCRIPT);
AddTestItem({ type: TYPE_SCRIPT,
expected: expected_status,
allowSilentFail: allow_silent_fail,
prettyPath: prettyPath,
minAsserts: minAsserts,
maxAsserts: maxAsserts,
needsFocus: needs_focus,
slow: slow,
prefSettings1: testPrefSettings,
prefSettings2: refPrefSettings,
fuzzyMinDelta: fuzzy_delta.min,
fuzzyMaxDelta: fuzzy_delta.max,
fuzzyMinPixels: fuzzy_pixels.min,
fuzzyMaxPixels: fuzzy_pixels.max,
url1: testURI,
url2: null,
chaosMode: chaosMode }, aFilter);
} else if (items[0] == TYPE_REFTEST_EQUAL || items[0] == TYPE_REFTEST_NOTEQUAL || items[0] == TYPE_PRINT) {
@ -330,9 +367,22 @@ function ReadManifest(aURL, aFilter)
throw "Error in manifest file " + aURL.spec + " line " + lineNo + ": minimum fuzz must be zero for tests of type " + items[0];
}
var [testURI, refURI] = runHttp
? ServeFiles(principal, httpDepth,
listURL, [items[1], items[2]])
: [g.ioService.newURI(items[1], null, listURL),
g.ioService.newURI(items[2], null, listURL)];
var prettyPath = runHttp
? g.ioService.newURI(items[1], null, listURL).spec
: testURI.spec;
secMan.checkLoadURIWithPrincipal(principal, testURI,
CI.nsIScriptSecurityManager.DISALLOW_SCRIPT);
secMan.checkLoadURIWithPrincipal(principal, refURI,
CI.nsIScriptSecurityManager.DISALLOW_SCRIPT);
var type = items[0];
if (g.compareStyloToGecko || g.compareRetainedDisplayLists) {
type = TYPE_REFTEST_EQUAL;
refURI = testURI;
// We expect twice as many assertion failures when running in
// styloVsGecko mode because we run each test twice: once in
@ -352,8 +402,8 @@ function ReadManifest(aURL, aFilter)
AddTestItem({ type: type,
expected: expected_status,
manifest: aURL.spec,
allowSilentFail: allow_silent_fail,
prettyPath: prettyPath,
minAsserts: minAsserts,
maxAsserts: maxAsserts,
needsFocus: needs_focus,
@ -364,10 +414,8 @@ function ReadManifest(aURL, aFilter)
fuzzyMaxDelta: fuzzy_delta.max,
fuzzyMinPixels: fuzzy_pixels.min,
fuzzyMaxPixels: fuzzy_pixels.max,
runHttp: runHttp,
httpDepth: httpDepth,
url1: items[1],
url2: items[2],
url1: testURI,
url2: refURI,
chaosMode: chaosMode }, aFilter);
} else {
throw "Error in manifest file " + aURL.spec + " line " + lineNo + ": unknown test type " + items[0];
@ -644,7 +692,7 @@ function ExtractRange(matches, startIndex, defaultMin = 0) {
};
}
function ServeTestBase(aURL, depth) {
function ServeFiles(manifestPrincipal, depth, aURL, files) {
var listURL = aURL.QueryInterface(CI.nsIFileURL);
var directory = listURL.file.parent;
@ -669,51 +717,33 @@ function ServeTestBase(aURL, depth) {
// Give the testbase URI access to XUL and XBL
Services.perms.add(testbase, "allowXULXBL", Services.perms.ALLOW_ACTION);
return testbase;
}
function CreateUrls(test) {
let secMan = CC[NS_SCRIPTSECURITYMANAGER_CONTRACTID]
.getService(CI.nsIScriptSecurityManager);
let manifestURL = g.ioService.newURI(test.manifest);
let principal = secMan.createCodebasePrincipal(manifestURL, {});
let testbase = manifestURL;
if (test.runHttp)
testbase = ServeTestBase(manifestURL, test.httpDepth)
function FileToURI(file)
{
if (file === null)
return file;
// Only serve relative URIs via the HTTP server, not absolute
// ones like about:blank.
var testURI = g.ioService.newURI(file, null, testbase);
secMan.checkLoadURIWithPrincipal(principal, testURI,
// XXX necessary? manifestURL guaranteed to be file, others always HTTP
secMan.checkLoadURIWithPrincipal(manifestPrincipal, testURI,
CI.nsIScriptSecurityManager.DISALLOW_SCRIPT);
return testURI;
}
let files = [test.url1, test.url2];
[test.url1, test.url2] = files.map(FileToURI);
if (test.url2 && g.compareStyloToGecko)
test.url2 = test.url1;
return test;
return files.map(FileToURI);
}
function AddTestItem(aTest, aFilter) {
if (!aFilter)
aFilter = [null, [], false];
var {url1, url2} = CreateUrls(Object.assign({}, aTest));
var globalFilter = aFilter[0];
var manifestFilter = aFilter[1];
var invertManifest = aFilter[2];
if ((globalFilter && !globalFilter.test(url1.spec)) ||
if ((globalFilter && !globalFilter.test(aTest.url1.spec)) ||
(manifestFilter &&
!(invertManifest ^ manifestFilter.test(url1.spec))))
!(invertManifest ^ manifestFilter.test(aTest.url1.spec))))
return;
if (g.focusFilterMode == FOCUS_FILTER_NEEDS_FOCUS_TESTS &&
!aTest.needsFocus)
@ -722,9 +752,10 @@ function AddTestItem(aTest, aFilter) {
aTest.needsFocus)
return;
if (url2 !== null)
aTest.identifier = [url1.spec, aTest.type, url2.spec];
if (aTest.url2 !== null)
aTest.identifier = [aTest.prettyPath, aTest.type, aTest.url2.spec];
else
aTest.identifier = url1.spec;
aTest.identifier = aTest.prettyPath;
g.urls.push(aTest);
}

View File

@ -4,7 +4,6 @@
import json
import threading
from collections import defaultdict
from mozlog.formatters import TbplFormatter
from mozrunner.utils import get_stack_fixer_function
@ -130,7 +129,6 @@ class OutputHandler(object):
self.stack_fixer_function = get_stack_fixer_function(utilityPath, symbolsPath)
self.log = log
self.proc_name = None
self.results = defaultdict(int)
def __call__(self, line):
# need to return processed messages to appease remoteautomation.py
@ -145,11 +143,7 @@ class OutputHandler(object):
return [line]
if isinstance(data, dict) and 'action' in data:
if data['action'] == 'results':
for k, v in data['results'].items():
self.results[k] += v
else:
self.log.log_raw(data)
self.log.log_raw(data)
else:
self.verbatim(json.dumps(data))

View File

@ -321,10 +321,10 @@ function InitAndStartRefTests()
// Focus the content browser.
if (g.focusFilterMode != FOCUS_FILTER_NON_NEEDS_FOCUS_TESTS) {
g.browser.addEventListener("focus", ReadTests, true);
g.browser.addEventListener("focus", StartTests, true);
g.browser.focus();
} else {
ReadTests();
StartTests();
}
}
@ -346,87 +346,13 @@ function Shuffle(array)
}
}
/**
 * Reads the set of tests to run into g.urls, based on preferences set by the
 * python harness, then either starts the run or dumps the parsed tests.
 * Reads prefs "reftest.manifests", "reftest.manifests.dumpTests" and
 * "reftest.tests"; exactly one of manifests/tests must be set (see the mode
 * comment below). On any exception, increments g.testResults.Exception and
 * logs it rather than propagating.
 */
function ReadTests() {
try {
// This may have been installed as a browser "focus" listener
// (see InitAndStartRefTests); detach so it only runs once.
if (g.focusFilterMode != FOCUS_FILTER_NON_NEEDS_FOCUS_TESTS) {
g.browser.removeEventListener("focus", ReadTests, true);
}
g.urls = [];
var prefs = Components.classes["@mozilla.org/preferences-service;1"].
getService(Components.interfaces.nsIPrefBranch);
/* There are three modes implemented here:
* 1) reftest.manifests
* 2) reftest.manifests and reftest.manifests.dumpTests
* 3) reftest.tests
*
* The first will parse the specified manifests, then immediately
* run the tests. The second will parse the manifests, save the test
* objects to a file and exit. The third will load a file of test
* objects and run them.
*
* The latter two modes are used to pass test data back and forth
* with python harness.
*/
// Second argument is the default when the pref is absent.
let manifests = prefs.getCharPref("reftest.manifests", null);
let dumpTests = prefs.getCharPref("reftest.manifests.dumpTests", null);
let testList = prefs.getCharPref("reftest.tests", null);
// Exactly one of the two input prefs must be provided; note this logs
// and calls DoneTests() but does not return, so execution falls through
// with both branches below skipped (both prefs nullish) or both set.
if ((testList && manifests) || !(testList || manifests)) {
logger.error("Exactly one of reftest.manifests or reftest.tests must be specified.");
DoneTests();
}
if (testList) {
// Mode 3: load pre-parsed test objects from a JSON file and rebuild
// their URI fields (CreateUrls) before starting.
let promise = OS.File.read(testList).then(function onSuccess(array) {
let decoder = new TextDecoder();
g.urls = JSON.parse(decoder.decode(array)).map(CreateUrls);
StartTests();
});
} else if (manifests) {
// Parse reftest manifests
// The pref value is a JSON object mapping manifest URL -> filter
// regex source (see the keys used below).
manifests = JSON.parse(manifests);
g.urlsFilterRegex = manifests[null];
var globalFilter = manifests.hasOwnProperty("") ? new RegExp(manifests[""]) : null;
var manifestURLs = Object.keys(manifests);
// Ensure we read manifests from higher up the directory tree first so that we
// process includes before reading the included manifest again
manifestURLs.sort(function(a,b) {return a.length - b.length})
manifestURLs.forEach(function(manifestURL) {
logger.info("Reading manifest " + manifestURL);
var filter = manifests[manifestURL] ? new RegExp(manifests[manifestURL]) : null;
ReadTopManifest(manifestURL, [globalFilter, filter, false]);
});
if (dumpTests) {
// Mode 2: serialize the parsed tests to the dump file and exit;
// DoneTests() is called on both success and failure so the
// harness always terminates.
let encoder = new TextEncoder();
let tests = encoder.encode(JSON.stringify(g.urls));
OS.File.writeAtomic(dumpTests, tests, {flush: true}).then(
function onSuccess() {
DoneTests();
},
function onFailure(reason) {
logger.error("failed to write test data: " + reason);
DoneTests();
}
)
} else {
// Mode 1: this instance owns the whole suite lifecycle
// (g.manageSuite gates suiteStarted/suiteEnd handling elsewhere).
g.manageSuite = true;
g.urls = g.urls.map(CreateUrls);
StartTests();
}
}
} catch(e) {
++g.testResults.Exception;
logger.error("EXCEPTION: " + e);
}
}
function StartTests()
{
if (g.focusFilterMode != FOCUS_FILTER_NON_NEEDS_FOCUS_TESTS) {
g.browser.removeEventListener("focus", StartTests, true);
}
var manifests;
/* These prefs are optional, so we don't need to spit an error to the log */
try {
var prefs = Components.classes["@mozilla.org/preferences-service;1"].
@ -462,7 +388,28 @@ function StartTests()
g.noCanvasCache = true;
}
g.urls = [];
try {
var manifests = JSON.parse(prefs.getCharPref("reftest.manifests"));
g.urlsFilterRegex = manifests[null];
} catch(e) {
logger.error("Unable to find reftest.manifests pref. Please ensure your profile is setup properly");
DoneTests();
}
try {
var globalFilter = manifests.hasOwnProperty("") ? new RegExp(manifests[""]) : null;
var manifestURLs = Object.keys(manifests);
// Ensure we read manifests from higher up the directory tree first so that we
// process includes before reading the included manifest again
manifestURLs.sort(function(a,b) {return a.length - b.length})
manifestURLs.forEach(function(manifestURL) {
logger.info("Reading manifest " + manifestURL);
var filter = manifests[manifestURL] ? new RegExp(manifests[manifestURL]) : null;
ReadTopManifest(manifestURL, [globalFilter, filter, false]);
});
BuildUseCounts();
// Filter tests which will be skipped to get a more even distribution when chunking
@ -502,7 +449,7 @@ function StartTests()
g.urls = g.urls.slice(start, end);
}
if (g.manageSuite && g.startAfter === undefined && !g.suiteStarted) {
if (g.startAfter === undefined && !g.suiteStarted) {
var ids = g.urls.map(function(obj) {
return obj.identifier;
});
@ -778,12 +725,8 @@ function StartCurrentURI(aURLTargetType)
function DoneTests()
{
if (g.manageSuite) {
g.suiteStarted = false
logger.suiteEnd({'results': g.testResults});
} else {
logger._logData('results', {results: g.testResults});
}
logger.suiteEnd({'results': g.testResults});
g.suiteStarted = false
logger.info("Slowest test took " + g.slowestTestTime + "ms (" + g.slowestTestURL + ")");
logger.info("Total canvas count = " + g.recycledCanvases.length);
if (g.failedUseWidgetLayers) {

View File

@ -2,16 +2,16 @@
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from contextlib import closing
import sys
import logging
import os
import psutil
import signal
import sys
import tempfile
import time
import tempfile
import traceback
import urllib2
from contextlib import closing
import mozdevice
import mozinfo
@ -143,7 +143,6 @@ class ReftestServer:
class RemoteReftest(RefTest):
use_marionette = False
parse_manifest = False
remoteApp = ''
resolver_cls = RemoteReftestResolver
@ -168,11 +167,11 @@ class RemoteReftest(RefTest):
self._devicemanager.removeDir(self.remoteCache)
self._populate_logger(options)
self.outputHandler = OutputHandler(self.log, options.utilityPath, options.symbolsPath)
outputHandler = OutputHandler(self.log, options.utilityPath, options.symbolsPath)
# RemoteAutomation.py's 'messageLogger' is also used by mochitest. Mimic a mochitest
# MessageLogger object to re-use this code path.
self.outputHandler.write = self.outputHandler.__call__
self.automation._processArgs['messageLogger'] = self.outputHandler
outputHandler.write = outputHandler.__call__
self.automation._processArgs['messageLogger'] = outputHandler
def findPath(self, paths, filename=None):
for path in paths:
@ -260,12 +259,12 @@ class RemoteReftest(RefTest):
# may not be able to access process info for all processes
continue
def createReftestProfile(self, options, startAfter=None, **kwargs):
def createReftestProfile(self, options, manifest, startAfter=None):
profile = RefTest.createReftestProfile(self,
options,
manifest,
server=options.remoteWebServer,
port=options.httpPort,
**kwargs)
port=options.httpPort)
if startAfter is not None:
print ("WARNING: Continuing after a crash is not supported for remote "
"reftest yet.")
@ -284,11 +283,6 @@ class RemoteReftest(RefTest):
# reftest pages at 1.0 zoom, rather than zooming to fit the CSS viewport.
prefs["apz.allow_zooming"] = False
if options.totalChunks:
prefs['reftest.totalChunks'] = options.totalChunks
if options.thisChunk:
prefs['reftest.thisChunk'] = options.thisChunk
# Set the extra prefs.
profile.set_preferences(prefs)
@ -339,21 +333,10 @@ class RemoteReftest(RefTest):
del browserEnv["XPCOM_MEM_BLOAT_LOG"]
return browserEnv
def runApp(self, options, cmdargs=None, timeout=None, debuggerInfo=None, symbolsPath=None,
valgrindPath=None, valgrindArgs=None, valgrindSuppFiles=None, **profileArgs):
if cmdargs is None:
cmdargs = []
if self.use_marionette:
cmdargs.append('-marionette')
binary = options.app
profile = self.createReftestProfile(options, **profileArgs)
# browser environment
env = self.buildBrowserEnv(options, profile.profile)
self.log.info("Running with e10s: {}".format(options.e10s))
def runApp(self, profile, binary, cmdargs, env,
timeout=None, debuggerInfo=None,
symbolsPath=None, options=None,
valgrindPath=None, valgrindArgs=None, valgrindSuppFiles=None):
status, lastTestSeen = self.automation.runApp(None, env,
binary,
profile.profile,
@ -366,9 +349,7 @@ class RemoteReftest(RefTest):
if status == 1:
# when max run time exceeded, avoid restart
lastTestSeen = RefTest.TEST_SEEN_FINAL
self.cleanup(profile.profile)
return status, lastTestSeen, self.outputHandler.results
return status, lastTestSeen
def cleanup(self, profileDir):
# Pull results back from device

View File

@ -18,7 +18,6 @@ import shutil
import signal
import subprocess
import sys
import tempfile
import threading
from datetime import datetime, timedelta
@ -29,14 +28,12 @@ if SCRIPT_DIRECTORY not in sys.path:
import mozcrash
import mozdebug
import mozfile
import mozinfo
import mozleak
import mozlog
import mozprocess
import mozprofile
import mozrunner
from manifestparser import TestManifest, filters as mpf
from mozrunner.utils import get_stack_fixer_function, test_environment
from mozscreenshot import printstatus, dump_screen
@ -229,10 +226,9 @@ class ReftestResolver(object):
class RefTest(object):
TEST_SEEN_INITIAL = 'reftest'
TEST_SEEN_FINAL = 'Main app process exited normally'
oldcwd = os.getcwd()
parse_manifest = True
resolver_cls = ReftestResolver
use_marionette = True
oldcwd = os.getcwd()
resolver_cls = ReftestResolver
def __init__(self):
update_mozinfo()
@ -240,7 +236,6 @@ class RefTest(object):
self.haveDumpedScreen = False
self.resolver = self.resolver_cls()
self.log = None
self.testDumpFile = os.path.join(tempfile.gettempdir(), 'reftests.json')
def _populate_logger(self, options):
if self.log:
@ -264,21 +259,17 @@ class RefTest(object):
"Get an absolute path relative to self.oldcwd."
return os.path.normpath(os.path.join(self.oldcwd, os.path.expanduser(path)))
def createReftestProfile(self, options, tests=None, manifests=None,
server='localhost', port=0, profile_to_clone=None,
startAfter=None, prefs=None):
def createReftestProfile(self, options, manifests, server='localhost', port=0,
profile_to_clone=None, startAfter=None):
"""Sets up a profile for reftest.
:param options: Object containing command line options
:param tests: List of test objects to run
:param manifests: List of manifest files to parse (only takes effect
if tests were not passed in)
:param manifests: Dictionary of the form {manifest_path: [filters]}
:param server: Server name to use for http tests
:param profile_to_clone: Path to a profile to use as the basis for the
test profile
:param startAfter: Start running tests after the specified test id
:param prefs: Extra preferences to set in the profile
"""
locations = mozprofile.permissions.ServerLocations()
locations.add_host(server, scheme='http', port=port)
locations.add_host(server, scheme='https', port=port)
@ -286,8 +277,12 @@ class RefTest(object):
# Set preferences for communication between our command line arguments
# and the reftest harness. Preferences that are required for reftest
# to work should instead be set in reftest-preferences.js .
prefs = prefs or {}
prefs = {}
prefs['reftest.timeout'] = options.timeout * 1000
if options.totalChunks:
prefs['reftest.totalChunks'] = options.totalChunks
if options.thisChunk:
prefs['reftest.thisChunk'] = options.thisChunk
if options.logFile:
prefs['reftest.logFile'] = options.logFile
if options.ignoreWindowSize:
@ -304,6 +299,7 @@ class RefTest(object):
prefs['reftest.cleanupPendingCrashes'] = True
prefs['reftest.focusFilterMode'] = options.focusFilterMode
prefs['reftest.logLevel'] = options.log_tbpl_level or 'info'
prefs['reftest.manifests'] = json.dumps(manifests)
prefs['reftest.suite'] = options.suite
if startAfter not in (None, self.TEST_SEEN_INITIAL, self.TEST_SEEN_FINAL):
@ -384,14 +380,6 @@ class RefTest(object):
else:
profile = mozprofile.Profile(**kwargs)
if tests:
testlist = os.path.join(profile.profile, 'reftests.json')
with open(testlist, 'w') as fh:
json.dump(tests, fh)
profile.set_preferences({'reftest.tests': testlist})
elif manifests:
profile.set_preferences({'reftest.manifests': json.dumps(manifests)})
if os.path.join(here, 'chrome') not in options.extraProfileFiles:
options.extraProfileFiles.append(os.path.join(here, 'chrome'))
@ -671,23 +659,10 @@ class RefTest(object):
self.log.info("Can't trigger Breakpad, just killing process")
process.kill()
def runApp(self, options, cmdargs=None, timeout=None, debuggerInfo=None,
symbolsPath=None, valgrindPath=None, valgrindArgs=None,
valgrindSuppFiles=None, **profileArgs):
if cmdargs is None:
cmdargs = []
if self.use_marionette:
cmdargs.append('-marionette')
binary = options.app
profile = self.createReftestProfile(options, **profileArgs)
# browser environment
env = self.buildBrowserEnv(options, profile.profile)
self.log.info("Running with e10s: {}".format(options.e10s))
def runApp(self, profile, binary, cmdargs, env,
timeout=None, debuggerInfo=None,
symbolsPath=None, options=None,
valgrindPath=None, valgrindArgs=None, valgrindSuppFiles=None):
def timeoutHandler():
self.handleTimeout(
@ -794,44 +769,12 @@ class RefTest(object):
status = 1
runner.cleanup()
self.cleanup(profile.profile)
if marionette_exception is not None:
exc, value, tb = marionette_exception
raise exc, value, tb
self.log.info("Process mode: {}".format('e10s' if options.e10s else 'non-e10s'))
return status, self.lastTestSeen, outputHandler.results
def getActiveTests(self, manifests, options, testDumpFile=None):
# These prefs will cause reftest.jsm to parse the manifests,
# dump the resulting tests to a file, and exit.
prefs = {
'reftest.manifests': json.dumps(manifests),
'reftest.manifests.dumpTests': testDumpFile or self.testDumpFile,
}
cmdargs = [] # ['-headless']
status, _, _ = self.runApp(options, cmdargs=cmdargs, prefs=prefs)
with open(self.testDumpFile, 'r') as fh:
tests = json.load(fh)
if os.path.isfile(self.testDumpFile):
mozfile.remove(self.testDumpFile)
for test in tests:
# Name and path are expected by manifestparser, but not used in reftest.
test['name'] = test['path'] = test['url1']
mp = TestManifest(strict=False)
mp.tests = tests
filters = []
if options.totalChunks:
filters.append(mpf.chunk_by_slice(options.thisChunk, options.totalChunks))
tests = mp.active_tests(exists=False, filters=filters)
return tests
return status, self.lastTestSeen
def runSerialTests(self, manifests, options, cmdargs=None):
debuggerInfo = None
@ -839,67 +782,75 @@ class RefTest(object):
debuggerInfo = mozdebug.get_debugger_info(options.debugger, options.debuggerArgs,
options.debuggerInteractive)
tests = None
if self.parse_manifest:
tests = self.getActiveTests(manifests, options)
ids = [t['identifier'] for t in tests]
self.log.suite_start(ids, name=options.suite)
profileDir = None
startAfter = None # When the previous run crashed, we skip the tests we ran before
prevStartAfter = None
for i in itertools.count():
status, startAfter, results = self.runApp(
options,
tests=tests,
manifests=manifests,
cmdargs=cmdargs,
# We generally want the JS harness or marionette
# to handle timeouts if they can.
# The default JS harness timeout is currently
# 300 seconds (default options.timeout).
# The default Marionette socket timeout is
# currently 360 seconds.
# Give the JS harness extra time to deal with
# its own timeouts and try to usually exceed
# the 360 second marionette socket timeout.
# See bug 479518 and bug 1414063.
timeout=options.timeout + 70.0,
symbolsPath=options.symbolsPath,
debuggerInfo=debuggerInfo
)
mozleak.process_leak_log(self.leakLogFile,
leak_thresholds=options.leakThresholds,
stack_fixer=get_stack_fixer_function(options.utilityPath,
options.symbolsPath))
try:
if cmdargs is None:
cmdargs = []
if status == 0:
break
if self.use_marionette:
cmdargs.append('-marionette')
if startAfter == self.TEST_SEEN_FINAL:
self.log.info("Finished running all tests, skipping resume "
"despite non-zero status code: %s" % status)
break
profile = self.createReftestProfile(options,
manifests,
startAfter=startAfter)
profileDir = profile.profile # name makes more sense
if startAfter is not None and options.shuffle:
self.log.error("Can not resume from a crash with --shuffle "
"enabled. Please consider disabling --shuffle")
break
if startAfter is not None and options.maxRetries <= i:
self.log.error("Hit maximum number of allowed retries ({}) "
"in the test run".format(options.maxRetries))
break
if startAfter == prevStartAfter:
# If the run is stuck on the same test, or the crashed
# test appeared more than once, stop
self.log.error("Force stop because we keep running into "
"test \"{}\"".format(startAfter))
break
prevStartAfter = startAfter
# TODO: we need to emit a SUITE-END log if it crashed
# browser environment
browserEnv = self.buildBrowserEnv(options, profileDir)
if self.parse_manifest:
self.log.suite_end(extra={'results': results})
self.log.info("Running with e10s: {}".format(options.e10s))
status, startAfter = self.runApp(profile,
binary=options.app,
cmdargs=cmdargs,
env=browserEnv,
# We generally want the JS harness or marionette
# to handle timeouts if they can.
# The default JS harness timeout is currently
# 300 seconds (default options.timeout).
# The default Marionette socket timeout is
# currently 360 seconds.
# Give the JS harness extra time to deal with
# its own timeouts and try to usually exceed
# the 360 second marionette socket timeout.
# See bug 479518 and bug 1414063.
timeout=options.timeout + 70.0,
symbolsPath=options.symbolsPath,
options=options,
debuggerInfo=debuggerInfo)
self.log.info("Process mode: {}".format('e10s' if options.e10s else 'non-e10s'))
mozleak.process_leak_log(self.leakLogFile,
leak_thresholds=options.leakThresholds,
stack_fixer=get_stack_fixer_function(options.utilityPath,
options.symbolsPath))
if status == 0:
break
if startAfter == self.TEST_SEEN_FINAL:
self.log.info("Finished running all tests, skipping resume "
"despite non-zero status code: %s" % status)
break
if startAfter is not None and options.shuffle:
self.log.error("Can not resume from a crash with --shuffle "
"enabled. Please consider disabling --shuffle")
break
if startAfter is not None and options.maxRetries <= i:
self.log.error("Hit maximum number of allowed retries ({}) "
"in the test run".format(options.maxRetries))
break
if startAfter == prevStartAfter:
# If the run is stuck on the same test, or the crashed
# test appeared more than once, stop
self.log.error("Force stop because we keep running into "
"test \"{}\"".format(startAfter))
break
prevStartAfter = startAfter
# TODO: we need to emit a SUITE-END log if it crashed
finally:
self.cleanup(profileDir)
return status
def copyExtraFilesToProfile(self, options, profile):

View File

@ -304,7 +304,7 @@ class StructuredLogger(object):
if not self._ensure_suite_state('suite_end', data):
return
self._log_data("suite_end", data)
self._log_data("suite_end")
@log_action(TestId("test"),
Unicode("path", default=None, optional=True))