mirror of https://github.com/mozilla/gecko-dev.git
Back out d1779fe421c3 (bug 1034290) for UnicodeDecodeErrors in things run by the reftest harness
CLOSED TREE
parent ecf67cb48c
commit 66e6da78e6
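Context for the failure named in the commit message (not part of the commit itself): under Python 2, which this harness runs on, calling .encode() on an already-encoded byte string first performs an implicit ASCII decode, so any non-ASCII byte raises UnicodeDecodeError even with a 'replace' error handler. A minimal sketch of both code paths, assuming Python 2; the sample bytes are hypothetical:

    # -*- coding: utf-8 -*-
    # Raw bytes as read from the browser process (hypothetical sample).
    line = "TEST-START | caf\xc3\xa9.html"

    # Restored path (automation.py below): decode the bytes, ignoring
    # anything undecodable. This cannot raise.
    print(line.rstrip().decode("UTF-8", "ignore"))

    # Backed-out path (output.py's OutputHandler.verbatim below):
    # str.encode() on a Python 2 byte string implicitly decodes it as
    # ASCII first, so the non-ASCII bytes above raise UnicodeDecodeError
    # despite the 'replace' handler.
    try:
        line.encode("utf-8", "replace")
    except UnicodeDecodeError as e:
        print("UnicodeDecodeError: %s" % e)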
@@ -396,7 +396,7 @@ class Automation(object):
self.log.info("Can't trigger Breakpad, just killing process")
self.killPid(processPID)

def waitForFinish(self, proc, utilityPath, timeout, maxTime, startTime, debuggerInfo, symbolsPath, outputHandler=None):
def waitForFinish(self, proc, utilityPath, timeout, maxTime, startTime, debuggerInfo, symbolsPath):
""" Look for timeout or crashes and return the status after the process terminates """
stackFixerFunction = None
didTimeout = False
@@ -436,12 +436,7 @@ class Automation(object):
while line != "" and not didTimeout:
if stackFixerFunction:
line = stackFixerFunction(line)

if outputHandler is None:
self.log.info(line.rstrip().decode("UTF-8", "ignore"))
else:
outputHandler(line)

self.log.info(line.rstrip().decode("UTF-8", "ignore"))
if "TEST-START" in line and "|" in line:
self.lastTestSeen = line.split("|")[1].strip()
if not debuggerInfo and "TEST-UNEXPECTED-FAIL" in line and "Test timed out" in line:
@@ -535,7 +530,7 @@ class Automation(object):
debuggerInfo = None, symbolsPath = None,
timeout = -1, maxTime = None, onLaunch = None,
detectShutdownLeaks = False, screenshotOnFail=False, testPath=None, bisectChunk=None,
valgrindPath=None, valgrindArgs=None, valgrindSuppFiles=None, outputHandler=None):
valgrindPath=None, valgrindArgs=None, valgrindSuppFiles=None):
"""
Run the app, log the duration it took to execute, return the status code.
Kills the app if it runs for longer than |maxTime| seconds, or outputs nothing for |timeout| seconds.
@@ -584,8 +579,7 @@ class Automation(object):
# app is launched.
onLaunch()

status = self.waitForFinish(proc, utilityPath, timeout, maxTime, startTime, debuggerInfo, symbolsPath,
outputHandler=outputHandler)
status = self.waitForFinish(proc, utilityPath, timeout, maxTime, startTime, debuggerInfo, symbolsPath)
self.log.info("INFO | automation.py | Application ran for: %s", str(datetime.now() - startTime))

# Do a final check for zombie child processes.
@@ -209,31 +209,23 @@ class B2GRemoteAutomation(Automation):
return app, args

def waitForFinish(self, proc, utilityPath, timeout, maxTime, startTime,
debuggerInfo, symbolsPath, outputHandler=None):
debuggerInfo, symbolsPath):
""" Wait for tests to finish (as evidenced by a signature string
in logcat), or for a given amount of time to elapse with no
output.
"""
timeout = timeout or 120
while True:
lines = proc.getStdoutLines(timeout)
if lines:
currentlog = '\n'.join(lines)

if outputHandler:
for line in lines:
outputHandler(line)
else:
print(currentlog)

currentlog = proc.getStdoutLines(timeout)
if currentlog:
print currentlog
# Match the test filepath from the last TEST-START line found in the new
# log content. These lines are in the form:
# ... INFO TEST-START | /filepath/we/wish/to/capture.html\n
testStartFilenames = re.findall(r"TEST-START \| ([^\s]*)", currentlog)
if testStartFilenames:
self.lastTestSeen = testStartFilenames[-1]
if (outputHandler and outputHandler.suite_finished) or (
hasattr(self, 'logFinish') and self.logFinish in currentlog):
if hasattr(self, 'logFinish') and self.logFinish in currentlog:
return 0
else:
self.log.info("TEST-UNEXPECTED-FAIL | %s | application timed "
@@ -442,12 +434,11 @@ class B2GRemoteAutomation(Automation):
break

# wait 'timeout' for any additional lines
if not lines:
try:
lines.append(self.queue.get(True, timeout))
except Queue.Empty:
pass
return lines
try:
lines.append(self.queue.get(True, timeout))
except Queue.Empty:
pass
return '\n'.join(lines)

def wait(self, timeout=None):
# this should never happen
@@ -95,7 +95,7 @@ class RemoteAutomation(Automation):

return env

def waitForFinish(self, proc, utilityPath, timeout, maxTime, startTime, debuggerInfo, symbolsPath, outputHandler=None):
def waitForFinish(self, proc, utilityPath, timeout, maxTime, startTime, debuggerInfo, symbolsPath):
""" Wait for tests to finish.
If maxTime seconds elapse or no output is detected for timeout
seconds, kill the process and fail the test.
@@ -307,21 +307,20 @@ class RemoteAutomation(Automation):
return pid

def read_stdout(self):
"""
Fetch the full remote log file using devicemanager, process them and
return whether there were any new log entries since the last call.
""" Fetch the full remote log file using devicemanager and return just
the new log entries since the last call (as a list of messages or lines).
"""
if not self.dm.fileExists(self.proc):
return False
return []
try:
newLogContent = self.dm.pullFile(self.proc, self.stdoutlen)
except DMError:
# we currently don't retry properly in the pullFile
# function in dmSUT, so an error here is not necessarily
# the end of the world
return False
return []
if not newLogContent:
return False
return []

self.stdoutlen += len(newLogContent)

@@ -330,27 +329,26 @@ class RemoteAutomation(Automation):
if testStartFilenames:
self.lastTestSeen = testStartFilenames[-1]
print newLogContent
return True
return [newLogContent]

self.logBuffer += newLogContent
lines = self.logBuffer.split('\n')

if lines:
# We only keep the last (unfinished) line in the buffer
self.logBuffer = lines[-1]
del lines[-1]

if not lines:
return False
return

# We only keep the last (unfinished) line in the buffer
self.logBuffer = lines[-1]
del lines[-1]
messages = []
for line in lines:
# This passes the line to the logger (to be logged or buffered)
# and returns a list of structured messages (dict)
parsed_messages = self.messageLogger.write(line)
for message in parsed_messages:
if isinstance(message, dict) and message.get('action') == 'test_start':
if message['action'] == 'test_start':
self.lastTestSeen = message['test']
return True
messages += parsed_messages
return messages

@property
def getLastTestSeen(self):
@@ -376,10 +374,10 @@ class RemoteAutomation(Automation):
# too long, only do it every 60 seconds
if (not slowLog) or (timer % 60 == 0):
startRead = datetime.datetime.now()
hasOutput = self.read_stdout()
messages = self.read_stdout()
if (datetime.datetime.now() - startRead) > datetime.timedelta(seconds=5):
slowLog = True
if hasOutput:
if messages:
noOutputTimer = 0
time.sleep(interval)
timer += interval
@@ -12,7 +12,6 @@ _HARNESS_FILES = \
$(srcdir)/runreftestb2g.py \
$(srcdir)/runreftestmulet.py \
$(srcdir)/gaia_lock_screen.js \
$(srcdir)/output.py \
automation.py \
$(topsrcdir)/testing/mozbase/mozdevice/mozdevice/devicemanager.py \
$(topsrcdir)/testing/mozbase/mozdevice/mozdevice/devicemanagerADB.py \
@@ -2,7 +2,6 @@ reftest.jar:
% content reftest %content/
* content/reftest-content.js (reftest-content.js)
content/httpd.jsm (../../../netwerk/test/httpserver/httpd.js)
content/StructuredLog.jsm (../../../testing/modules/StructuredLog.jsm)
#ifdef BOOTSTRAP
* content/reftest.jsm (reftest.js)
#else
@@ -203,6 +203,8 @@ class ReftestRunner(MozbuildObject):
if not kwargs["runTestsInParallel"]:
kwargs["logFile"] = "%s.log" % kwargs["suite"]

# Remove the stdout handler from the internal logger and let mach deal with it
runreftest.log.removeHandler(runreftest.log.handlers[0])
self.log_manager.enable_unstructured()
try:
rv = runreftest.run(**kwargs)
@@ -1,144 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

import json
import os
import threading

from mozlog.formatters import TbplFormatter
from mozrunner.utils import get_stack_fixer_function


class ReftestFormatter(TbplFormatter):
"""
Formatter designed to preserve the legacy "tbpl" format in reftest.

This is needed for both the reftest-analyzer and mozharness log parsing.
We can change this format when both reftest-analyzer and mozharness have
been changed to read structured logs.
"""

def __call__(self, data):
if 'component' in data and data['component'] == 'mozleak':
# Output from mozleak requires that no prefix be added
# so that mozharness will pick up these failures.
return "%s\n" % data['message']

formatted = TbplFormatter.__call__(self, data)
if data['action'] == 'process_output':
return formatted
return 'REFTEST %s' % formatted

def log(self, data):
prefix = "%s |" % data['level'].upper()
return "%s %s\n" % (prefix, data['message'])

def test_end(self, data):
extra = data.get('extra', {})
status = data['status']

status_msg = "TEST-"
if 'expected' in data:
status_msg += "UNEXPECTED-%s" % status
else:
if status != "PASS":
status_msg += "KNOWN-"
status_msg += status
if extra.get('status_msg') == 'Random':
status_msg += "(EXPECTED RANDOM)"
test = self.id_str(data['test'])
if 'message' in data:
status_details = data['message']
elif isinstance(data['test'], tuple):
status_details = 'image comparison ({})'.format(data['test'][1])
else:
status_details = '(LOAD ONLY)'

output_text = "%s | %s | %s" % (status_msg, test, status_details)
if 'differences' in extra:
diff_msg = (", max difference: %(max_difference)s"
", number of differing pixels: %(differences)s") % extra
diagnostic_data = ("REFTEST IMAGE 1 (TEST): %(image1)s\n"
"REFTEST IMAGE 2 (REFERENCE): %(image2)s") % extra
output_text += '%s\n%s' % (diff_msg, diagnostic_data)
elif "image1" in extra:
diagnostic_data = "REFTEST IMAGE: %(image1)s" % extra
output_text += '\n%s' % diagnostic_data

output_text += "\nREFTEST TEST-END | %s" % test
return "%s\n" % output_text

def process_output(self, data):
return "%s\n" % data["data"]

def suite_end(self, data):
lines = []
summary = data['extra']['results']
summary['success'] = summary['Pass'] + summary['LoadOnly']
lines.append("Successful: %(success)s (%(Pass)s pass, %(LoadOnly)s load only)" %
summary)
summary['unexpected'] = (summary['Exception'] + summary['FailedLoad'] +
summary['UnexpectedFail'] + summary['UnexpectedPass'] +
summary['AssertionUnexpected'] +
summary['AssertionUnexpectedFixed'])
lines.append(("Unexpected: %(unexpected)s (%(UnexpectedFail)s unexpected fail, "
"%(UnexpectedPass)s unexpected pass, "
"%(AssertionUnexpected)s unexpected asserts, "
"%(FailedLoad)s failed load, "
"%(Exception)s exception)") % summary)
summary['known'] = (summary['KnownFail'] + summary['AssertionKnown'] +
summary['Random'] + summary['Skip'] + summary['Slow'])
lines.append(("Known problems: %(known)s (" +
"%(KnownFail)s known fail, " +
"%(AssertionKnown)s known asserts, " +
"%(Random)s random, " +
"%(Skip)s skipped, " +
"%(Slow)s slow)") % summary)
lines = ["REFTEST INFO | %s" % s for s in lines]
lines.append("REFTEST SUITE-END | Shutdown")
return "INFO | Result summary:\n{}\n".format('\n'.join(lines))

def id_str(self, test_id):
if isinstance(test_id, basestring):
return test_id
return test_id[0]


class OutputHandler(object):
"""Process the output of a process during a test run and translate
raw data logged from reftest.js to an appropriate structured log action,
where applicable.
"""

def __init__(self, log, utilityPath, symbolsPath=None):
self.stack_fixer_function = get_stack_fixer_function(utilityPath, symbolsPath)
self.log = log
# needed for b2gautomation.py
self.suite_finished = False

def __call__(self, line):
# need to return processed messages to appease remoteautomation.py
if not line.strip():
return []

try:
data = json.loads(line)
except ValueError:
self.verbatim(line)
return [line]

if isinstance(data, dict) and 'action' in data:
if data['action'] == 'suite_end':
self.suite_finished = True

self.log.log_raw(data)
else:
self.verbatim(json.dumps(data))

return [data]

def verbatim(self, line):
if self.stack_fixer_function:
line = self.stack_fixer_function(line.encode('utf-8', 'replace'))
self.log.process_output(threading.current_thread().name, line)
@@ -38,7 +38,6 @@ const NS_OBSERVER_SERVICE_CONTRACTID =

CU.import("resource://gre/modules/FileUtils.jsm");
CU.import("chrome://reftest/content/httpd.jsm", this);
CU.import("chrome://reftest/content/StructuredLog.jsm", this);
CU.import("resource://gre/modules/Services.jsm");
CU.import("resource://gre/modules/NetUtil.jsm");

@@ -100,7 +99,6 @@ var gTotalTests = 0;
var gState;
var gCurrentURL;
var gTestLog = [];
var gLogLevel;
var gServer;
var gCount = 0;
var gAssertionCount = 0;
@@ -165,6 +163,10 @@ var gNoCanvasCache = false;

var gRecycledCanvases = new Array();

// By default we just log to stdout
var gDumpLog = dump;
var gVerbose = false;

// Only dump the sandbox once, because it doesn't depend on the
// manifest URL (yet!).
var gDumpedConditionSandbox = false;
@@ -179,27 +181,25 @@ function HasUnexpectedResult()
gTestResults.AssertionUnexpectedFixed > 0;
}

// By default we just log to stdout
var gDumpFn = dump;
var gDumpRawLog = function(record) {
// Dump JSON representation of data on a single line
var line = JSON.stringify(record);
gDumpFn("\n" + line + "\n");
}
var logger = new StructuredLogger('reftest', gDumpRawLog);

function TestBuffer(str)
function LogWarning(str)
{
logger.debug(str);
gDumpLog("REFTEST INFO | " + str + "\n");
gTestLog.push(str);
}

function FlushTestBuffer()
function LogInfo(str)
{
// In debug mode, we've dumped all these messages already.
if (gLogLevel !== 'debug') {
if (gVerbose)
gDumpLog("REFTEST INFO | " + str + "\n");
gTestLog.push(str);
}

function FlushTestLog()
{
if (!gVerbose) {
// In verbose mode, we've dumped all these messages already.
for (var i = 0; i < gTestLog.length; ++i) {
logger.info("Saved log: " + gTestLog[i]);
gDumpLog("REFTEST INFO | Saved log: " + gTestLog[i] + "\n");
}
}
gTestLog = [];
@@ -244,7 +244,7 @@ function getTestPlugin(aName) {
return tags[i];
}

logger.warning("Failed to find the test-plugin.");
LogWarning("Failed to find the test-plugin.");
return null;
}

@@ -257,6 +257,7 @@ this.OnRefTestLoad = function OnRefTestLoad(win)

var env = CC["@mozilla.org/process/environment;1"].
getService(CI.nsIEnvironment);
gVerbose = !!env.get("MOZ_REFTEST_VERBOSE");

var prefs = Components.classes["@mozilla.org/preferences-service;1"].
getService(Components.interfaces.nsIPrefBranch);
@@ -278,12 +279,6 @@ this.OnRefTestLoad = function OnRefTestLoad(win)
gBrowserIsIframe = false;
}

try {
gLogLevel = prefs.getCharPref("reftest.logLevel");
} catch (e) {
gLogLevel ='info';
}

if (win === undefined || win == null) {
win = window;
}
@@ -327,7 +322,7 @@ this.OnRefTestLoad = function OnRefTestLoad(win)
plugin1.enabledState = CI.nsIPluginTag.STATE_ENABLED;
plugin2.enabledState = CI.nsIPluginTag.STATE_ENABLED;
} else {
logger.warning("Could not get test plugin tags.");
LogWarning("Could not get test plugin tags.");
}

gBrowserMessageManager = gBrowser.QueryInterface(CI.nsIFrameLoaderOwner)
@@ -344,7 +339,7 @@ function InitAndStartRefTests()
var prefs = Components.classes["@mozilla.org/preferences-service;1"].
getService(Components.interfaces.nsIPrefBranch);
} catch(e) {
logger.error("EXCEPTION: " + e);
gDumpLog("REFTEST TEST-UNEXPECTED-FAIL | | EXCEPTION: " + e + "\n");
}

try {
@@ -366,7 +361,7 @@ function InitAndStartRefTests()
var f = FileUtils.File(logFile);
var mfl = FileUtils.openFileOutputStream(f, FileUtils.MODE_WRONLY | FileUtils.MODE_CREATE);
// Set to mirror to stdout as well as the file
gDumpFn = function (msg) {
gDumpLog = function (msg) {
#ifdef BOOTSTRAP
#ifdef REFTEST_B2G
dump(msg);
@@ -381,7 +376,7 @@ function InitAndStartRefTests()
}
catch(e) {
// If there is a problem, just use stdout
gDumpFn = dump;
gDumpLog = dump;
}
}
} catch(e) {}
@@ -432,7 +427,7 @@ function InitAndStartRefTests()
} catch (ex) {
//gBrowser.loadURI('data:text/plain,' + ex);
++gTestResults.Exception;
logger.error("EXCEPTION: " + ex);
gDumpLog("REFTEST TEST-UNEXPECTED-FAIL | | EXCEPTION: " + ex + "\n");
DoneTests();
}

@@ -470,7 +465,7 @@ function StartTests()
var prefs = Components.classes["@mozilla.org/preferences-service;1"].
getService(Components.interfaces.nsIPrefBranch);
} catch(e) {
logger.error("EXCEPTION: " + e);
gDumpLog("REFTEST TEST-UNEXPECTED-FAIL | | EXCEPTION: " + e + "\n");
}

try {
@@ -518,7 +513,7 @@ function StartTests()
var manifests = JSON.parse(prefs.getCharPref("reftest.manifests"));
gURLFilterRegex = manifests[null];
} catch(e) {
logger.error("Unable to find reftest.manifests pref. Please ensure your profile is setup properly");
gDumpLog("REFTEST TEST-UNEXPECTED-FAIL | | Unable to find reftest.manifests pref. Please ensure your profile is setup properly\n");
DoneTests();
}

@@ -530,7 +525,7 @@ function StartTests()
// process includes before reading the included manifest again
manifestURLs.sort(function(a,b) {return a.length - b.length})
manifestURLs.forEach(function(manifestURL) {
logger.info("Reading manifest " + manifestURL);
gDumpLog("Reading manifest " + manifestURL + "\n");
var filter = manifests[manifestURL] ? new RegExp(manifests[manifestURL]) : null;
ReadTopManifest(manifestURL, [globalFilter, filter, false]);
});
@@ -539,7 +534,6 @@ function StartTests()
// Filter tests which will be skipped to get a more even distribution when chunking
// tURLs is a temporary array containing all active tests
var tURLs = new Array();
var tIDs = new Array();
for (var i = 0; i < gURLs.length; ++i) {
if (gURLs[i].expected == EXPECTED_DEATH)
continue;
@@ -551,10 +545,9 @@ function StartTests()
continue;

tURLs.push(gURLs[i]);
tIDs.push(gURLs[i].identifier);
}

logger.suiteStart(tIDs, {"skipped": gURLs.length - tURLs.length});
gDumpLog("REFTEST INFO | Discovered " + gURLs.length + " tests, after filtering SKIP tests, we have " + tURLs.length + "\n");

if (gTotalChunks > 0 && gThisChunk > 0) {
// Calculate start and end indices of this chunk if tURLs array were
@@ -569,8 +562,8 @@ function StartTests()
end = gThisChunk == gTotalChunks ? gURLs.length : gURLs.indexOf(tURLs[end + 1]) - 1;
gURLs = gURLs.slice(start, end);

logger.info("Running chunk " + gThisChunk + " out of " + gTotalChunks + " chunks. " +
"tests " + (start+1) + "-" + end + "/" + gURLs.length);
gDumpLog("REFTEST INFO | Running chunk " + gThisChunk + " out of " + gTotalChunks + " chunks. ");
gDumpLog("tests " + (start+1) + "-" + end + "/" + gURLs.length + "\n");
}

if (gShuffle) {
@@ -587,7 +580,7 @@ function StartTests()
} catch (ex) {
//gBrowser.loadURI('data:text/plain,' + ex);
++gTestResults.Exception;
logger.error("EXCEPTION: " + ex);
gDumpLog("REFTEST TEST-UNEXPECTED-FAIL | | EXCEPTION: " + ex + "\n");
DoneTests();
}
}
@@ -600,7 +593,7 @@ function OnRefTestUnload()
plugin1.enabledState = gTestPluginEnabledStates[0];
plugin2.enabledState = gTestPluginEnabledStates[1];
} else {
logger.warning("Failed to get test plugin tags.");
LogWarning("Failed to get test plugin tags.");
}
}

@@ -756,8 +749,8 @@ function BuildConditionSandbox(aURL) {
}

if (!gDumpedConditionSandbox) {
logger.info("Dumping JSON representation of sandbox");
logger.info(JSON.stringify(CU.waiveXrays(sandbox)));
dump("REFTEST INFO | Dumping JSON representation of sandbox \n");
dump("REFTEST INFO | " + JSON.stringify(CU.waiveXrays(sandbox)) + " \n");
gDumpedConditionSandbox = true;
}

@@ -818,12 +811,6 @@ function AddTestItem(aTest, aFilter)
if (gFocusFilterMode == FOCUS_FILTER_NON_NEEDS_FOCUS_TESTS &&
aTest.needsFocus)
return;

if (aTest.url2 !== null)
aTest.identifier = [aTest.prettyPath, aTest.type, aTest.url2.spec];
else
aTest.identifier = aTest.prettyPath;

gURLs.push(aTest);
}

@@ -850,7 +837,8 @@ function ReadManifest(aURL, inherited_status, aFilter)
var inputStream = channel.open2();
if (channel instanceof Components.interfaces.nsIHttpChannel
&& channel.responseStatus != 200) {
logger.error("HTTP ERROR : " + channel.responseStatus);
gDumpLog("REFTEST TEST-UNEXPECTED-FAIL | | HTTP ERROR : " +
channel.responseStatus + "\n");
}
var streamBuf = getStreamContent(inputStream);
inputStream.close();
@@ -1250,20 +1238,19 @@ function StartCurrentTest()
// make sure we don't run tests that are expected to kill the browser
while (gURLs.length > 0) {
var test = gURLs[0];
logger.testStart(test.identifier);
if (test.expected == EXPECTED_DEATH) {
++gTestResults.Skip;
logger.testEnd(test.identifier, "SKIP");
gDumpLog("REFTEST TEST-KNOWN-FAIL | " + test.url1.spec + " | (SKIP)\n");
gURLs.shift();
} else if (test.needsFocus && !Focus()) {
// FIXME: Marking this as a known fail is dangerous! What
// if it starts failing all the time?
++gTestResults.Skip;
logger.testEnd(test.identifier, "SKIP", null, "(COULDN'T GET FOCUS)");
gDumpLog("REFTEST TEST-KNOWN-FAIL | " + test.url1.spec + " | (SKIPPED; COULDN'T GET FOCUS)\n");
gURLs.shift();
} else if (test.slow && !gRunSlowTests) {
++gTestResults.Slow;
logger.testEnd(test.identifier, "SKIP", null, "(SLOW)");
gDumpLog("REFTEST TEST-KNOWN-SLOW | " + test.url1.spec + " | (SLOW)\n");
gURLs.shift();
} else {
break;
@@ -1279,6 +1266,7 @@ function StartCurrentTest()
gRepeat--;
StartTests();
} else {
gDumpLog("REFTEST TEST-START | " + gURLs[0].prettyPath + "\n");
if (gURLs[0].chaosMode) {
gWindowUtils.enterChaosMode();
}
@@ -1344,30 +1332,32 @@ function StartCurrentURI(aState)
} else if (ps.type == PREF_INTEGER) {
prefs.setIntPref(ps.name, value);
}
logger.info("SET PREFERENCE pref(" + ps.name + "," + value + ")");
gDumpLog("SET PREFERENCE pref(" + ps.name + "," + value + ")\n");
}
});
} catch (e) {
if (e == "bad pref") {
var test = gURLs[0];
if (test.expected == EXPECTED_FAIL) {
logger.testEnd(test.identifier, "FAIL", "FAIL",
"(SKIPPED; " + badPref + " not known or wrong type)");
gDumpLog("REFTEST TEST-KNOWN-FAIL | " + test.url1.spec +
" | (SKIPPED; " + badPref + " not known or wrong type)\n");
++gTestResults.Skip;
} else {
logger.testEnd(test.identifier, "FAIL", "PASS",
badPref + " not known or wrong type");
gDumpLog("REFTEST TEST-UNEXPECTED-FAIL | " + test.url1.spec +
" | " + badPref + " not known or wrong type\n");
++gTestResults.UnexpectedFail;
}

// skip the test that had a bad preference
gURLs.shift();
StartCurrentTest();
return;
} else {
throw e;
}
}
if (badPref != undefined) {
// skip the test that had a bad preference
gURLs.shift();

StartCurrentTest();
return;
}
}

if (prefSettings.length == 0 &&
@@ -1380,11 +1370,9 @@ function StartCurrentURI(aState)
gContainingWindow.setTimeout(RecordResult, 0);
} else {
var currentTest = gTotalTests - gURLs.length;
// Log this to preserve the same overall log format,
// should be removed if the format is updated
gDumpFn("REFTEST TEST-LOAD | " + gCurrentURL + " | " + currentTest + " / " + gTotalTests +
" (" + Math.floor(100 * (currentTest / gTotalTests)) + "%)\n");
TestBuffer("START " + gCurrentURL);
gDumpLog("REFTEST TEST-LOAD | " + gCurrentURL + " | " + currentTest + " / " + gTotalTests +
" (" + Math.floor(100 * (currentTest / gTotalTests)) + "%)\n");
LogInfo("START " + gCurrentURL);
var type = gURLs[0].type
if (TYPE_SCRIPT == type) {
SendLoadScriptTest(gCurrentURL, gLoadTimeout);
@@ -1396,10 +1384,37 @@ function StartCurrentURI(aState)

function DoneTests()
{
logger.suiteEnd(extra={'results': gTestResults});
logger.info("Slowest test took " + gSlowestTestTime + "ms (" + gSlowestTestURL + ")");
logger.info("Total canvas count = " + gRecycledCanvases.length);
gDumpLog("REFTEST FINISHED: Slowest test took " + gSlowestTestTime +
"ms (" + gSlowestTestURL + ")\n");

gDumpLog("REFTEST INFO | Result summary:\n");
var count = gTestResults.Pass + gTestResults.LoadOnly;
gDumpLog("REFTEST INFO | Successful: " + count + " (" +
gTestResults.Pass + " pass, " +
gTestResults.LoadOnly + " load only)\n");
count = gTestResults.Exception + gTestResults.FailedLoad +
gTestResults.UnexpectedFail + gTestResults.UnexpectedPass +
gTestResults.AssertionUnexpected +
gTestResults.AssertionUnexpectedFixed;
gDumpLog("REFTEST INFO | Unexpected: " + count + " (" +
gTestResults.UnexpectedFail + " unexpected fail, " +
gTestResults.UnexpectedPass + " unexpected pass, " +
gTestResults.AssertionUnexpected + " unexpected asserts, " +
gTestResults.AssertionUnexpectedFixed + " unexpected fixed asserts, " +
gTestResults.FailedLoad + " failed load, " +
gTestResults.Exception + " exception)\n");
count = gTestResults.KnownFail + gTestResults.AssertionKnown +
gTestResults.Random + gTestResults.Skip + gTestResults.Slow;
gDumpLog("REFTEST INFO | Known problems: " + count + " (" +
gTestResults.KnownFail + " known fail, " +
gTestResults.AssertionKnown + " known asserts, " +
gTestResults.Random + " random, " +
gTestResults.Skip + " skipped, " +
gTestResults.Slow + " slow)\n");

gDumpLog("REFTEST INFO | Total canvas count = " + gRecycledCanvases.length + "\n");

gDumpLog("REFTEST TEST-START | Shutdown\n");
function onStopped() {
let appStartup = CC["@mozilla.org/toolkit/app-startup;1"].getService(CI.nsIAppStartup);
appStartup.quit(CI.nsIAppStartup.eForceQuit);
@@ -1447,7 +1462,7 @@ function DoDrawWindow(ctx, x, y, w, h)
// browser element
flags |= ctx.DRAWWINDOW_USE_WIDGET_LAYERS;
} else if (gBrowserIsRemote) {
logger.error(gCurrentURL + " | can't drawWindow remote content");
gDumpLog("REFTEST TEST-UNEXPECTED-FAIL | " + gCurrentURL + " | can't drawWindow remote content\n");
++gTestResults.Exception;
}

@@ -1460,21 +1475,22 @@ function DoDrawWindow(ctx, x, y, w, h)
} else {
// Output a special warning because we need to be able to detect
// this whenever it happens.
logger.error("WARNING: USE_WIDGET_LAYERS disabled");
gDumpLog("REFTEST TEST-UNEXPECTED-FAIL | WARNING: USE_WIDGET_LAYERS disabled\n");
}
logger.info("drawWindow flags = " + flagsStr +
"; window size = " + gContainingWindow.innerWidth + "," + gContainingWindow.innerHeight +
"; test browser size = " + testRect.width + "," + testRect.height);
gDumpLog("REFTEST INFO | drawWindow flags = " + flagsStr +
"; window size = " + gContainingWindow.innerWidth + "," + gContainingWindow.innerHeight +
"; test browser size = " + testRect.width + "," + testRect.height +
"\n");
}

TestBuffer("DoDrawWindow " + x + "," + y + "," + w + "," + h);
LogInfo("DoDrawWindow " + x + "," + y + "," + w + "," + h);
ctx.drawWindow(gContainingWindow, x, y, w, h, "rgb(255,255,255)",
gDrawWindowFlags);
}

function InitCurrentCanvasWithSnapshot()
{
TestBuffer("Initializing canvas snapshot");
LogInfo("Initializing canvas snapshot");

if (gURLs[0].type == TYPE_LOAD || gURLs[0].type == TYPE_SCRIPT) {
// We don't want to snapshot this kind of test
@@ -1492,7 +1508,7 @@ function InitCurrentCanvasWithSnapshot()

function UpdateCurrentCanvasForInvalidation(rects)
{
TestBuffer("Updating canvas for invalidation");
LogInfo("Updating canvas for invalidation");

if (!gCurrentCanvas) {
return;
@@ -1522,7 +1538,7 @@ function UpdateCurrentCanvasForInvalidation(rects)

function UpdateWholeCurrentCanvasForInvalidation()
{
TestBuffer("Updating entire canvas for invalidation");
LogInfo("Updating entire canvas for invalidation");

if (!gCurrentCanvas) {
return;
@@ -1534,7 +1550,7 @@ function UpdateWholeCurrentCanvasForInvalidation()

function RecordResult(testRunTime, errorMsg, scriptResults)
{
TestBuffer("RecordResult fired");
LogInfo("RecordResult fired");

// Keep track of which test was slowest, and how long it took.
if (testRunTime > gSlowestTestTime) {
@@ -1544,26 +1560,26 @@ function RecordResult(testRunTime, errorMsg, scriptResults)

// Not 'const ...' because of 'EXPECTED_*' value dependency.
var outputs = {};
const randomMsg = "(EXPECTED RANDOM)";
outputs[EXPECTED_PASS] = {
true: {s: ["PASS", "PASS"], n: "Pass"},
false: {s: ["FAIL", "PASS"], n: "UnexpectedFail"}
true: {s: "TEST-PASS" , n: "Pass"},
false: {s: "TEST-UNEXPECTED-FAIL" , n: "UnexpectedFail"}
};
outputs[EXPECTED_FAIL] = {
true: {s: ["PASS", "FAIL"], n: "UnexpectedPass"},
false: {s: ["FAIL", "FAIL"], n: "KnownFail"}
true: {s: "TEST-UNEXPECTED-PASS" , n: "UnexpectedPass"},
false: {s: "TEST-KNOWN-FAIL" , n: "KnownFail"}
};
outputs[EXPECTED_RANDOM] = {
true: {s: ["PASS", "PASS"], n: "Random"},
false: {s: ["FAIL", "FAIL"], n: "Random"}
true: {s: "TEST-PASS" + randomMsg , n: "Random"},
false: {s: "TEST-KNOWN-FAIL" + randomMsg, n: "Random"}
};
outputs[EXPECTED_FUZZY] = outputs[EXPECTED_PASS];

var output;
var extra;

if (gURLs[0].type == TYPE_LOAD) {
++gTestResults.LoadOnly;
logger.testEnd(gURLs[0].identifier, "PASS", "PASS", "(LOAD ONLY)");
gDumpLog("REFTEST TEST-PASS | " + gURLs[0].prettyPath + " | (LOAD ONLY)\n");
gCurrentCanvas = null;
FinishTestItem();
return;
@@ -1581,14 +1597,17 @@ function RecordResult(testRunTime, errorMsg, scriptResults)
if (!gURLs[0].allowSilentFail)
errorMsg = "No test results reported. (SCRIPT)\n";
else
logger.info("An expected silent failure occurred");
gDumpLog("REFTEST INFO | An expected silent failure occurred \n");
}

if (errorMsg) {
output = outputs[expected][false];
extra = { status_msg: output.n };
++gTestResults[output.n];
logger.testEnd(gURLs[0].identifier, output.s[0], output.s[1], errorMsg, null, extra);
var result = "REFTEST " + output.s + " | " +
gURLs[0].prettyPath + " | " + // the URL being tested
errorMsg;

gDumpLog(result);
FinishTestItem();
return;
}
@@ -1608,15 +1627,16 @@ function RecordResult(testRunTime, errorMsg, scriptResults)
var index = 0;
scriptResults.forEach(function(result) {
var output = outputPair[result.passed];
var extra = { status_msg: output.n };

++gTestResults[output.n];
logger.testEnd(gURLs[0].identifier, output.s[0], output.s[1],
result.description + " item " + (++index), null, extra);
result = "REFTEST " + output.s + " | " +
gURLs[0].prettyPath + " | " + // the URL being tested
result.description + " item " + (++index) + "\n";
gDumpLog(result);
});

if (anyFailed && expected == EXPECTED_PASS) {
FlushTestBuffer();
FlushTestLog();
}

FinishTestItem();
@@ -1628,7 +1648,7 @@ function RecordResult(testRunTime, errorMsg, scriptResults)
gCurrentCanvas = gURICanvases[gCurrentURL];
}
if (gCurrentCanvas == null) {
logger.error(gCurrentURL, "program error managing snapshots");
gDumpLog("REFTEST TEST-UNEXPECTED-FAIL | " + gCurrentURL + " | program error managing snapshots\n");
++gTestResults.Exception;
}
if (gState == 1) {
@@ -1671,7 +1691,7 @@ function RecordResult(testRunTime, errorMsg, scriptResults)
throw "Inconsistent result from compareCanvases.";
}
equal = expected == EXPECTED_FUZZY;
logger.info("REFTEST fuzzy match");
gDumpLog("REFTEST fuzzy match\n");
}

var failedExtraCheck = gFailedNoPaint || gFailedOpaqueLayer || gFailedAssignedLayer;
@@ -1680,7 +1700,6 @@ function RecordResult(testRunTime, errorMsg, scriptResults)
var test_passed = (equal == (gURLs[0].type == TYPE_REFTEST_EQUAL)) && !failedExtraCheck;

output = outputs[expected][test_passed];
extra = { status_msg: output.n };

++gTestResults[output.n];

@@ -1699,21 +1718,39 @@ function RecordResult(testRunTime, errorMsg, scriptResults)
failures.push("failed reftest-assigned-layer: " + gFailedAssignedLayerMessages.join(", "));
}
var failureString = failures.join(", ");
logger.testEnd(gURLs[0].identifier, output.s[0], output.s[1], failureString, null, extra);
if (expected == EXPECTED_FAIL) {
gDumpLog("REFTEST TEST-KNOWN-FAIL | " + gURLs[0].prettyPath + " | " + failureString + "\n");
} else {
gDumpLog("REFTEST TEST-UNEXPECTED-FAIL | " + gURLs[0].prettyPath + " | " + failureString + "\n");
}
} else {
var result = "REFTEST " + output.s + " | " +
gURLs[0].prettyPath + " | "; // the URL being tested
switch (gURLs[0].type) {
case TYPE_REFTEST_NOTEQUAL:
result += "image comparison (!=)";
break;
case TYPE_REFTEST_EQUAL:
result += "image comparison (==)";
break;
}

if (!test_passed && expected == EXPECTED_PASS ||
!test_passed && expected == EXPECTED_FUZZY ||
test_passed && expected == EXPECTED_FAIL) {
if (!equal) {
extra.max_difference = maxDifference.value;
extra.differences = differences;
extra.image1 = gCanvas1.toDataURL();
extra.image2 = gCanvas2.toDataURL();
result += ", max difference: " + maxDifference.value + ", number of differing pixels: " + differences + "\n";
result += "REFTEST IMAGE 1 (TEST): " + gCanvas1.toDataURL() + "\n";
result += "REFTEST IMAGE 2 (REFERENCE): " + gCanvas2.toDataURL() + "\n";
} else {
extra.image1 = gCanvas1.toDataURL();
result += "\n";
result += "REFTEST IMAGE: " + gCanvas1.toDataURL() + "\n";
}
} else {
result += "\n";
}
logger.testEnd(gURLs[0].identifier, output.s[0], output.s[1], null, null, extra);

gDumpLog(result);

if (gURLs[0].prefSettings1.length == 0) {
UpdateCanvasCache(gURLs[0].url1, gCanvas1);
@@ -1724,7 +1761,7 @@ function RecordResult(testRunTime, errorMsg, scriptResults)
}

if ((!test_passed && expected == EXPECTED_PASS) || (test_passed && expected == EXPECTED_FAIL)) {
FlushTestBuffer();
FlushTestLog();
}

CleanUpCrashDumpFiles();
@@ -1741,10 +1778,11 @@ function LoadFailed(why)
// Once bug 896840 is fixed, this can go away, but for now it will give log
// output that is TBPL starable for bug 789751 and bug 720452.
if (!why) {
logger.error("load failed with unknown reason");
gDumpLog("REFTEST TEST-UNEXPECTED-FAIL | load failed with unknown reason\n");
}
logger.testEnd(gURLs[0]["url" + gState].spec, "FAIL", "PASS", "load failed: " + why);
FlushTestBuffer();
gDumpLog("REFTEST TEST-UNEXPECTED-FAIL | " +
gURLs[0]["url" + gState].spec + " | load failed: " + why + "\n");
FlushTestLog();
FinishTestItem();
}

@@ -1781,9 +1819,11 @@ function FindUnexpectedCrashDumpFiles()
if (!foundCrashDumpFile) {
++gTestResults.UnexpectedFail;
foundCrashDumpFile = true;
logger.testEnd(gCurrentURL, "FAIL", "PASS", "This test left crash dumps behind, but we weren't expecting it to!");
gDumpLog("REFTEST TEST-UNEXPECTED-FAIL | " + gCurrentURL +
" | This test left crash dumps behind, but we weren't expecting it to!\n");
}
logger.info("Found unexpected crash dump file " + path);
gDumpLog("REFTEST INFO | Found unexpected crash dump file " + path +
".\n");
gUnexpectedCrashDumpFiles[path] = true;
}
}
@@ -1800,7 +1840,7 @@ function FinishTestItem()
{
// Replace document with BLANK_URL_FOR_CLEARING in case there are
// assertions when unloading.
logger.debug("Loading a blank page");
gDumpLog("REFTEST INFO | Loading a blank page\n");
// After clearing, content will notify us of the assertion count
// and tests will continue.
SendClear();
@@ -1835,25 +1875,26 @@ function DoAssertionCheck(numAsserts)

if (numAsserts < minAsserts) {
++gTestResults.AssertionUnexpectedFixed;
gDumpFn("REFTEST TEST-UNEXPECTED-PASS | " + gURLs[0].prettyPath +
" | assertion count" + numAsserts + " is less than " +
expectedAssertions + "\n");
gDumpLog("REFTEST TEST-UNEXPECTED-PASS | " + gURLs[0].prettyPath +
" | assertion count " + numAsserts + " is less than " +
expectedAssertions + "\n");
} else if (numAsserts > maxAsserts) {
++gTestResults.AssertionUnexpected;
gDumpFn("REFTEST TEST-UNEXPECTED-FAIL | " + gURLs[0].prettyPath +
" | assertion count " + numAsserts + " is more than " +
expectedAssertions + "\n");
gDumpLog("REFTEST TEST-UNEXPECTED-FAIL | " + gURLs[0].prettyPath +
" | assertion count " + numAsserts + " is more than " +
expectedAssertions + "\n");
} else if (numAsserts != 0) {
++gTestResults.AssertionKnown;
gDumpFn("REFTEST TEST-KNOWN-FAIL | " + gURLs[0].prettyPath +
"assertion count " + numAsserts + " matches " +
expectedAssertions + "\n");
gDumpLog("REFTEST TEST-KNOWN-FAIL | " + gURLs[0].prettyPath +
" | assertion count " + numAsserts + " matches " +
expectedAssertions + "\n");
}
}

if (gURLs[0].chaosMode) {
gWindowUtils.leaveChaosMode();
}
gDumpLog("REFTEST TEST-END | " + gURLs[0].prettyPath + "\n");

// And start the next test.
gURLs.shift();
@@ -1882,7 +1923,7 @@ function RestoreChangedPreferences()
} else if (ps.type == PREF_INTEGER) {
prefs.setIntPref(ps.name, value);
}
logger.info("RESTORE PREFERENCE pref(" + ps.name + "," + value + ")");
gDumpLog("RESTORE PREFERENCE pref(" + ps.name + "," + value + ")\n");
});
gPrefsToRestore = [];
}
@@ -1963,7 +2004,7 @@ function RecvContentReady()

function RecvException(what)
{
logger.error(gCurrentURL + " | " + what);
gDumpLog("REFTEST TEST-UNEXPECTED-FAIL | " + gCurrentURL + " | " + what + "\n");
++gTestResults.Exception;
}

@@ -1995,13 +2036,13 @@ function RecvInitCanvasWithSnapshot()

function RecvLog(type, msg)
{
msg = "[CONTENT] " + msg;
msg = "[CONTENT] "+ msg;
if (type == "info") {
TestBuffer(msg);
LogInfo(msg);
} else if (type == "warning") {
logger.warning(msg);
LogWarning(msg);
} else {
logger.error("REFTEST TEST-UNEXPECTED-FAIL | " + gCurrentURL + " | unknown log type " + type + "\n");
gDumpLog("REFTEST TEST-UNEXPECTED-FAIL | " + gCurrentURL + " | unknown log type " + type + "\n");
++gTestResults.Exception;
}
}
@@ -1,11 +1,8 @@
import argparse
import os
import sys
from collections import OrderedDict
from urlparse import urlparse

import mozlog

here = os.path.abspath(os.path.dirname(__file__))


@@ -210,8 +207,6 @@ class ReftestArgumentsParser(argparse.ArgumentParser):
nargs="*",
help="Path to test file, manifest file, or directory containing tests")

mozlog.commandline.add_logging_group(self)

def get_ip(self):
import moznetwork
if os.name != "nt":
@@ -240,6 +235,8 @@ class ReftestArgumentsParser(argparse.ArgumentParser):
self.error("Failed to determine test suite; supply --suite to set this explicitly")

def validate(self, options, reftest):
import sys

if not options.tests:
# Can't just set this in the argument parser because mach will set a default
self.error("Must supply at least one path to a manifest file, test directory, or test file to run.")
@@ -8,21 +8,19 @@ import time
import tempfile
import traceback

# We need to know our current directory so that we can serve our test files from it.
SCRIPT_DIRECTORY = os.path.abspath(os.path.realpath(os.path.dirname(__file__)))

from runreftest import RefTest, ReftestResolver
from automation import Automation
import devicemanager
import droid
import mozinfo
import moznetwork
from automation import Automation
from remoteautomation import RemoteAutomation, fennecLogcatFilters

from output import OutputHandler
from runreftest import RefTest, ReftestResolver
import reftestcommandline

# We need to know our current directory so that we can serve our test files from it.
SCRIPT_DIRECTORY = os.path.abspath(os.path.realpath(os.path.dirname(__file__)))


class RemoteReftestResolver(ReftestResolver):
def absManifestPath(self, path):
script_abs_path = os.path.join(SCRIPT_DIRECTORY, path)
@@ -148,13 +146,6 @@ class RemoteReftest(RefTest):
self.automation.deleteANRs()
self.automation.deleteTombstones()

self._populate_logger(options)
outputHandler = OutputHandler(self.log, options.utilityPath, options.symbolsPath)
# RemoteAutomation.py's 'messageLogger' is also used by mochitest. Mimic a mochitest
# MessageLogger object to re-use this code path.
outputHandler.write = outputHandler.__call__
self.automation._processArgs['messageLogger'] = outputHandler

def findPath(self, paths, filename = None):
for path in paths:
p = path
@@ -454,3 +445,4 @@ def main():

if __name__ == "__main__":
sys.exit(main())

@ -26,23 +26,26 @@ import mozcrash
|
||||
import mozdebug
|
||||
import mozinfo
|
||||
import mozleak
|
||||
import mozlog
|
||||
import mozprocess
|
||||
import mozprofile
|
||||
import mozrunner
|
||||
from mozrunner.utils import get_stack_fixer_function, test_environment
|
||||
from mozscreenshot import printstatus, dump_screen
|
||||
|
||||
from output import OutputHandler, ReftestFormatter
|
||||
import reftestcommandline
|
||||
|
||||
here = os.path.abspath(os.path.dirname(__file__))
|
||||
# set up logging handler a la automation.py.in for compatability
|
||||
import logging
|
||||
log = logging.getLogger()
|
||||
|
||||
try:
|
||||
from mozbuild.base import MozbuildObject
|
||||
build_obj = MozbuildObject.from_environment(cwd=here)
|
||||
except ImportError:
|
||||
build_obj = None
|
||||
|
||||
def resetGlobalLog():
|
||||
while log.handlers:
|
||||
log.removeHandler(log.handlers[0])
|
||||
handler = logging.StreamHandler(sys.stdout)
|
||||
log.setLevel(logging.INFO)
|
||||
log.addHandler(handler)
|
||||
resetGlobalLog()
|
||||
|
||||
|
||||
def categoriesToRegex(categoryList):
|
||||
@ -200,21 +203,6 @@ class RefTest(object):
|
||||
self.lastTestSeen = 'reftest'
|
||||
self.haveDumpedScreen = False
|
||||
self.resolver = self.resolver_cls()
|
||||
self.log = None
|
||||
|
||||
def _populate_logger(self, options):
|
||||
if self.log:
|
||||
return
|
||||
|
||||
mozlog.commandline.log_formatters["tbpl"] = (ReftestFormatter,
|
||||
"Reftest specific formatter for the"
|
||||
"benefit of legacy log parsers and"
|
||||
"tools such as the reftest analyzer")
|
||||
fmt_options = {}
|
||||
if not options.log_tbpl_level and os.environ.get('MOZ_REFTEST_VERBOSE'):
|
||||
options.log_tbpl_level = fmt_options['level'] = 'debug'
|
||||
self.log = mozlog.commandline.setup_logging(
|
||||
"reftest harness", options, {"tbpl": sys.stdout}, fmt_options)
|
||||
|
||||
def update_mozinfo(self):
|
||||
"""walk up directories to find mozinfo.json update the info"""
|
||||
@ -267,7 +255,6 @@ class RefTest(object):
|
||||
if options.runUntilFailure:
|
||||
prefs['reftest.runUntilFailure'] = True
|
||||
prefs['reftest.focusFilterMode'] = options.focusFilterMode
|
||||
prefs['reftest.logLevel'] = options.log_tbpl_level or 'info'
|
||||
prefs['reftest.manifests'] = json.dumps(manifests)
|
||||
|
||||
# Ensure that telemetry is disabled, so we don't connect to the telemetry
|
||||
@ -344,7 +331,7 @@ class RefTest(object):
|
||||
return profile
|
||||
|
||||
def environment(self, **kwargs):
|
||||
kwargs['log'] = self.log
|
||||
kwargs['log'] = log
|
||||
return test_environment(**kwargs)
|
||||
|
||||
def buildBrowserEnv(self, options, profileDir):
|
||||
@ -373,11 +360,11 @@ class RefTest(object):
|
||||
|
||||
def killNamedOrphans(self, pname):
|
||||
""" Kill orphan processes matching the given command name """
|
||||
self.log.info("Checking for orphan %s processes..." % pname)
|
||||
log.info("Checking for orphan %s processes..." % pname)
|
||||
|
||||
def _psInfo(line):
|
||||
if pname in line:
|
||||
self.log.info(line)
|
||||
log.info(line)
|
||||
process = mozprocess.ProcessHandler(['ps', '-f'],
|
||||
processOutputLine=_psInfo)
|
||||
process.run()
|
||||
@ -388,13 +375,13 @@ class RefTest(object):
|
||||
if len(parts) == 3 and parts[0].isdigit():
|
||||
pid = int(parts[0])
|
||||
if parts[2] == pname and parts[1] == '1':
|
||||
self.log.info("killing %s orphan with pid %d" % (pname, pid))
|
||||
log.info("killing %s orphan with pid %d" % (pname, pid))
|
||||
try:
|
||||
os.kill(
|
||||
pid, getattr(signal, "SIGKILL", signal.SIGTERM))
|
||||
except Exception as e:
|
||||
self.log.info("Failed to kill process %d: %s" %
|
||||
(pid, str(e)))
|
||||
log.info("Failed to kill process %d: %s" %
|
||||
(pid, str(e)))
|
||||
process = mozprocess.ProcessHandler(['ps', '-o', 'pid,ppid,comm'],
|
||||
processOutputLine=_psKill)
|
||||
process.run()
|
||||
@ -405,8 +392,6 @@ class RefTest(object):
|
||||
shutil.rmtree(profileDir, True)
|
||||
|
||||
def runTests(self, tests, options, cmdlineArgs=None):
|
||||
self._populate_logger(options)
|
||||
|
||||
# Despite our efforts to clean up servers started by this script, in practice
|
||||
# we still see infrequent cases where a process is orphaned and interferes
|
||||
# with future tests, typically because the old server is keeping the port in use.
|
||||
@ -499,16 +484,17 @@ class RefTest(object):
|
||||
"""handle process output timeout"""
|
||||
# TODO: bug 913975 : _processOutput should call self.processOutputLine
|
||||
# one more time one timeout (I think)
|
||||
self.log.error("%s | application timed out after %d seconds with no output" % (self.lastTestSeen, int(timeout)))
|
||||
log.error("TEST-UNEXPECTED-FAIL | %s | application timed out after %d seconds with no output" %
|
||||
(self.lastTestSeen, int(timeout)))
|
||||
self.killAndGetStack(
|
||||
proc, utilityPath, debuggerInfo, dump_screen=not debuggerInfo)
|
||||
|
||||
def dumpScreen(self, utilityPath):
|
||||
if self.haveDumpedScreen:
|
||||
self.log.info("Not taking screenshot here: see the one that was previously logged")
|
||||
log.info("Not taking screenshot here: see the one that was previously logged")
|
||||
return
|
||||
self.haveDumpedScreen = True
|
||||
dump_screen(utilityPath, self.log)
|
||||
dump_screen(utilityPath, log)
|
||||
|
||||
def killAndGetStack(self, process, utilityPath, debuggerInfo, dump_screen=False):
|
||||
"""
|
||||
@ -536,11 +522,75 @@ class RefTest(object):
|
||||
process.kill(sig=signal.SIGABRT)
|
||||
except OSError:
|
||||
# https://bugzilla.mozilla.org/show_bug.cgi?id=921509
|
||||
self.log.info("Can't trigger Breakpad, process no longer exists")
|
||||
log.info("Can't trigger Breakpad, process no longer exists")
|
||||
return
|
||||
self.log.info("Can't trigger Breakpad, just killing process")
|
||||
log.info("Can't trigger Breakpad, just killing process")
|
||||
process.kill()
|
||||
|
||||
# output processing
|
||||
|
||||
class OutputHandler(object):
|
||||
|
||||
"""line output handler for mozrunner"""
|
||||
|
||||
def __init__(self, harness, utilityPath, symbolsPath=None, dump_screen_on_timeout=True):
|
||||
"""
|
||||
harness -- harness instance
|
||||
dump_screen_on_timeout -- whether to dump the screen on timeout
|
||||
"""
|
||||
self.harness = harness
|
||||
self.utilityPath = utilityPath
|
||||
self.symbolsPath = symbolsPath
|
||||
self.dump_screen_on_timeout = dump_screen_on_timeout
|
||||
self.stack_fixer_function = self.stack_fixer()
|
||||
|
||||
def processOutputLine(self, line):
|
||||
"""per line handler of output for mozprocess"""
|
||||
for handler in self.output_handlers():
|
||||
line = handler(line)
|
||||
__call__ = processOutputLine
|
||||
|
||||
def output_handlers(self):
|
||||
"""returns ordered list of output handlers"""
|
||||
return [self.fix_stack,
|
||||
self.format,
|
||||
self.record_last_test,
|
||||
self.handle_timeout_and_dump_screen,
|
||||
self.log,
|
||||
]
|
||||
|
||||
def stack_fixer(self):
|
||||
"""
|
||||
return get_stack_fixer_function, if any, to use on the output lines
|
||||
"""
|
||||
return get_stack_fixer_function(self.utilityPath, self.symbolsPath)
|
||||
|
||||
# output line handlers:
|
||||
# these take a line and return a line
|
||||
def fix_stack(self, line):
|
||||
if self.stack_fixer_function:
|
||||
return self.stack_fixer_function(line)
|
||||
return line
|
||||
|
||||
def format(self, line):
|
||||
"""format the line"""
|
||||
return line.rstrip().decode("UTF-8", "ignore")
|
||||
|
||||
def record_last_test(self, line):
|
||||
"""record last test on harness"""
|
||||
if "TEST-START" in line and "|" in line:
|
||||
self.harness.lastTestSeen = line.split("|")[1].strip()
|
||||
return line
|
||||
|
||||
def handle_timeout_and_dump_screen(self, line):
|
||||
if self.dump_screen_on_timeout and "TEST-UNEXPECTED-FAIL" in line and "Test timed out" in line:
|
||||
self.harness.dumpScreen(self.utilityPath)
|
||||
return line
|
||||
|
||||
def log(self, line):
|
||||
log.info(line)
|
||||
return line
|
||||
|
||||
def runApp(self, profile, binary, cmdargs, env,
|
||||
timeout=None, debuggerInfo=None,
|
||||
symbolsPath=None, options=None,
|
||||
@ -556,14 +606,10 @@ class RefTest(object):
|
||||
interactive = debuggerInfo.interactive
|
||||
debug_args = [debuggerInfo.path] + debuggerInfo.args
|
||||
|
||||
def record_last_test(message):
|
||||
"""Records the last test seen by this harness for the benefit of crash logging."""
|
||||
if message['action'] == 'test_start':
|
||||
self.lastTestSeen = message['test']
|
||||
|
||||
self.log.add_handler(record_last_test)
|
||||
|
||||
outputHandler = OutputHandler(self.log, options.utilityPath, symbolsPath=symbolsPath)
|
||||
outputHandler = self.OutputHandler(harness=self,
|
||||
utilityPath=options.utilityPath,
|
||||
symbolsPath=symbolsPath,
|
||||
dump_screen_on_timeout=not debuggerInfo)
|
||||
|
||||
kp_kwargs = {
|
||||
'kill_on_timeout': False,
|
||||
@ -593,18 +639,21 @@ class RefTest(object):
|
||||
interactive=interactive,
|
||||
outputTimeout=timeout)
|
||||
proc = runner.process_handler
|
||||
|
||||
status = runner.wait()
|
||||
runner.process_handler = None
|
||||
if timeout is None:
|
||||
didTimeout = False
|
||||
else:
|
||||
didTimeout = proc.didTimeout
|
||||
|
||||
if status:
|
||||
self.log.error("TEST-UNEXPECTED-FAIL | %s | application terminated with exit code %s" % (self.lastTestSeen, status))
|
||||
log.info("TEST-UNEXPECTED-FAIL | %s | application terminated with exit code %s",
|
||||
self.lastTestSeen, status)
|
||||
else:
|
||||
self.lastTestSeen = 'Main app process exited normally'
|
||||
|
||||
crashed = mozcrash.log_crashes(self.log, os.path.join(profile.profile, 'minidumps'),
|
||||
symbolsPath, test=self.lastTestSeen)
|
||||
|
||||
crashed = mozcrash.check_for_crashes(os.path.join(profile.profile, "minidumps"),
|
||||
symbolsPath, test_name=self.lastTestSeen)
|
||||
runner.cleanup()
|
||||
if not status and crashed:
|
||||
status = 1
|
||||
@ -618,7 +667,7 @@ class RefTest(object):
|
||||
|
||||
profileDir = None
|
||||
try:
|
||||
if cmdlineArgs is None:
|
||||
if cmdlineArgs == None:
|
||||
cmdlineArgs = []
|
||||
profile = self.createReftestProfile(options, manifests)
|
||||
profileDir = profile.profile # name makes more sense
|
||||
@ -626,6 +675,7 @@ class RefTest(object):
            # browser environment
            browserEnv = self.buildBrowserEnv(options, profileDir)

            log.info("REFTEST INFO | runreftest.py | Running tests: start.\n")
            status = self.runApp(profile,
                                 binary=options.app,
                                 cmdargs=cmdlineArgs,
@ -638,9 +688,11 @@ class RefTest(object):
                                 debuggerInfo=debuggerInfo)
            mozleak.process_leak_log(self.leakLogFile,
                                     leak_thresholds=options.leakThresholds,
                                     log=log,
                                     stack_fixer=get_stack_fixer_function(options.utilityPath,
                                                                          options.symbolsPath),
                                     )
            log.info("\nREFTEST INFO | runreftest.py | Running tests: end.")
        finally:
            self.cleanup(profileDir)
        return status
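For reference, process_leak_log consumes the leak log the browser writes on shutdown and applies per-process-type thresholds (the signature with leak_log_file and leak_thresholds is visible in the mozleak hunk further down). A sketch of a call, assuming the mozleak package and a hypothetical log path:

    # Sketch: post-run leak-log processing (path is hypothetical).
    import mozleak

    mozleak.process_leak_log(
        "/tmp/runreftest_leaks.log",     # leak log written by the browser
        leak_thresholds={"default": 0},  # fail on any leak in the main process
    )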
@ -660,28 +712,31 @@ class RefTest(object):
                dest = os.path.join(profileDir, os.path.basename(abspath))
                shutil.copytree(abspath, dest)
            else:
                self.log.warning(
                    "runreftest.py | Failed to copy %s to profile" % abspath)
                log.warning(
                    "WARNING | runreftest.py | Failed to copy %s to profile", abspath)
                continue


def run(**kwargs):
    parser = reftestcommandline.DesktopArgumentsParser()

    # Mach gives us kwargs; this is a way to turn them back into an
    # options object
    parser.set_defaults(**kwargs)

    if 'tests' in kwargs:
        options = parser.parse_args(kwargs["tests"])
    else:
        options = parser.parse_args()

    parser = reftestcommandline.DesktopArgumentsParser()
    reftest = RefTest()
    parser.set_defaults(**kwargs)
    options = parser.parse_args(kwargs["tests"])
    parser.validate(options, reftest)

    return reftest.runTests(options.tests, options)


def main():
    parser = reftestcommandline.DesktopArgumentsParser()
    reftest = RefTest()

    options = parser.parse_args()
    parser.validate(options, reftest)

    sys.exit(reftest.runTests(options.tests, options))


if __name__ == "__main__":
    sys.exit(run())
    main()
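Both versions of run(**kwargs) rely on the same trick: mach passes keyword arguments, and argparse's set_defaults turns them back into a parsed-options object. A self-contained sketch with hypothetical values:

    # Sketch: the kwargs-to-options trick used by run(**kwargs) above.
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("tests", nargs="*")
    parser.add_argument("--timeout", type=int, default=300)

    kwargs = {"timeout": 120, "tests": ["layout/reftests"]}  # hypothetical
    parser.set_defaults(**kwargs)

    options = parser.parse_args([])  # no CLI args; defaults carry the kwargs
    print(options.timeout, options.tests)  # -> 120 ['layout/reftests']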
@ -16,7 +16,6 @@ if here not in sys.path:
from automation import Automation
from b2gautomation import B2GRemoteAutomation
from runreftestmulet import run_test_harness as run_mulet_reftests
from output import OutputHandler
from remotereftest import RemoteReftestResolver, ReftestServer
from runreftest import RefTest
import reftestcommandline
@ -312,8 +311,6 @@ class B2GRemoteReftest(RefTest):
               timeout=None, debuggerInfo=None,
               symbolsPath=None, options=None,
               valgrindPath=None, valgrindArgs=None, valgrindSuppFiles=None):

        outputHandler = OutputHandler(self.log, options.utilityPath, options.symbolsPath)
        status = self.automation.runApp(None, env,
                                        binary,
                                        profile.profile,
@ -322,8 +319,7 @@ class B2GRemoteReftest(RefTest):
                                        xrePath=options.xrePath,
                                        debuggerInfo=debuggerInfo,
                                        symbolsPath=symbolsPath,
                                        timeout=timeout,
                                        outputHandler=outputHandler)
                                        timeout=timeout)
        return status


@ -384,6 +380,7 @@ def run_remote_reftests(parser, options):
    auto.setProduct("b2g")
    auto.test_script = os.path.join(here, 'b2g_start_script.js')
    auto.test_script_args = [options.remoteWebServer, options.httpPort]
    auto.logFinish = "REFTEST TEST-START | Shutdown"

    reftest = B2GRemoteReftest(auto, dm, options, here)
    parser.validate(options, reftest)
@ -20,10 +20,11 @@ import mozinfo
import mozlog

from runreftest import RefTest
from output import OutputHandler
import reftestcommandline


log = mozlog.unstructured.getLogger('REFTEST')

class MuletReftest(RefTest):
    build_type = "mulet"
    marionette = None
@ -58,11 +59,7 @@ class MuletReftest(RefTest):
        self.profile = self.create_profile(options, manifests,
                                           profile_to_clone=options.profile)
        env = self.buildBrowserEnv(options, self.profile.profile)

        self._populate_logger(options)
        outputHandler = OutputHandler(self.log, options.utilityPath, symbolsPath=options.symbolsPath)

        kp_kwargs = { 'processOutputLine': [outputHandler],
        kp_kwargs = { 'processOutputLine': [self._on_output],
                      'onTimeout': [self._on_timeout],
                      'kill_on_timeout': False }
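The kp_kwargs dict above is mozprocess's callback contract: processOutputLine callbacks get each output line, and onTimeout callbacks fire after outputTimeout seconds of silence. A sketch with a hypothetical command, assuming the mozprocess package is installed:

    # Sketch: mozprocess callbacks as wired by kp_kwargs above.
    from mozprocess import ProcessHandler

    def on_output(line):
        print("OUT | %s" % line)

    def on_timeout():
        print("process went silent")

    proc = ProcessHandler(["echo", "hello"],       # hypothetical command
                          processOutputLine=[on_output],
                          onTimeout=[on_timeout],
                          kill_on_timeout=False)
    proc.run(outputTimeout=30)
    proc.wait()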
@ -74,7 +71,7 @@ class MuletReftest(RefTest):
            options.timeout = 300
        self.timeout = options.timeout + 30.0

        self.log.info("%s | Running tests: start." % os.path.basename(__file__))
        log.info("%s | Running tests: start.", os.path.basename(__file__))
        cmd, args = self.build_command_line(options.app,
                                            ignore_window_size=options.ignoreWindowSize,
                                            browser_arg=options.browser_arg)
@ -89,9 +86,9 @@ class MuletReftest(RefTest):
        status = 0
        try:
            self.runner.start(outputTimeout=self.timeout)
            self.log.info("%s | Application pid: %d" % (
            log.info("%s | Application pid: %d",
                     os.path.basename(__file__),
                     self.runner.process_handler.pid))
                     self.runner.process_handler.pid)

            # kick starts the reftest harness
            self.run_marionette_script()
@ -101,13 +98,13 @@ class MuletReftest(RefTest):
            self.runner.cleanup()

        if status > 0:
            self.log.testFail("%s | application terminated with exit code %s" % (
                self.last_test, status))
            log.testFail("%s | application terminated with exit code %s",
                         self.last_test, status)
        elif status < 0:
            self.log.info("%s | application killed with signal %s" % (
                self.last_test, -status))
            log.info("%s | application killed with signal %s",
                     self.last_test, -status)

        self.log.info("%s | Running tests: end." % os.path.basename(__file__))
        log.info("%s | Running tests: end.", os.path.basename(__file__))
        return status

    def create_profile(self, options, manifests, profile_to_clone=None):
@ -162,13 +159,22 @@ class MuletReftest(RefTest):
            args += ['-chrome', 'chrome://b2g/content/shell.html']
        return cmd, args

    def _on_output(self, line):
        sys.stdout.write("%s\n" % line)
        sys.stdout.flush()

        # TODO use structured logging
        if "TEST-START" in line and "|" in line:
            self.last_test = line.split("|")[1].strip()

    def _on_timeout(self):
        msg = "%s | application timed out after %s seconds with no output"
        self.log.testFail(msg % (self.last_test, self.timeout))
        log.testFail(msg % (self.last_test, self.timeout))

        # kill process to get a stack
        self.runner.stop(sig=signal.SIGABRT)


    def _unlockScreen(self):
        self.marionette.set_context(self.marionette.CONTEXT_CONTENT)
        self.marionette.import_script(os.path.abspath(
@ -177,11 +183,10 @@ class MuletReftest(RefTest):
        self.marionette.execute_async_script('GaiaLockScreen.unlock()')

    def _wait_for_homescreen(self, timeout):
        self.log.info("Waiting for home screen to load")
        log.info("Waiting for home screen to load")
        Wait(self.marionette, timeout).until(expected.element_present(
            By.CSS_SELECTOR, '#homescreen[loading-state=false]'))


def run_test_harness(parser, options):
    marionette_args = {}
    if options.marionette:
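_wait_for_homescreen uses marionette_driver's explicit-wait pattern: poll until a predicate such as expected.element_present succeeds or the timeout elapses. A sketch assuming the marionette_driver package and a running Marionette session (the endpoint is hypothetical; the selector is taken from the diff):

    # Sketch: the explicit-wait pattern used by _wait_for_homescreen.
    from marionette_driver import Wait, expected
    from marionette_driver.by import By
    from marionette_driver.marionette import Marionette

    client = Marionette(host='localhost', port=2828)  # hypothetical endpoint
    client.start_session()

    Wait(client, timeout=30).until(expected.element_present(
        By.CSS_SELECTOR, '#homescreen[loading-state=false]'))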
@ -87,35 +87,17 @@ StructuredLogger.prototype = {
    this._logData("test_end", data);
  },

  suiteStart: function (tests, runinfo=null, versioninfo=null, deviceinfo=null, extra=null) {
  suiteStart: function (tests, runinfo=null) {
    var data = {tests: tests};
    if (runinfo !== null) {
      data.runinfo = runinfo;
    }

    if (versioninfo !== null) {
      data.versioninfo = versioninfo;
    }

    if (deviceinfo !== null) {
      data.deviceinfo = deviceinfo;
    }

    if (extra !== null) {
      data.extra = extra;
    }

    this._logData("suite_start", data);
  },

  suiteEnd: function (extra=null) {
    var data = {};

    if (extra !== null) {
      data.extra = extra;
    }

    this._logData("suite_end", data);
  suiteEnd: function () {
    this._logData("suite_end");
  },


@ -160,14 +142,6 @@ StructuredLogger.prototype = {
    this.log("CRITICAL", message, extra);
  },

  processOutput: function(thread, message) {
    this._logData('process_output', {
      message: message,
      thread: thread,
    });
  },


  _logData: function (action, data={}) {
    var allData = {
      action: action,
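Every message _logData emits is a JSON object whose "action" field routes it; the parameters dropped here (versioninfo, deviceinfo, extra) were optional suite_start payload. A sketch of the message shape, with hypothetical values and only the fields shown in this diff guaranteed:

    # Sketch: the shape of a structured suite_start message.
    import json
    import time

    suite_start = {
        "action": "suite_start",
        "tests": ["test_a.html", "test_b.html"],   # hypothetical test ids
        "runinfo": {"debug": False},                # optional payload
        "time": int(time.time() * 1000),            # assumed metadata field
    }
    print(json.dumps(suite_start))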
@ -10,15 +10,9 @@ import sys
import mozinfo
import mozrunner.utils


def _get_default_logger():
    from mozlog import get_default_logger
    log = get_default_logger(component='mozleak')

    if not log:
        import logging
        log = logging.getLogger(__name__)
    return log
def _raw_log():
    import logging
    return logging.getLogger(__name__)


# Do not add anything to this list, unless one of the existing leaks below
@ -136,7 +130,7 @@ def process_single_leak_file(leakLogFileName, processType, leakThreshold,
                             r"\s*-?\d+\s+(?P<numLeaked>-?\d+)")
    # The class name can contain spaces. We remove trailing whitespace later.

    log = log or _get_default_logger()
    log = log or _raw_log()

    processString = "%s process:" % processType
    expectedLeaks = expectedTabProcessLeakCounts() if processType == 'tab' else {}
@ -282,7 +276,7 @@ def process_leak_log(leak_log_file, leak_thresholds=None,
    in the list ignore_missing_leaks.
    """

    log = log or _get_default_logger()
    log = log or _raw_log()

    leakLogFile = leak_log_file
    if not os.path.exists(leakLogFile):
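The removed _get_default_logger illustrates a useful fallback idiom: prefer the structured mozlog logger when the harness configured one, and fall back to the stdlib otherwise. A sketch assuming the mozlog package (the wrapper name is hypothetical):

    # Sketch: structured logger with a stdlib fallback.
    import logging

    def get_logger_with_fallback(component='mozleak'):
        from mozlog import get_default_logger
        log = get_default_logger(component=component)
        if not log:
            # no structured logger was set up; use plain logging instead
            log = logging.getLogger(__name__)
        return log

    log = get_logger_with_fallback()
    log.info("leak checking started")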
@ -5,7 +5,6 @@
from .base import BaseFormatter
from .process import strstatus


class TbplFormatter(BaseFormatter):
    """Formatter that formats logs in the legacy formatting format used by TBPL
    This is intended to be used to preserve backward compatibility with existing tools
@ -103,12 +102,12 @@ class TbplFormatter(BaseFormatter):

    def test_end(self, data):
        test_id = self.test_id(data["test"])
        duration_msg = ""
        time_msg = ""

        if test_id in self.test_start_times:
            start_time = self.test_start_times.pop(test_id)
            time = data["time"] - start_time
            duration_msg = "took %ims" % time
            time_msg = "took %ims" % time

        if "expected" in data:
            message = data.get("message", "")
@ -120,20 +119,18 @@ class TbplFormatter(BaseFormatter):
                message = message[:-1]

            failure_line = "TEST-UNEXPECTED-%s | %s | %s\n" % (
                data["status"], self.id_str(test_id), message)
                data["status"], test_id, message)

            if data["expected"] not in ("PASS", "OK"):
                expected_msg = "expected %s | " % data["expected"]
            else:
                expected_msg = ""
            info_line = "TEST-INFO %s%s\n" % (expected_msg, duration_msg)
            info_line = "TEST-INFO %s%s\n" % (expected_msg, time_msg)

            return failure_line + info_line

        sections = ["TEST-%s" % data['status'], self.id_str(test_id)]
        if duration_msg:
            sections.append(duration_msg)
        return ' | '.join(sections) + '\n'
        return "TEST-%s | %s | %s\n" % (
            data["status"], test_id, time_msg)

    def suite_end(self, data):
        start_time = self.suite_start_time
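The formatter's job is mechanical: turn a structured test_end message into a legacy TBPL line, appending a duration computed from the recorded test_start time. A simplified standalone sketch (function name and message values are hypothetical; the real formatter handles more cases):

    # Sketch: TBPL-style rendering of a test_end message.
    def format_test_end(data, start_times):
        test_id = data["test"]
        time_msg = ""
        if test_id in start_times:
            time_msg = "took %ims" % (data["time"] - start_times.pop(test_id))

        if "expected" in data:  # present only when the result was unexpected
            return "TEST-UNEXPECTED-%s | %s | %s\n" % (
                data["status"], test_id, data.get("message", ""))
        return "TEST-%s | %s | %s\n" % (data["status"], test_id, time_msg)

    start_times = {"test_a.html": 1000}
    msg = {"test": "test_a.html", "status": "PASS", "time": 1450}
    print(format_test_end(msg, start_times))  # -> TEST-PASS | test_a.html | took 450ms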
|
@ -132,9 +132,9 @@ class TestId(DataType):
|
||||
def convert(self, data):
|
||||
if isinstance(data, unicode):
|
||||
return data
|
||||
elif isinstance(data, bytes):
|
||||
elif isinstance(data, str):
|
||||
return data.decode("utf-8", "replace")
|
||||
elif isinstance(data, (tuple, list)):
|
||||
elif isinstance(data, tuple):
|
||||
# This is really a bit of a hack; should really split out convertors from the
|
||||
# fields they operate on
|
||||
func = Unicode(None).convert
|
||||
|
@ -238,8 +238,7 @@ class StructuredLogger(object):
|
||||
@log_action(List("tests", Unicode),
|
||||
Dict("run_info", default=None, optional=True),
|
||||
Dict("version_info", default=None, optional=True),
|
||||
Dict("device_info", default=None, optional=True),
|
||||
Dict("extra", default=None, optional=True))
|
||||
Dict("device_info", default=None, optional=True))
|
||||
def suite_start(self, data):
|
||||
"""Log a suite_start message
|
||||
|
||||
@ -253,7 +252,7 @@ class StructuredLogger(object):
|
||||
|
||||
self._log_data("suite_start", data)
|
||||
|
||||
@log_action(Dict("extra", default=None, optional=True))
|
||||
@log_action()
|
||||
def suite_end(self, data):
|
||||
"""Log a suite_end message"""
|
||||
if not self._ensure_suite_state('suite_end', data):
|
||||
|
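The TestId.convert change is a Python 2 detail: the backout replaces the explicit bytes/list handling with the older str/tuple checks (in Python 2, str is the byte type). A sketch of the normalisation in Python 2 terms (the standalone function name is hypothetical):

    # Sketch: normalising a test id to unicode, Python 2 semantics.
    def to_unicode(data):
        if isinstance(data, unicode):        # already text
            return data
        elif isinstance(data, str):          # byte string: decode leniently
            return data.decode("utf-8", "replace")
        elif isinstance(data, tuple):        # composite test ids
            return tuple(to_unicode(item) for item in data)
        raise ValueError("unexpected test id type: %r" % (data,))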