Bug 1014125 - Bisection Base Patch. r=ahal

Vaibhav Agrawal 2014-07-04 13:55:00 +02:00
parent ba81663cbb
commit be65f1bbce
8 changed files with 408 additions and 80 deletions

View File

@@ -819,7 +819,7 @@ class Automation(object):
                   xrePath = None, certPath = None,
                   debuggerInfo = None, symbolsPath = None,
                   timeout = -1, maxTime = None, onLaunch = None,
-                  webapprtChrome = False, screenshotOnFail=False, testPath=None):
+                  webapprtChrome = False, screenshotOnFail=False, testPath=None, bisectChunk=None):
        """
        Run the app, log the duration it took to execute, return the status code.
        Kills the app if it runs for longer than |maxTime| seconds, or outputs nothing for |timeout| seconds.

View File

@@ -13,6 +13,7 @@ USE_EXTENSION_MANIFEST = 1
# files that get copied into $objdir/_tests/
SERV_FILES = \
        runtests.py \
+       bisection.py \
        automation.py \
        runtestsb2g.py \
        runtestsremote.py \

View File

@@ -0,0 +1,210 @@
import os
import math
import mozinfo

class Bisect(object):
    "Class for creating, bisecting and summarizing results for the --bisect-chunk option."

    def __init__(self, harness):
        super(Bisect, self).__init__()
        self.summary = []
        self.contents = {}
        self.testRoot = harness.testRoot
        self.testRootAbs = harness.testRootAbs

    def setup(self, tests):
        "This method initializes the variables required for test bisection."
        status = 0
        self.contents.clear()
        # We need the totalTests key in contents for the sanity check.
        self.contents['totalTests'] = tests
        self.contents['tests'] = tests
        self.contents['loop'] = 0
        return status

    def reset(self, expectedError, result):
        "This method initializes self.expectedError and self.result for each loop in runtests."
        self.expectedError = expectedError
        self.result = result

    def get_test_chunk(self, options, tests):
        "This method returns the chunk of tests that is to be run."
        if not options.totalChunks or not options.thisChunk:
            return tests

        # The logic here is the same as in chunkifyTests.js; we need it for bisecting tests.
        if options.chunkByDir:
            tests_by_dir = {}
            test_dirs = []
            for test in tests:
                directory = test.split("/")
                directory = directory[0:min(options.chunkByDir, len(directory)-1)]
                directory = "/".join(directory)
                if not directory in tests_by_dir:
                    tests_by_dir[directory] = [test]
                    test_dirs.append(directory)
                else:
                    tests_by_dir[directory].append(test)
            tests_per_chunk = float(len(test_dirs)) / options.totalChunks
            start = int(round((options.thisChunk-1) * tests_per_chunk))
            end = int(round(options.thisChunk * tests_per_chunk))
            test_dirs = test_dirs[start:end]
            return_tests = []
            for directory in test_dirs:
                return_tests += tests_by_dir[directory]
        else:
            tests_per_chunk = float(len(tests)) / options.totalChunks
            start = int(round((options.thisChunk-1) * tests_per_chunk))
            end = int(round(options.thisChunk * tests_per_chunk))
            return_tests = tests[start:end]

        options.totalChunks = None
        options.thisChunk = None
        options.chunkByDir = None
        return return_tests

    def get_tests_for_bisection(self, options, tests):
        "Make a list of tests for bisection from a given list of tests."
        tests = self.get_test_chunk(options, tests)
        bisectlist = []
        for test in tests:
            bisectlist.append(test)
            if test.endswith(options.bisectChunk):
                break
        return bisectlist

    def pre_test(self, options, tests, status):
        "This method calls other methods to set up variables and get the list of tests for bisection."
        if options.bisectChunk == "default":
            return tests
        # The second condition of the elif verifies that the failing test is the last one.
        elif 'loop' not in self.contents or not self.contents['tests'][-1].endswith(options.bisectChunk):
            tests = self.get_tests_for_bisection(options, tests)
            status = self.setup(tests)

        return self.next_chunk_reverse(options, status)

    def post_test(self, options, expectedError, result):
        "This method calls other methods to summarize the results and decide whether a sanity check is needed."
        self.reset(expectedError, result)
        status = self.summarize_chunk(options)
        # Check whether a sanity check has to be done. It is also necessary to verify that
        # options.bisectChunk is present in self.expectedError, as we do not want to run
        # the sanity check if it is "default".
        if status == -1 and options.bisectChunk in self.expectedError:
            # On a debug build we don't want to run a sanity check; it would take too much time.
            if mozinfo.info['debug']:
                return status

            testBleedThrough = self.contents['testsToRun'][0]
            tests = self.contents['totalTests']
            tests.remove(testBleedThrough)
            # Make sure that the failing test is dependent on some other test.
            if options.bisectChunk in testBleedThrough:
                return status

            status = self.setup(tests)
            self.summary.append("Sanity Check:")

        return status

    def next_chunk_reverse(self, options, status):
        "This method bisects the tests in a reverse-search fashion."
        # Base cases.
        if self.contents['loop'] == 0:
            self.contents['loop'] += 1
            self.contents['testsToRun'] = self.contents['tests']
            return self.contents['testsToRun']
        if self.contents['loop'] == 1:
            self.contents['loop'] += 1
            self.contents['testsToRun'] = [self.contents['tests'][-1]]
            return self.contents['testsToRun']

        if 'result' in self.contents:
            if self.contents['result'] == "PASS":
                chunkSize = self.contents['end'] - self.contents['start']
                self.contents['end'] = self.contents['start'] - 1
                self.contents['start'] = self.contents['end'] - chunkSize

            # self.contents['result'] will be "FAIL" only if the chunk failed
            # with the expected error.
            elif self.contents['result'] == "FAIL":
                self.contents['tests'] = self.contents['testsToRun']
                status = 1  # for initializing

        # Initialize a new window near the end of the list.
        if status:
            totalTests = len(self.contents['tests'])
            chunkSize = int(math.ceil(totalTests / 10.0))
            self.contents['start'] = totalTests - chunkSize - 1
            self.contents['end'] = totalTests - 2

        start = self.contents['start']
        end = self.contents['end'] + 1
        self.contents['testsToRun'] = self.contents['tests'][start:end]
        self.contents['testsToRun'].append(self.contents['tests'][-1])
        self.contents['loop'] += 1

        return self.contents['testsToRun']

    def summarize_chunk(self, options):
        "This method summarizes the results after the list of tests has been run."
        if options.bisectChunk == "default":
            # If there is no expectedError, all the tests have passed successfully.
            if len(self.expectedError) == 0:
                return -1
            options.bisectChunk = self.expectedError.keys()[0]
            self.summary.append("\tFound Error in test: %s" % options.bisectChunk)
            return 0

        # If options.bisectChunk is not in self.result, we need to move to the next run.
        if options.bisectChunk not in self.result:
            return -1

        self.summary.append("\tPass %d:" % self.contents['loop'])
        if len(self.contents['testsToRun']) > 1:
            self.summary.append("\t\t%d test files(start,end,failing). [%s, %s, %s]"
                                % (len(self.contents['testsToRun']),
                                   self.contents['testsToRun'][0],
                                   self.contents['testsToRun'][-2],
                                   self.contents['testsToRun'][-1]))
        else:
            self.summary.append("\t\t1 test file [%s]" % self.contents['testsToRun'][0])

        if self.result[options.bisectChunk] == "PASS":
            self.summary.append("\t\tno failures found.")
            if self.contents['loop'] == 1:
                status = -1
            elif self.contents['loop'] == 2:
                status = 1
            else:
                self.contents['result'] = "PASS"
                status = 0

        elif self.result[options.bisectChunk] == "FAIL":
            if 'expectedError' not in self.contents:
                self.summary.append("\t\t%s failed." % self.contents['testsToRun'][-1])
                self.contents['expectedError'] = self.expectedError[options.bisectChunk]
                status = 0

            elif self.expectedError[options.bisectChunk] == self.contents['expectedError']:
                self.summary.append("\t\t%s failed with expected error." % self.contents['testsToRun'][-1])
                self.contents['result'] = "FAIL"
                status = 0

                # This code checks for test-bleedthrough. It should work for any algorithm.
                numberOfTests = len(self.contents['testsToRun'])
                if numberOfTests < 3:
                    # Only 2 tests were run; since the last one is the failing test
                    # itself, the bleedthrough test is the first one.
                    self.summary.append("TEST-BLEEDTHROUGH - found failure, %s" % self.contents['testsToRun'][0])
                    status = -1
            else:
                self.summary.append("\t\t%s failed with different error." % self.contents['testsToRun'][-1])
                status = -1

        return status

    def print_summary(self):
        "This method prints the recorded summary."
        print "Bisection summary:"
        for line in self.summary:
            print line
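For intuition, here is a small standalone trace of the window arithmetic in next_chunk_reverse. The 20-test list and the failing test are invented for illustration; only the start/end formulas come from the code above.

import math

# Hypothetical run: 20 tests, where t20 is the test named by --bisect-chunk.
tests = ["t%02d" % i for i in range(1, 21)]

# Loop 0 runs every test; loop 1 runs [t20] alone to rule out an intrinsic
# failure. Loop 2 then initializes a ~10% window just before the failing test:
total = len(tests)
chunkSize = int(math.ceil(total / 10.0))       # 2
start, end = total - chunkSize - 1, total - 2  # 17, 18
print(tests[start:end + 1] + [tests[-1]])      # ['t18', 't19', 't20']

# If that window passes, the same-sized window slides backward:
chunkSize = end - start                        # 1
end = start - 1                                # 16
start = end - chunkSize                        # 15
print(tests[start:end + 1] + [tests[-1]])      # ['t16', 't17', 't20']

# On a failure with the expected error, testsToRun becomes the new test list
# and the ~10% split is recomputed inside it, until only two tests remain and
# the summary reports TEST-BLEEDTHROUGH with the culprit.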

View File

@@ -198,7 +198,7 @@ class MochitestRunner(MozbuildObject):
        jsdebugger=False, debug_on_failure=False, start_at=None, end_at=None,
        e10s=False, dmd=False, dump_output_directory=None,
        dump_about_memory_after_test=False, dump_dmd_after_test=False,
-       install_extension=None, quiet=False, environment=[], app_override=None, runByDir=False,
+       install_extension=None, quiet=False, environment=[], app_override=None, bisectChunk=None, runByDir=False,
        useTestMediaDevices=False, **kwargs):
        """Runs a mochitest.

@@ -324,6 +324,7 @@ class MochitestRunner(MozbuildObject):
        options.dumpOutputDirectory = dump_output_directory
        options.quiet = quiet
        options.environment = environment
+       options.bisectChunk = bisectChunk
        options.runByDir = runByDir
        options.useTestMediaDevices = useTestMediaDevices

@@ -541,6 +542,11 @@ def MochitestCommand(func):
        help='Run each directory in a single browser instance with a fresh profile.')
    func = runbydir(func)

+   bisect_chunk = CommandArgument('--bisect-chunk', type=str,
+       dest='bisectChunk',
+       help='Specify the failing test name to find the previous tests that may be causing the failure.')
+   func = bisect_chunk(func)
+
    test_media = CommandArgument('--use-test-media-devices', default=False,
        action='store_true',
        dest='useTestMediaDevices',
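A brief usage sketch (the test name is hypothetical): an invocation such as ./mach mochitest-plain --bisect-chunk test_example.html hands the leaf name of the failing test to the harness, and Bisect.get_tests_for_bisection truncates the active test list at the first path that matches it:

# Standalone illustration of the endswith() matching; the paths are invented.
tests = ["dom/tests/test_a.html",
         "dom/tests/test_example.html",
         "dom/tests/test_z.html"]
bisectChunk = "test_example.html"

bisectlist = []
for test in tests:
    bisectlist.append(test)
    if test.endswith(bisectChunk):
        break

print(bisectlist)  # everything up to and including the failing test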

View File

@@ -151,6 +151,13 @@ class MochitestOptions(optparse.OptionParser):
          "help": "start in the given directory's tests",
          "default": "",
        }],
+       [["--bisect-chunk"],
+       { "action": "store",
+         "type": "string",
+         "dest": "bisectChunk",
+         "help": "Specify the failing test name to find the previous tests that may be causing the failure.",
+         "default": None,
+       }],
        [["--start-at"],
        { "action": "store",
          "type": "string",

View File

@@ -29,6 +29,7 @@ import time
import traceback
import urllib2
import zipfile
+import bisection

from automationutils import environment, getDebuggerInfo, isURL, KeyValueParseError, parseKeyValue, processLeakLog, dumpScreen, ShutdownLeaks, printstatus, LSANLeaks
from datetime import datetime
@@ -453,72 +454,27 @@ class MochitestUtilsMixin(object):
            testURL = "about:blank"
        return testURL

-   def buildTestPath(self, options, disabled=True):
+   def buildTestPath(self, options, testsToFilter=None, disabled=True):
        """ Build the url path to the specific test harness and test file or directory
            Build a manifest of tests to run and write out a json file for the harness to read
+           testsToFilter option is used to filter/keep the tests provided in the list

            disabled -- This allows to add all disabled tests on the build side
                        and then on the run side to only run the enabled ones
        """
-       self.setTestRoot(options)
-       manifest = self.getTestManifest(options)
-       if manifest:
-           # Python 2.6 doesn't allow unicode keys to be used for keyword
-           # arguments. This gross hack works around the problem until we
-           # rid ourselves of 2.6.
-           info = {}
-           for k, v in mozinfo.info.items():
-               if isinstance(k, unicode):
-                   k = k.encode('ascii')
-               info[k] = v
-
-           # Bug 883858 - return all tests including disabled tests
-           testPath = self.getTestPath(options)
-           testPath = testPath.replace('\\', '/')
-           if testPath.endswith('.html') or \
-              testPath.endswith('.xhtml') or \
-              testPath.endswith('.xul') or \
-              testPath.endswith('.js'):
-               # In the case where we have a single file, we don't want to filter based on options such as subsuite.
-               tests = manifest.active_tests(disabled=disabled, options=None, **info)
-               for test in tests:
-                   if 'disabled' in test:
-                       del test['disabled']
-           else:
-               tests = manifest.active_tests(disabled=disabled, options=options, **info)
-           paths = []
-
-           for test in tests:
-               pathAbs = os.path.abspath(test['path'])
-               assert pathAbs.startswith(self.testRootAbs)
-               tp = pathAbs[len(self.testRootAbs):].replace('\\', '/').strip('/')
-
-               # Filter out tests if we are using --test-path
-               if testPath and not tp.startswith(testPath):
-                   continue
-
-               if not self.isTest(options, tp):
-                   log.warning('Warning: %s from manifest %s is not a valid test' % (test['name'], test['manifest']))
-                   continue
-
-               testob = {'path': tp}
-               if test.has_key('disabled'):
-                   testob['disabled'] = test['disabled']
-               paths.append(testob)
-
-           # Sort tests so they are run in a deterministic order.
-           def path_sort(ob1, ob2):
-               path1 = ob1['path'].split('/')
-               path2 = ob2['path'].split('/')
-               return cmp(path1, path2)
-
-           paths.sort(path_sort)
-
-           # Bug 883865 - add this functionality into manifestparser
-           with open(os.path.join(SCRIPT_DIR, 'tests.json'), 'w') as manifestFile:
-               manifestFile.write(json.dumps({'tests': paths}))
-           options.manifestFile = 'tests.json'
+       tests = self.getActiveTests(options, disabled)
+       paths = []
+
+       for test in tests:
+           if testsToFilter and (test['path'] not in testsToFilter):
+               continue
+           paths.append(test)
+
+       # Bug 883865 - add this functionality into manifestparser
+       with open(os.path.join(SCRIPT_DIR, 'tests.json'), 'w') as manifestFile:
+           manifestFile.write(json.dumps({'tests': paths}))
+       options.manifestFile = 'tests.json'

        return self.buildTestURL(options)
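To illustrate the new testsToFilter behavior (the test objects are invented; the keep-if-listed rule is the loop above): a None filter passes everything through, otherwise only listed paths survive:

# Minimal sketch of the filtering step in buildTestPath.
tests = [{'path': 'dom/test_a.html'}, {'path': 'dom/test_b.html'}]

def filter_tests(tests, testsToFilter=None):
    paths = []
    for test in tests:
        if testsToFilter and (test['path'] not in testsToFilter):
            continue
        paths.append(test)
    return paths

print(filter_tests(tests))                       # both tests kept
print(filter_tests(tests, ['dom/test_b.html']))  # only test_b survives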
@@ -898,6 +854,13 @@ class Mochitest(MochitestUtilsMixin):
        self.haveDumpedScreen = False

+       # Create variables to count the number of passes, fails, todos.
+       self.countpass = 0
+       self.countfail = 0
+       self.counttodo = 0
+
+       self.expectedError = {}
+       self.result = {}
+
    def extraPrefs(self, extraPrefs):
        """interpolate extra preferences from option strings"""
@@ -1078,6 +1041,7 @@ class Mochitest(MochitestUtilsMixin):
                os.remove(options.pidFile + ".xpcshell.pid")
            except:
                log.warn("cleaning up pidfile '%s' was unsuccessful from the test harness", options.pidFile)
+       options.manifestFile = None

    def dumpScreen(self, utilityPath):
        if self.haveDumpedScreen:
@@ -1189,7 +1153,8 @@ class Mochitest(MochitestUtilsMixin):
                 onLaunch=None,
                 webapprtChrome=False,
                 screenshotOnFail=False,
-                testPath=None):
+                testPath=None,
+                bisectChunk=None):
        """
        Run the app, log the duration it took to execute, return the status code.
        Kills the app if it runs for longer than |maxTime| seconds, or outputs nothing for |timeout| seconds.

@@ -1255,6 +1220,7 @@ class Mochitest(MochitestUtilsMixin):
                              dump_screen_on_fail=screenshotOnFail,
                              shutdownLeaks=shutdownLeaks,
                              lsanLeaks=lsanLeaks,
+                             bisectChunk=bisectChunk
        )

        def timeoutHandler():
@@ -1340,19 +1306,129 @@ class Mochitest(MochitestUtilsMixin):
        return status

+   def initializeLooping(self, options):
+       """
+       This method clears the harness state before each run of the loop.
+       It is required for --run-by-dir and --bisect-chunk.
+       """
+       self.expectedError.clear()
+       self.result.clear()
+       options.manifestFile = None
+       options.profilePath = None
+       self.urlOpts = []
+
+   def getActiveTests(self, options, disabled=True):
+       """
+       This method parses the manifest and returns the active, filtered tests.
+       """
+       self.setTestRoot(options)
+       manifest = self.getTestManifest(options)
+       paths = []
+       if manifest:
+           # Python 2.6 doesn't allow unicode keys to be used for keyword
+           # arguments. This gross hack works around the problem until we
+           # rid ourselves of 2.6.
+           info = {}
+           for k, v in mozinfo.info.items():
+               if isinstance(k, unicode):
+                   k = k.encode('ascii')
+               info[k] = v
+
+           # Bug 883858 - return all tests including disabled tests
+           testPath = self.getTestPath(options)
+           testPath = testPath.replace('\\', '/')
+           if testPath.endswith('.html') or \
+              testPath.endswith('.xhtml') or \
+              testPath.endswith('.xul') or \
+              testPath.endswith('.js'):
+               # In the case of a single file, we don't want to filter based on options such as subsuite.
+               tests = manifest.active_tests(disabled=disabled, options=None, **info)
+               for test in tests:
+                   if 'disabled' in test:
+                       del test['disabled']
+           else:
+               tests = manifest.active_tests(disabled=disabled, options=options, **info)
+
+           for test in tests:
+               pathAbs = os.path.abspath(test['path'])
+               assert pathAbs.startswith(self.testRootAbs)
+               tp = pathAbs[len(self.testRootAbs):].replace('\\', '/').strip('/')
+
+               # Filter out tests if we are using --test-path
+               if testPath and not tp.startswith(testPath):
+                   continue
+
+               if not self.isTest(options, tp):
+                   log.warning('Warning: %s from manifest %s is not a valid test' % (test['name'], test['manifest']))
+                   continue
+
+               testob = {'path': tp}
+               if test.has_key('disabled'):
+                   testob['disabled'] = test['disabled']
+               paths.append(testob)
+
+           # Sort tests so they are run in a deterministic order.
+           def path_sort(ob1, ob2):
+               path1 = ob1['path'].split('/')
+               path2 = ob2['path'].split('/')
+               return cmp(path1, path2)
+
+           paths.sort(path_sort)
+
+       return paths
+
+   def getTestsToRun(self, options):
+       """
+       This method makes the list of tests that are to be run. It is required mainly for --bisect-chunk.
+       """
+       tests = self.getActiveTests(options)
+       testsToRun = []
+       for test in tests:
+           if test.has_key('disabled'):
+               continue
+           testsToRun.append(test['path'])
+       return testsToRun
+
+   def runMochitests(self, options, onLaunch=None):
+       "This is the base method that drives the other methods in this class for --bisect-chunk."
+       testsToRun = self.getTestsToRun(options)
+
+       # Create an instance of the Bisect class for the --bisect-chunk option.
+       bisect = bisection.Bisect(self)
+       finished = False
+       status = 0
+       while not finished:
+           if options.bisectChunk:
+               testsToRun = bisect.pre_test(options, testsToRun, status)
+
+           self.doTests(options, onLaunch, testsToRun)
+           if options.bisectChunk:
+               status = bisect.post_test(options, self.expectedError, self.result)
+           else:
+               status = -1
+
+           if status == -1:
+               finished = True
+
+       # We need to print the summary only if options.bisectChunk has a value.
+       # We also need to make sure that we do not print the summary in between
+       # test runs with --run-by-dir.
+       if options.bisectChunk and options.bisectChunk in self.result:
+           bisect.print_summary()
+           return -1
+       return 0
+
    def runTests(self, options, onLaunch=None):
        """ Prepare, configure, run tests and cleanup """

-       # Create variables to count the number of passes, fails, todos.
-       self.countpass = 0
-       self.countfail = 0
-       self.counttodo = 0
-
        self.setTestRoot(options)

        if not options.runByDir:
-           return self.doTests(options, onLaunch)
+           self.runMochitests(options, onLaunch)
+           return 0

+       # code for --run-by-dir
        dirs = self.getDirectories(options)

        if options.totalChunks > 1:
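The driver protocol of runMochitests can be reduced to a self-contained toy; the canned status sequence below is an assumption, used purely to show when the loop terminates (in the real harness, post_test derives the status from the run's results):

# Toy rendering of the control flow: pre_test picks the next window,
# the tests run, post_test summarizes; a status of -1 ends the loop.
statuses = iter([0, 0, -1])  # pretend bisection needs three passes

finished = False
status = 0
while not finished:
    testsToRun = ["..."]     # stands in for bisect.pre_test(options, testsToRun, status)
    print("running %d test(s)" % len(testsToRun))  # stands in for doTests(...)
    status = next(statuses)  # stands in for bisect.post_test(options, ...)
    if status == -1:
        finished = True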
@@ -1366,8 +1442,6 @@ class Mochitest(MochitestUtilsMixin):
            options.chunkByDir = 0

        inputTestPath = self.getTestPath(options)
        for dir in dirs:
-           options.manifestFile = None
-
            if inputTestPath and not inputTestPath.startswith(dir):
                continue

@@ -1377,9 +1451,9 @@ class Mochitest(MochitestUtilsMixin):
            # If we are using --run-by-dir, we should not use the profile path (if) provided
            # by the user, since we need to create a new directory for each run. We would face
            # problems if we use the directory provided by the user.
-           options.profilePath = None
-           self.urlOpts = []
-           self.doTests(options, onLaunch)
+           runResult = self.runMochitests(options, onLaunch)
+           if runResult == -1:
+               return 0

        # printing total number of tests
        if options.browserChrome:
@@ -1396,7 +1470,12 @@ class Mochitest(MochitestUtilsMixin):
        print "3 INFO Todo: %s" % self.counttodo
        print "4 INFO SimpleTest FINISHED"

-   def doTests(self, options, onLaunch=None):
+   def doTests(self, options, onLaunch=None, testsToFilter=None):
+       # A call to initializeLooping is required for --run-by-dir or --bisect-chunk,
+       # since we need to initialize the harness variables for each loop.
+       if options.bisectChunk or options.runByDir:
+           self.initializeLooping(options)
+
        # get debugger info, a dict of:
        # {'path': path to the debugger (string),
        #  'interactive': whether the debugger is interactive or not (bool)

@@ -1431,7 +1510,8 @@ class Mochitest(MochitestUtilsMixin):
        try:
            self.startServers(options, debuggerInfo)

-           testURL = self.buildTestPath(options)
+           # The testsToFilter parameter filters the test list that is sent to buildTestPath.
+           testURL = self.buildTestPath(options, testsToFilter)

            # read the number of tests here, if we are not going to run any, terminate early
            if os.path.exists(os.path.join(SCRIPT_DIR, 'tests.json')):
@@ -1488,7 +1568,8 @@ class Mochitest(MochitestUtilsMixin):
                        onLaunch=onLaunch,
                        webapprtChrome=options.webapprtChrome,
                        screenshotOnFail=options.screenshotOnFail,
-                       testPath=options.testPath
+                       testPath=options.testPath,
+                       bisectChunk=options.bisectChunk
            )
        except KeyboardInterrupt:
            log.info("runtests.py | Received keyboard interrupt.\n");
@@ -1532,7 +1613,7 @@ class Mochitest(MochitestUtilsMixin):

class OutputHandler(object):
    """line output handler for mozrunner"""

-   def __init__(self, harness, utilityPath, symbolsPath=None, dump_screen_on_timeout=True, dump_screen_on_fail=False, shutdownLeaks=None, lsanLeaks=None):
+   def __init__(self, harness, utilityPath, symbolsPath=None, dump_screen_on_timeout=True, dump_screen_on_fail=False, shutdownLeaks=None, lsanLeaks=None, bisectChunk=None):
        """
        harness -- harness instance
        dump_screen_on_timeout -- whether to dump the screen on timeout

@@ -1544,6 +1625,7 @@ class Mochitest(MochitestUtilsMixin):
        self.dump_screen_on_fail = dump_screen_on_fail
        self.shutdownLeaks = shutdownLeaks
        self.lsanLeaks = lsanLeaks
+       self.bisectChunk = bisectChunk

        # perl binary to use
        self.perl = which('perl')
@@ -1560,6 +1642,9 @@ class Mochitest(MochitestUtilsMixin):
        """per line handler of output for mozprocess"""
        for handler in self.outputHandlers():
            line = handler(line)
+       if self.bisectChunk:
+           self.record_result(line)
+           self.first_error(line)

    __call__ = processOutputLine

    def outputHandlers(self):
@@ -1629,6 +1714,25 @@ class Mochitest(MochitestUtilsMixin):
    # output line handlers:
    # these take a line and return a line

+   def record_result(self, line):
+       # A TEST-START line initializes the test's result key to PASS by default.
+       if "TEST-START" in line:
+           key = line.split('|')[-1].split('/')[-1].strip()
+           self.harness.result[key] = "PASS"
+       elif "TEST-UNEXPECTED" in line:
+           key = line.split('|')[-2].split('/')[-1].strip()
+           self.harness.result[key] = "FAIL"
+       elif "TEST-KNOWN-FAIL" in line:
+           key = line.split('|')[-2].split('/')[-1].strip()
+           self.harness.result[key] = "TODO"
+       return line
+
+   def first_error(self, line):
+       if "TEST-UNEXPECTED-FAIL" in line:
+           key = line.split('|')[-2].split('/')[-1].strip()
+           if key not in self.harness.expectedError:
+               self.harness.expectedError[key] = line.split('|')[-1].strip()
+       return line
+
    def countline(self, line):
        val = 0
        try:
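As a worked example of the key extraction in record_result and first_error (the log line is fabricated but follows the |-separated mochitest format):

# TEST-UNEXPECTED-FAIL lines carry the test path in the second-to-last field
# and the error message in the last one.
line = "TEST-UNEXPECTED-FAIL | /tests/dom/tests/test_b.html | uncaught exception"

key = line.split('|')[-2].split('/')[-1].strip()
error = line.split('|')[-1].strip()

print(key)    # test_b.html        -> harness.result[key] = "FAIL"
print(error)  # uncaught exception -> harness.expectedError[key]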

View File

@@ -69,9 +69,9 @@ class B2GMochitest(MochitestUtilsMixin):
            test_url += "?" + "&".join(self.urlOpts)
        self.test_script_args.append(test_url)

-   def buildTestPath(self, options):
+   def buildTestPath(self, options, testsToFilter=None):
        if options.manifestFile != 'tests.json':
-           super(B2GMochitest, self).buildTestPath(options, disabled=False)
+           super(B2GMochitest, self).buildTestPath(options, testsToFilter, disabled=False)
        return self.buildTestURL(options)

    def build_profile(self, options):

View File

@@ -380,13 +380,13 @@ class MochiRemote(Mochitest):
            options.logFile = self.localLog
        return retVal

-   def buildTestPath(self, options):
+   def buildTestPath(self, options, testsToFilter=None):
        if options.robocopIni != "":
            # Skip over manifest building if we just want to run
            # robocop tests.
            return self.buildTestURL(options)
        else:
-           return super(MochiRemote, self).buildTestPath(options)
+           return super(MochiRemote, self).buildTestPath(options, testsToFilter)

    def installChromeFile(self, filename, options):
        parts = options.app.split('/')