Mirror of https://github.com/mozilla/gecko-dev.git, synced 2024-11-24 21:31:04 +00:00
Bug 1685878 - Remove remoteautomation.py; r=jmaher
remoteautomation.py is an old collection of code used by android mochitest and android reftest; it survived the removal of automation.py. This patch removes remoteautomation.py, moving the majority of the functionality to a new class in mozdevice. Some features are simplified or removed, and the remainder moved into the remote mochitest/reftest harnesses. Differential Revision: https://phabricator.services.mozilla.com/D102239
This commit is contained in:
parent 3fbacb4238
commit fc9db06c35
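For orientation only (this sketch is not part of the patch): judging from the harness changes below, the new mozdevice class is driven roughly as follows. Everything here other than RemoteProcessMonitor itself (app_name, device, log, message_logger, the remote paths, app, debugger_info, test_url, extra_args, env, timeout, symbols_path, check_for_crashes) is a hypothetical placeholder for a value the mochitest/reftest harness already holds.

    from mozdevice import RemoteProcessMonitor

    # device is a mozdevice ADBDevice, log a mozlog logger, message_logger the
    # harness log parser -- all placeholders supplied by the real harness.
    rpm = RemoteProcessMonitor(
        app_name,            # e.g. the GeckoView test app package name
        device,
        log,
        message_logger,
        remote_log_file,     # log file path on the device
        remote_profile,      # profile directory on the device
    )
    pid = rpm.launch(app, debugger_info, test_url, extra_args, env=env, e10s=True)
    if not rpm.wait(timeout):   # False: no output for `timeout` seconds
        status = 1              # the harness treats the run as failed
    # check_for_crashes/symbols_path: the harness helper shown in the diff below
    crashed = check_for_crashes(symbols_path, rpm.last_test_seen)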
@@ -1,507 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

import datetime
import logging
import os
import re
import posixpath
import shutil
import sys
import tempfile
import time

import six

from mozdevice import ADBTimeoutError
from mozlog import get_default_logger
from mozscreenshot import dump_screen, dump_device_screen
import mozcrash


def resetGlobalLog(log):
    while _log.handlers:
        _log.removeHandler(_log.handlers[0])
    handler = logging.StreamHandler(log)
    _log.setLevel(logging.INFO)
    _log.addHandler(handler)


# We use the logging system here primarily because it'll handle multiple
# threads, which is needed to process the output of the server and application
# processes simultaneously.
_log = logging.getLogger()
resetGlobalLog(sys.stdout)

# signatures for logcat messages that we don't care about much
fennecLogcatFilters = [
    "The character encoding of the HTML document was not declared",
    "Use of Mutation Events is deprecated. Use MutationObserver instead.",
    "Unexpected value from nativeGetEnabledTags: 0",
]


class RemoteAutomation(object):
    def __init__(
        self, device, appName="", remoteProfile=None, remoteLog=None, processArgs=None
    ):
        super(RemoteAutomation, self).__init__()
        self.device = device
        self.appName = appName
        self.remoteProfile = remoteProfile
        self.remoteLog = remoteLog
        self.processArgs = processArgs or {}
        self.lastTestSeen = "remoteautomation.py"
        self.log = _log

    def runApp(
        self,
        testURL,
        env,
        app,
        profileDir,
        extraArgs,
        utilityPath=None,
        xrePath=None,
        debuggerInfo=None,
        symbolsPath=None,
        timeout=-1,
        maxTime=None,
        e10s=True,
        **kwargs
    ):
        """
        Run the app, log the duration it took to execute, return the status code.
        Kills the app if it runs for longer than |maxTime| seconds, or outputs nothing
        for |timeout| seconds.
        """
        if self.device.is_file(self.remoteLog):
            self.device.rm(self.remoteLog)
            self.log.info("remoteautomation.py | runApp deleted %s" % self.remoteLog)

        if timeout == -1:
            timeout = self.DEFAULT_TIMEOUT
        self.utilityPath = utilityPath

        cmd, args = self.buildCommandLine(
            app, debuggerInfo, profileDir, testURL, extraArgs
        )
        startTime = datetime.datetime.now()

        self.lastTestSeen = "remoteautomation.py"
        self.launchApp(
            [cmd] + args,
            env=self.environment(env=env, crashreporter=not debuggerInfo),
            e10s=e10s,
            **self.processArgs
        )

        self.log.info("remoteautomation.py | Application pid: %d" % self.pid)

        status = self.waitForFinish(timeout, maxTime)
        self.log.info(
            "remoteautomation.py | Application ran for: %s"
            % str(datetime.datetime.now() - startTime)
        )

        crashed = self.checkForCrashes(symbolsPath)
        if crashed:
            status = 1

        return status, self.lastTestSeen

    # Set up what we need for the remote environment
    def environment(self, env=None, crashreporter=True, **kwargs):
        # Because we are running remote, we don't want to mimic the local env
        # so no copying of os.environ
        if env is None:
            env = {}

        if crashreporter:
            env["MOZ_CRASHREPORTER_NO_REPORT"] = "1"
            env["MOZ_CRASHREPORTER"] = "1"
            env["MOZ_CRASHREPORTER_SHUTDOWN"] = "1"
        else:
            env["MOZ_CRASHREPORTER_DISABLE"] = "1"

        # Crash on non-local network connections by default.
        # MOZ_DISABLE_NONLOCAL_CONNECTIONS can be set to "0" to temporarily
        # enable non-local connections for the purposes of local testing.
        # Don't override the user's choice here. See bug 1049688.
        env.setdefault("MOZ_DISABLE_NONLOCAL_CONNECTIONS", "1")

        # Send an env var noting that we are in automation. Passing any
        # value except the empty string will declare the value to exist.
        #
        # This may be used to disabled network connections during testing, e.g.
        # Switchboard & telemetry uploads.
        env.setdefault("MOZ_IN_AUTOMATION", "1")

        # Set WebRTC logging in case it is not set yet.
        env.setdefault("R_LOG_LEVEL", "6")
        env.setdefault("R_LOG_DESTINATION", "stderr")
        env.setdefault("R_LOG_VERBOSE", "1")

        return env

    def waitForFinish(self, timeout, maxTime):
        """Wait for tests to finish.
        If maxTime seconds elapse or no output is detected for timeout
        seconds, kill the process and fail the test.
        """
        # maxTime is used to override the default timeout, we should honor that
        status = self.wait(timeout=maxTime, noOutputTimeout=timeout)

        topActivity = self.device.get_top_activity(timeout=60)
        if topActivity == self.procName:
            self.log.info("%s unexpectedly found running. Killing..." % self.procName)
            self.kill(True)
        if status == 1:
            if maxTime:
                self.log.error(
                    "TEST-UNEXPECTED-FAIL | %s | "
                    "application ran for longer than allowed maximum time "
                    "of %s seconds" % (self.lastTestSeen, maxTime)
                )
            else:
                self.log.error(
                    "TEST-UNEXPECTED-FAIL | %s | "
                    "application ran for longer than allowed maximum time"
                    % self.lastTestSeen
                )
        if status == 2:
            self.log.error(
                "TEST-UNEXPECTED-FAIL | %s | "
                "application timed out after %d seconds with no output"
                % (self.lastTestSeen, int(timeout))
            )

        return status

    def checkForCrashes(self, symbolsPath):
        try:
            dumpDir = tempfile.mkdtemp()
            remoteCrashDir = posixpath.join(self.remoteProfile, "minidumps")
            if not self.device.is_dir(remoteCrashDir):
                return False
            self.device.pull(remoteCrashDir, dumpDir)

            logger = get_default_logger()
            crashed = mozcrash.log_crashes(
                logger, dumpDir, symbolsPath, test=self.lastTestSeen
            )

        finally:
            try:
                shutil.rmtree(dumpDir)
            except Exception as e:
                print("WARNING: unable to remove directory %s: %s" % (dumpDir, str(e)))
        return crashed

    def buildCommandLine(self, app, debuggerInfo, profileDir, testURL, extraArgs):
        # If remote profile is specified, use that instead
        if self.remoteProfile:
            profileDir = self.remoteProfile

        # Hack for robocop, if app is "am" and extraArgs contains the rest of the stuff, lets
        # assume extraArgs is all we need
        if app == "am" and extraArgs[0] in ("instrument", "start"):
            return app, extraArgs

        cmd = os.path.abspath(app)

        args = []

        if debuggerInfo:
            args.extend(debuggerInfo.args)
            args.append(cmd)
            cmd = os.path.abspath(debuggerInfo.path)

        profileDirectory = profileDir + "/"

        args.extend(("-no-remote", "-profile", profileDirectory))
        if testURL is not None:
            args.append((testURL))
        args.extend(extraArgs)

        try:
            args.remove("-foreground")
        except Exception:
            pass
        return app, args

    def launchApp(self, cmd, env=None, e10s=True, messageLogger=None, counts=None):
        self.messageLogger = messageLogger
        self.stdoutlen = 0

        if self.appName and self.device.process_exist(self.appName):
            print(
                "remoteautomation.py %s is already running. Stopping..." % self.appName
            )
            self.device.stop_application(self.appName)

        self.counts = counts
        if self.counts is not None:
            self.counts["pass"] = 0
            self.counts["fail"] = 0
            self.counts["todo"] = 0

        if cmd[0] == "am":
            cmd = " ".join(cmd)
            self.procName = self.appName
            if not self.device.shell_bool(cmd):
                print("remoteautomation.py failed to launch %s" % cmd)
        else:
            self.procName = cmd[0].split(posixpath.sep)[-1]
            args = cmd
            if args[0] == self.appName:
                args = args[1:]
            url = args[-1:][0]
            if url.startswith("/"):
                # this is probably a reftest profile directory, not a url
                url = None
            else:
                args = args[:-1]
            if "geckoview" in self.appName:
                activity = "TestRunnerActivity"
                self.device.launch_activity(
                    self.appName,
                    activity_name=activity,
                    e10s=e10s,
                    moz_env=env,
                    extra_args=args,
                    url=url,
                )
            else:
                self.device.launch_fennec(
                    self.appName, moz_env=env, extra_args=args, url=url
                )

        # Setting timeout at 1 hour since on a remote device this takes much longer.
        # Temporarily increased to 110 minutes because no more chunks can be created.
        self.timeout = 6600

        # Used to buffer log messages until we meet a line break
        self.logBuffer = ""

    @property
    def pid(self):
        procs = self.device.get_process_list()
        # limit the comparison to the first 75 characters due to a
        # limitation in processname length in android.
        pids = [proc[0] for proc in procs if proc[1] == self.procName[:75]]

        if pids is None or len(pids) < 1:
            return 0
        return pids[0]

    def read_stdout(self):
        """
        Fetch the full remote log file, log any new content and return True if new
        content processed.
        """
        try:
            newLogContent = self.device.get_file(self.remoteLog, offset=self.stdoutlen)
        except ADBTimeoutError:
            raise
        except Exception as e:
            self.log.exception(
                "remoteautomation.py | exception reading log: %s" % str(e)
            )
            return False
        if not newLogContent:
            return False

        self.stdoutlen += len(newLogContent)
        newLogContent = six.ensure_str(newLogContent, errors="replace")

        if self.messageLogger is None:
            testStartFilenames = re.findall(r"TEST-START \| ([^\s]*)", newLogContent)
            if testStartFilenames:
                self.lastTestSeen = testStartFilenames[-1]
            print(newLogContent)
            return True

        self.logBuffer += newLogContent
        lines = self.logBuffer.split("\n")
        lines = [l for l in lines if l]

        if lines:
            if self.logBuffer.endswith("\n"):
                # all lines are complete; no need to buffer
                self.logBuffer = ""
            else:
                # keep the last (unfinished) line in the buffer
                self.logBuffer = lines[-1]
                del lines[-1]

        if not lines:
            return False

        for line in lines:
            # This passes the line to the logger (to be logged or buffered)
            if isinstance(line, six.text_type):
                # if line is unicode - let's encode it to bytes
                parsed_messages = self.messageLogger.write(
                    line.encode("UTF-8", "replace")
                )
            else:
                # if line is bytes type, write it as it is
                parsed_messages = self.messageLogger.write(line)

            for message in parsed_messages:
                if isinstance(message, dict):
                    if message.get("action") == "test_start":
                        self.lastTestSeen = message["test"]
                    elif message.get("action") == "test_end":
                        self.lastTestSeen = "{} (finished)".format(message["test"])
                    elif message.get("action") == "suite_end":
                        self.lastTestSeen = "Last test finished"
                    elif message.get("action") == "log":
                        line = message["message"].strip()
                        if self.counts:
                            m = re.match(".*:\s*(\d*)", line)
                            if m:
                                try:
                                    val = int(m.group(1))
                                    if "Passed:" in line:
                                        self.counts["pass"] += val
                                        self.lastTestSeen = "Last test finished"
                                    elif "Failed:" in line:
                                        self.counts["fail"] += val
                                    elif "Todo:" in line:
                                        self.counts["todo"] += val
                                except ADBTimeoutError:
                                    raise
                                except Exception:
                                    pass

        return True

    # Wait for the remote process to end (or for its activity to go to background).
    # While waiting, periodically retrieve the process output and print it.
    # If the process is still running after *timeout* seconds, return 1;
    # If the process is still running but no output is received in *noOutputTimeout*
    # seconds, return 2;
    # Else, once the process exits/goes to background, return 0.
    def wait(self, timeout=None, noOutputTimeout=None):
        timer = 0
        noOutputTimer = 0
        interval = 10
        if timeout is None:
            timeout = self.timeout
        status = 0
        top = self.procName
        slowLog = False
        endTime = datetime.datetime.now() + datetime.timedelta(seconds=timeout)
        # wait for log creation on startup
        retries = 0
        while retries < 20 and not self.device.is_file(self.remoteLog):
            retries += 1
            time.sleep(1)
        if self.device.is_file(self.remoteLog):
            # We must change the remote log's permissions so that the shell can read it.
            self.device.chmod(self.remoteLog, mask="666")
        else:
            print("Failed wait for remote log: %s missing?" % self.remoteLog)
        while top == self.procName:
            # Get log updates on each interval, but if it is taking
            # too long, only do it every 60 seconds
            hasOutput = False
            if (not slowLog) or (timer % 60 == 0):
                startRead = datetime.datetime.now()
                hasOutput = self.read_stdout()
                if (datetime.datetime.now() - startRead) > datetime.timedelta(
                    seconds=5
                ):
                    slowLog = True
                if hasOutput:
                    noOutputTimer = 0
                if self.counts and "pass" in self.counts and self.counts["pass"] > 0:
                    interval = 0.5
            time.sleep(interval)
            timer += interval
            noOutputTimer += interval
            if datetime.datetime.now() > endTime:
                status = 1
                break
            if noOutputTimeout and noOutputTimer > noOutputTimeout:
                status = 2
                break
            if not hasOutput:
                top = self.device.get_top_activity(timeout=60)
                if top is None:
                    print("Failed to get top activity, retrying, once...")
                    top = self.device.get_top_activity(timeout=60)
        # Flush anything added to stdout during the sleep
        self.read_stdout()
        print("wait for %s complete; top activity=%s" % (self.procName, top))
        return status

    def kill(self, stagedShutdown=False):
        # Take a screenshot to capture the screen state just before
        # the application is killed.
        # Do not use the on-device screenshot options since
        # they rarely work well with Firefox on the Android
        # emulator. dump_screen provides an effective
        # screenshot of the emulator and its host desktop.
        if not self.device._device_serial.startswith("emulator-"):
            dump_device_screen(self.device, get_default_logger())
        elif self.utilityPath:
            dump_screen(self.utilityPath, get_default_logger())
        if stagedShutdown:
            # Trigger an ANR report with "kill -3" (SIGQUIT)
            try:
                self.device.pkill(self.procName, sig=3, attempts=1)
            except ADBTimeoutError:
                raise
            except:  # NOQA: E722
                pass
            time.sleep(3)
            # Trigger a breakpad dump with "kill -6" (SIGABRT)
            try:
                self.device.pkill(self.procName, sig=6, attempts=1)
            except ADBTimeoutError:
                raise
            except:  # NOQA: E722
                pass
            # Wait for process to end
            retries = 0
            while retries < 3:
                if self.device.process_exist(self.procName):
                    print("%s still alive after SIGABRT: waiting..." % self.procName)
                    time.sleep(5)
                else:
                    break
                retries += 1
            if self.device.process_exist(self.procName):
                try:
                    self.device.pkill(self.procName, sig=9, attempts=1)
                except ADBTimeoutError:
                    raise
                except:  # NOQA: E722
                    print("%s still alive after SIGKILL!" % self.procName)
            if self.device.process_exist(self.procName):
                self.device.stop_application(self.procName)
        else:
            self.device.stop_application(self.procName)
        # Test harnesses use the MOZ_CRASHREPORTER environment variables to suppress
        # the interactive crash reporter, but that may not always be effective;
        # check for and cleanup errant crashreporters.
        crashreporter = "%s.CrashReporter" % self.procName
        if self.device.process_exist(crashreporter):
            print("Warning: %s unexpectedly found running. Killing..." % crashreporter)
            try:
                self.device.pkill(crashreporter)
            except ADBTimeoutError:
                raise
            except:  # NOQA: E722
                pass
            if self.device.process_exist(crashreporter):
                print("ERROR: %s still running!!" % crashreporter)

    @staticmethod
    def elf_arm(filename):
        data = open(filename, "rb").read(20)
        return data[:4] == "\x7fELF" and ord(data[18]) == 40  # EM_ARM
@@ -18,7 +18,6 @@ FINAL_TARGET_FILES += [
]

TEST_HARNESS_FILES.reftest += [
    "/build/mobile/remoteautomation.py",
    "/build/pgo/server-locations.txt",
    "/testing/mochitest/server.js",
    "mach_test_package_commands.py",
@@ -182,6 +182,9 @@ class OutputHandler(object):

        return [data]

    def write(self, data):
        return self.__call__(data)

    def verbatim(self, line):
        if self.stack_fixer_function:
            line = self.stack_fixer_function(line)
@@ -4,8 +4,10 @@

from __future__ import absolute_import, print_function

import datetime
import os
import posixpath
import shutil
import signal
import subprocess
import sys
@@ -16,8 +18,8 @@ from contextlib import closing

from six.moves.urllib_request import urlopen

from mozdevice import ADBDeviceFactory, ADBTimeoutError
from remoteautomation import RemoteAutomation, fennecLogcatFilters
from mozdevice import ADBDeviceFactory, ADBTimeoutError, RemoteProcessMonitor
import mozcrash

from output import OutputHandler
from runreftest import RefTest, ReftestResolver, build_obj
@@ -123,7 +125,7 @@ class ReftestServer:

        if not os.access(xpcshell, os.F_OK):
            raise Exception("xpcshell not found at %s" % xpcshell)
        if RemoteAutomation.elf_arm(xpcshell):
        if RemoteProcessMonitor.elf_arm(xpcshell):
            raise Exception(
                "xpcshell at %s is an ARM binary; please use "
                "the --utility-path argument to specify the path "
@@ -221,19 +223,7 @@ class RemoteReftest(RefTest):
        self.outputHandler = OutputHandler(
            self.log, options.utilityPath, options.symbolsPath
        )
        # RemoteAutomation.py's 'messageLogger' is also used by mochitest. Mimic a mochitest
        # MessageLogger object to re-use this code path.
        self.outputHandler.write = self.outputHandler.__call__
        args = {"messageLogger": self.outputHandler}
        self.automation = RemoteAutomation(
            self.device,
            appName=options.app,
            remoteProfile=self.remoteProfile,
            remoteLog=options.remoteLogFile,
            processArgs=args,
        )

        self.environment = self.automation.environment
        self.SERVER_STARTUP_TIMEOUT = 90

        self.remoteCache = os.path.join(options.remoteTestRoot, "cache/")
@@ -379,7 +369,7 @@ class RemoteReftest(RefTest):
    def printDeviceInfo(self, printLogcat=False):
        try:
            if printLogcat:
                logcat = self.device.get_logcat(filter_out_regexps=fennecLogcatFilters)
                logcat = self.device.get_logcat()
                for l in logcat:
                    ul = l.decode("utf-8", errors="replace")
                    sl = ul.encode("iso8859-1", errors="replace")
@@ -399,8 +389,37 @@ class RemoteReftest(RefTest):
        except Exception as e:
            print("WARNING: Error getting device information: %s" % str(e))

    def environment(self, **kwargs):
        return self.automation.environment(**kwargs)
    def environment(self, env=None, crashreporter=True, **kwargs):
        # Since running remote, do not mimic the local env: do not copy os.environ
        if env is None:
            env = {}

        if crashreporter:
            env["MOZ_CRASHREPORTER_NO_REPORT"] = "1"
            env["MOZ_CRASHREPORTER"] = "1"
            env["MOZ_CRASHREPORTER_SHUTDOWN"] = "1"
        else:
            env["MOZ_CRASHREPORTER_DISABLE"] = "1"

        # Crash on non-local network connections by default.
        # MOZ_DISABLE_NONLOCAL_CONNECTIONS can be set to "0" to temporarily
        # enable non-local connections for the purposes of local testing.
        # Don't override the user's choice here. See bug 1049688.
        env.setdefault("MOZ_DISABLE_NONLOCAL_CONNECTIONS", "1")

        # Send an env var noting that we are in automation. Passing any
        # value except the empty string will declare the value to exist.
        #
        # This may be used to disabled network connections during testing, e.g.
        # Switchboard & telemetry uploads.
        env.setdefault("MOZ_IN_AUTOMATION", "1")

        # Set WebRTC logging in case it is not set yet.
        env.setdefault("R_LOG_LEVEL", "6")
        env.setdefault("R_LOG_DESTINATION", "stderr")
        env.setdefault("R_LOG_VERBOSE", "1")

        return env

    def buildBrowserEnv(self, options, profileDir):
        browserEnv = RefTest.buildBrowserEnv(self, options, profileDir)
@@ -435,23 +454,64 @@ class RemoteReftest(RefTest):

        self.log.info("Running with e10s: {}".format(options.e10s))
        self.log.info("Running with fission: {}".format(options.fission))
        status, self.lastTestSeen = self.automation.runApp(
            None,
            env,

        rpm = RemoteProcessMonitor(
            binary,
            profile.profile,
            self.device,
            self.log,
            self.outputHandler,
            options.remoteLogFile,
            self.remoteProfile,
        )
        startTime = datetime.datetime.now()
        status = 0
        profileDirectory = self.remoteProfile + "/"
        cmdargs.extend(("-no-remote", "-profile", profileDirectory))

        pid = rpm.launch(
            binary,
            debuggerInfo,
            None,
            cmdargs,
            utilityPath=options.utilityPath,
            xrePath=options.xrePath,
            debuggerInfo=debuggerInfo,
            symbolsPath=symbolsPath,
            timeout=timeout,
            env=env,
            e10s=options.e10s,
        )
        self.log.info("remotereftest.py | Application pid: %d" % pid)
        if not rpm.wait(timeout):
            status = 1
        self.log.info(
            "remotereftest.py | Application ran for: %s"
            % str(datetime.datetime.now() - startTime)
        )
        crashed = self.check_for_crashes(symbolsPath, rpm.last_test_seen)
        if crashed:
            status = 1

        self.cleanup(profile.profile)
        return status

    def check_for_crashes(self, symbols_path, last_test_seen):
        """
        Pull any minidumps from remote profile and log any associated crashes.
        """
        try:
            dump_dir = tempfile.mkdtemp()
            remote_crash_dir = posixpath.join(self.remoteProfile, "minidumps")
            if not self.device.is_dir(remote_crash_dir):
                return False
            self.device.pull(remote_crash_dir, dump_dir)
            crashed = mozcrash.log_crashes(
                self.log, dump_dir, symbols_path, test=last_test_seen
            )
        finally:
            try:
                shutil.rmtree(dump_dir)
            except Exception as e:
                self.log.warning(
                    "unable to remove directory %s: %s" % (dump_dir, str(e))
                )
        return crashed

    def cleanup(self, profileDir):
        self.device.rm(self.remoteTestRoot, force=True, recursive=True)
        self.device.rm(self.remoteProfile, force=True, recursive=True)
@@ -45,7 +45,7 @@ def get_failures(task_id):
        re.compile(r"leakcheck"),
        re.compile(r"mozrunner-startup"),
        re.compile(r"pid: "),
        re.compile(r"remoteautomation.py"),
        re.compile(r"RemoteProcessMonitor"),
        re.compile(r"unknown test url"),
    ]
    re_extract_tests = [
@@ -83,7 +83,6 @@ MOCHITEST_CHROME_MANIFESTS += [
BROWSER_CHROME_MANIFESTS += ["baselinecoverage/browser_chrome/browser.ini"]

TEST_HARNESS_FILES.testing.mochitest += [
    "/build/mobile/remoteautomation.py",
    "/build/pgo/server-locations.txt",
    "/build/valgrind/cross-architecture.sup",
    "/build/valgrind/i386-pc-linux-gnu.sup",
@@ -3,19 +3,22 @@
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

from __future__ import absolute_import
import datetime
import os
import posixpath
import shutil
import sys
import tempfile
import traceback
import uuid

sys.path.insert(0, os.path.abspath(os.path.realpath(os.path.dirname(__file__))))

from remoteautomation import RemoteAutomation, fennecLogcatFilters
from runtests import MochitestDesktop, MessageLogger
from mochitest_options import MochitestArgumentParser, build_obj
from mozdevice import ADBDeviceFactory, ADBTimeoutError
from mozdevice import ADBDeviceFactory, ADBTimeoutError, RemoteProcessMonitor
from mozscreenshot import dump_screen, dump_device_screen
import mozcrash
import mozinfo

SCRIPT_DIR = os.path.abspath(os.path.realpath(os.path.dirname(__file__)))
@@ -64,18 +67,8 @@ class MochiRemote(MochitestDesktop):
        self.remoteProfile = posixpath.join(options.remoteTestRoot, "profile")
        self.device.rm(self.remoteProfile, force=True, recursive=True)

        self.counts = dict()
        self.message_logger = MessageLogger(logger=None)
        self.message_logger.logger = self.log
        process_args = {"messageLogger": self.message_logger, "counts": self.counts}
        self.automation = RemoteAutomation(
            self.device,
            options.remoteappname,
            self.remoteProfile,
            self.remoteLogFile,
            processArgs=process_args,
        )
        self.environment = self.automation.environment

        # Check that Firefox is installed
        expected = options.app.split("/")[-1]
@@ -102,10 +95,10 @@ class MochiRemote(MochitestDesktop):
        self.device.rm(self.remoteChromeTestDir, force=True, recursive=True)
        self.device.mkdir(self.remoteChromeTestDir, parents=True)

        procName = options.app.split("/")[-1]
        self.device.stop_application(procName)
        if self.device.process_exist(procName):
            self.log.warning("unable to kill %s before running tests!" % procName)
        self.appName = options.remoteappname
        self.device.stop_application(self.appName)
        if self.device.process_exist(self.appName):
            self.log.warning("unable to kill %s before running tests!" % self.appName)

        # Add Android version (SDK level) to mozinfo so that manifest entries
        # can be conditional on android_version.
@@ -193,7 +186,7 @@ class MochiRemote(MochitestDesktop):
            sys.exit(1)

        xpcshell_path = os.path.join(options.utilityPath, xpcshell)
        if RemoteAutomation.elf_arm(xpcshell_path):
        if RemoteProcessMonitor.elf_arm(xpcshell_path):
            self.log.error(
                "xpcshell at %s is an ARM binary; please use "
                "the --utility-path argument to specify the path "
@@ -278,7 +271,7 @@ class MochiRemote(MochitestDesktop):
    def printDeviceInfo(self, printLogcat=False):
        try:
            if printLogcat:
                logcat = self.device.get_logcat(filter_out_regexps=fennecLogcatFilters)
                logcat = self.device.get_logcat()
                for l in logcat:
                    ul = l.decode("utf-8", errors="replace")
                    sl = ul.encode("iso8859-1", errors="replace")
@@ -302,6 +295,38 @@ class MochiRemote(MochitestDesktop):
        # TODO: bug 1149374
        return None

    def environment(self, env=None, crashreporter=True, **kwargs):
        # Since running remote, do not mimic the local env: do not copy os.environ
        if env is None:
            env = {}

        if crashreporter:
            env["MOZ_CRASHREPORTER_NO_REPORT"] = "1"
            env["MOZ_CRASHREPORTER"] = "1"
            env["MOZ_CRASHREPORTER_SHUTDOWN"] = "1"
        else:
            env["MOZ_CRASHREPORTER_DISABLE"] = "1"

        # Crash on non-local network connections by default.
        # MOZ_DISABLE_NONLOCAL_CONNECTIONS can be set to "0" to temporarily
        # enable non-local connections for the purposes of local testing.
        # Don't override the user's choice here. See bug 1049688.
        env.setdefault("MOZ_DISABLE_NONLOCAL_CONNECTIONS", "1")

        # Send an env var noting that we are in automation. Passing any
        # value except the empty string will declare the value to exist.
        #
        # This may be used to disabled network connections during testing, e.g.
        # Switchboard & telemetry uploads.
        env.setdefault("MOZ_IN_AUTOMATION", "1")

        # Set WebRTC logging in case it is not set yet.
        env.setdefault("R_LOG_LEVEL", "6")
        env.setdefault("R_LOG_DESTINATION", "stderr")
        env.setdefault("R_LOG_VERBOSE", "1")

        return env

    def buildBrowserEnv(self, options, debugger=False):
        browserEnv = MochitestDesktop.buildBrowserEnv(self, options, debugger=debugger)
        # remove desktop environment not used on device
@@ -320,23 +345,93 @@ class MochiRemote(MochitestDesktop):
        browserEnv["MOZ_UPLOAD_DIR"] = self.remoteMozLog
        return browserEnv

    def runApp(self, *args, **kwargs):
        """front-end automation's `runApp` functionality until FennecRunner is written"""
    def runApp(
        self,
        testUrl,
        env,
        app,
        profile,
        extraArgs,
        utilityPath,
        debuggerInfo=None,
        valgrindPath=None,
        valgrindArgs=None,
        valgrindSuppFiles=None,
        symbolsPath=None,
        timeout=-1,
        detectShutdownLeaks=False,
        screenshotOnFail=False,
        bisectChunk=None,
        marionette_args=None,
        e10s=True,
    ):
        """
        Run the app, log the duration it took to execute, return the status code.
        Kill the app if it outputs nothing for |timeout| seconds.
        """

        # remoteautomation `runApp` takes the profile path,
        # whereas runtest.py's `runApp` takes a mozprofile object.
        if "profileDir" not in kwargs and "profile" in kwargs:
            kwargs["profileDir"] = kwargs.pop("profile").profile
        if timeout == -1:
            timeout = self.DEFAULT_TIMEOUT

        # remove args not supported by automation
        kwargs.pop("marionette_args", None)
        rpm = RemoteProcessMonitor(
            self.appName,
            self.device,
            self.log,
            self.message_logger,
            self.remoteLogFile,
            self.remoteProfile,
        )
        startTime = datetime.datetime.now()
        status = 0
        profileDirectory = self.remoteProfile + "/"
        extraArgs.extend(("-no-remote", "-profile", profileDirectory))

        ret, _ = self.automation.runApp(*args, **kwargs)
        self.countpass += self.counts["pass"]
        self.countfail += self.counts["fail"]
        self.counttodo += self.counts["todo"]
        pid = rpm.launch(
            app,
            debuggerInfo,
            testUrl,
            extraArgs,
            env=self.environment(env=env, crashreporter=not debuggerInfo),
            e10s=e10s,
        )
        self.log.info("runtestsremote.py | Application pid: %d" % pid)
        if not rpm.wait(timeout):
            status = 1
        self.log.info(
            "runtestsremote.py | Application ran for: %s"
            % str(datetime.datetime.now() - startTime)
        )
        crashed = self.check_for_crashes(symbolsPath, rpm.last_test_seen)
        if crashed:
            status = 1

        return ret, None
        self.countpass += rpm.counts["pass"]
        self.countfail += rpm.counts["fail"]
        self.counttodo += rpm.counts["todo"]

        return status, rpm.last_test_seen

    def check_for_crashes(self, symbols_path, last_test_seen):
        """
        Pull any minidumps from remote profile and log any associated crashes.
        """
        try:
            dump_dir = tempfile.mkdtemp()
            remote_crash_dir = posixpath.join(self.remoteProfile, "minidumps")
            if not self.device.is_dir(remote_crash_dir):
                return False
            self.device.pull(remote_crash_dir, dump_dir)
            crashed = mozcrash.log_crashes(
                self.log, dump_dir, symbols_path, test=last_test_seen
            )
        finally:
            try:
                shutil.rmtree(dump_dir)
            except Exception as e:
                self.log.warning(
                    "unable to remove directory %s: %s" % (dump_dir, str(e))
                )
        return crashed


def run_test_harness(parser, options):
@@ -348,10 +443,6 @@ def run_test_harness(parser, options):
    )

    options.runByManifest = True
    # roboextender is used by mochitest-chrome tests like test_java_addons.html,
    # but not by any plain mochitests
    if options.flavor != "chrome":
        options.extensionsToExclude.append("roboextender@mozilla.org")

    mochitest = MochiRemote(options)
@@ -159,6 +159,7 @@ from __future__ import absolute_import
from .adb import ADBError, ADBProcessError, ADBTimeoutError
from .adb import ADBProcess, ADBCommand, ADBHost, ADBDevice, ADBDeviceFactory
from .adb_android import ADBAndroid
from .remote_process_monitor import RemoteProcessMonitor

__all__ = [
    "ADBError",
@@ -170,4 +171,5 @@ __all__ = [
    "ADBDevice",
    "ADBAndroid",
    "ADBDeviceFactory",
    "RemoteProcessMonitor",
]
testing/mozbase/mozdevice/mozdevice/remote_process_monitor.py (new file, 285 lines)
@@ -0,0 +1,285 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

from __future__ import absolute_import, print_function
import re
import six
import time

from .adb import ADBTimeoutError


class RemoteProcessMonitor:
    """
    RemoteProcessMonitor provides a convenient way to run a remote process,
    dump its log file, and wait for it to end.
    """

    def __init__(
        self,
        app_name,
        device,
        log,
        message_logger,
        remote_log_file,
        remote_profile,
    ):
        self.app_name = app_name
        self.device = device
        self.log = log
        self.remote_log_file = remote_log_file
        self.remote_profile = remote_profile
        self.counts = {}
        self.counts["pass"] = 0
        self.counts["fail"] = 0
        self.counts["todo"] = 0
        self.last_test_seen = "RemoteProcessMonitor"
        self.message_logger = message_logger
        if self.device.is_file(self.remote_log_file):
            self.device.rm(self.remote_log_file)
            self.log.info("deleted remote log %s" % self.remote_log_file)

    def launch(self, app, debugger_info, test_url, extra_args, env, e10s):
        """
        Start the remote activity.
        """
        if self.app_name and self.device.process_exist(self.app_name):
            self.log.info("%s is already running. Stopping..." % self.app_name)
            self.device.stop_application(self.app_name)
        args = []
        if debugger_info:
            args.extend(debugger_info.args)
            args.append(app)
        args.extend(extra_args)
        activity = "TestRunnerActivity"
        self.device.launch_activity(
            self.app_name,
            activity_name=activity,
            e10s=e10s,
            moz_env=env,
            extra_args=args,
            url=test_url,
        )
        return self.pid

    @property
    def pid(self):
        """
        Determine the pid of the remote process (or the first process with
        the same name).
        """
        procs = self.device.get_process_list()
        # limit the comparison to the first 75 characters due to a
        # limitation in processname length in android.
        pids = [proc[0] for proc in procs if proc[1] == self.app_name[:75]]
        if pids is None or len(pids) < 1:
            return 0
        return pids[0]

    def read_stdout(self):
        """
        Fetch the full remote log file, log any new content and return True if new
        content is processed.
        """
        try:
            new_log_content = self.device.get_file(
                self.remote_log_file, offset=self.stdout_len
            )
        except ADBTimeoutError:
            raise
        except Exception as e:
            self.log.error(
                "%s | exception reading log: %s" % (self.last_test_seen, str(e))
            )
            return False
        if not new_log_content:
            return False

        self.stdout_len += len(new_log_content)
        new_log_content = six.ensure_str(new_log_content, errors="replace")

        self.log_buffer += new_log_content
        lines = self.log_buffer.split("\n")
        lines = [l for l in lines if l]

        if lines:
            if self.log_buffer.endswith("\n"):
                # all lines are complete; no need to buffer
                self.log_buffer = ""
            else:
                # keep the last (unfinished) line in the buffer
                self.log_buffer = lines[-1]
                del lines[-1]
        if not lines:
            return False

        for line in lines:
            # This passes the line to the logger (to be logged or buffered)
            if isinstance(line, six.text_type):
                # if line is unicode - let's encode it to bytes
                parsed_messages = self.message_logger.write(
                    line.encode("UTF-8", "replace")
                )
            else:
                # if line is bytes type, write it as it is
                parsed_messages = self.message_logger.write(line)

            for message in parsed_messages:
                if isinstance(message, dict):
                    if message.get("action") == "test_start":
                        self.last_test_seen = message["test"]
                    elif message.get("action") == "test_end":
                        self.last_test_seen = "{} (finished)".format(message["test"])
                    elif message.get("action") == "suite_end":
                        self.last_test_seen = "Last test finished"
                    elif message.get("action") == "log":
                        line = message["message"].strip()
                        m = re.match(".*:\s*(\d*)", line)
                        if m:
                            try:
                                val = int(m.group(1))
                                if "Passed:" in line:
                                    self.counts["pass"] += val
                                    self.last_test_seen = "Last test finished"
                                elif "Failed:" in line:
                                    self.counts["fail"] += val
                                elif "Todo:" in line:
                                    self.counts["todo"] += val
                            except ADBTimeoutError:
                                raise
                            except Exception:
                                pass

        return True

    def wait(self, timeout=None):
        """
        Wait for the remote process to end (or for its activity to go to background).
        While waiting, periodically retrieve the process output and print it.
        If the process is still running but no output is received in *timeout*
        seconds, return False; else, once the process exits/goes to background,
        return True.
        """
        self.log_buffer = ""
        self.stdout_len = 0

        timer = 0
        output_timer = 0
        interval = 10
        status = True
        top = self.app_name

        # wait for log creation on startup
        retries = 0
        while retries < 20 and not self.device.is_file(self.remote_log_file):
            retries += 1
            time.sleep(1)
        if self.device.is_file(self.remote_log_file):
            # We must change the remote log's permissions so that the shell can read it.
            self.device.chmod(self.remote_log_file, mask="666")
        else:
            self.log.warning(
                "Failed wait for remote log: %s missing?" % self.remote_log_file
            )

        while top == self.app_name:
            has_output = self.read_stdout()
            if has_output:
                output_timer = 0
                if self.counts["pass"] > 0:
                    interval = 0.5
            time.sleep(interval)
            timer += interval
            output_timer += interval
            if timeout and output_timer > timeout:
                status = False
                break
            if not has_output:
                top = self.device.get_top_activity(timeout=60)
                if top is None:
                    self.log.info("Failed to get top activity, retrying, once...")
                    top = self.device.get_top_activity(timeout=60)

        # Flush anything added to stdout during the sleep
        self.read_stdout()
        self.log.info("wait for %s complete; top activity=%s" % (self.app_name, top))
        if top == self.app_name:
            self.log.info("%s unexpectedly found running. Killing..." % self.app_name)
            self.kill()
        if not status:
            self.log.error(
                "TEST-UNEXPECTED-FAIL | %s | "
                "application timed out after %d seconds with no output"
                % (self.last_test_seen, int(timeout))
            )
        return status

    def kill(self):
        """
        End a troublesome remote process: Trigger ANR and breakpad dumps, then
        force the application to end.
        """

        # Trigger an ANR report with "kill -3" (SIGQUIT)
        try:
            self.device.pkill(self.app_name, sig=3, attempts=1)
        except ADBTimeoutError:
            raise
        except:  # NOQA: E722
            pass
        time.sleep(3)

        # Trigger a breakpad dump with "kill -6" (SIGABRT)
        try:
            self.device.pkill(self.app_name, sig=6, attempts=1)
        except ADBTimeoutError:
            raise
        except:  # NOQA: E722
            pass

        # Wait for process to end
        retries = 0
        while retries < 3:
            if self.device.process_exist(self.app_name):
                self.log.info(
                    "%s still alive after SIGABRT: waiting..." % self.app_name
                )
                time.sleep(5)
            else:
                break
            retries += 1
        if self.device.process_exist(self.app_name):
            try:
                self.device.pkill(self.app_name, sig=9, attempts=1)
            except ADBTimeoutError:
                raise
            except:  # NOQA: E722
                self.log.error("%s still alive after SIGKILL!" % self.app_name)
        if self.device.process_exist(self.app_name):
            self.device.stop_application(self.app_name)

        # Test harnesses use the MOZ_CRASHREPORTER environment variables to suppress
        # the interactive crash reporter, but that may not always be effective;
        # check for and cleanup errant crashreporters.
        crashreporter = "%s.CrashReporter" % self.app_name
        if self.device.process_exist(crashreporter):
            self.log.warning(
                "%s unexpectedly found running. Killing..." % crashreporter
            )
            try:
                self.device.pkill(crashreporter)
            except ADBTimeoutError:
                raise
            except:  # NOQA: E722
                pass
            if self.device.process_exist(crashreporter):
                self.log.error("%s still running!!" % crashreporter)

    @staticmethod
    def elf_arm(filename):
        """
        Determine if the specified file is an ARM binary.
        """
        data = open(filename, "rb").read(20)
        return data[:4] == "\x7fELF" and ord(data[18]) == 40  # EM_ARM