Bug 1672023 - Remove excluded files from black.yml r=sylvestre,perftest-reviewers,geckoview-reviewers,agi

These files were omitted from the original patch because reformatting them required some manual intervention to avoid breaking unit tests. In most cases the `noqa` comments were already present and only had to be moved from one line to another after `black`'s reformatting, but occasionally `black` collapsed several pieces onto one long line, which required introducing new `noqa` comments.
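
For illustration, here is a minimal, hypothetical sketch of the kind of fixup described above (the class name, method body, and URLs are stand-ins, not code from this patch): a `# NOQA: E501` comment that used to sit at the end of one long helper call has to follow the long string literal once `black` splits the call across lines.

class Example:
    # Stand-in for the Marionette helper used by the real tests.
    def runAsyncCode(self, script, script_args=()):
        return script, script_args

    def createSession(self):
        # Before black, this call sat on one long line, with the suppression
        # comment at the end of that line:
        #   self.runAsyncCode("""...script...""", script_args=(urls,))  # NOQA: E501
        # After black splits the call across lines, the NOQA comment moves next
        # to the long string literal it excuses; where black instead joined
        # previously wrapped lines into one overlong line, a new NOQA was added.
        return self.runAsyncCode(
            """
            let resolve = arguments[arguments.length - 1];
            """,  # NOQA: E501
            script_args=(["about:robots", "about:mozilla"],),
        )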

Aside from the `black` autoformat and those manual fixups, this patch contains no other changes.

# ignore-this-changeset

Differential Revision: https://phabricator.services.mozilla.com/D94052
Ricky Stewart 2020-10-21 21:29:30 +00:00
parent 50762dacab
commit 8b352f1843
35 changed files with 9618 additions and 6804 deletions


@ -41,7 +41,8 @@ class TestFirefoxRefresh(MarionetteTestCase):
_expectedURLs = ["about:robots", "about:mozilla"]
def savePassword(self):
self.runCode("""
self.runCode(
"""
let myLogin = new global.LoginInfo(
"test.marionette.mozilla.com",
"http://test.marionette.mozilla.com/some/form/",
@ -52,10 +53,13 @@ class TestFirefoxRefresh(MarionetteTestCase):
"password"
);
Services.logins.addLogin(myLogin)
""", script_args=(self._username, self._password))
""",
script_args=(self._username, self._password),
)
def createBookmarkInMenu(self):
error = self.runAsyncCode("""
error = self.runAsyncCode(
"""
// let url = arguments[0];
// let title = arguments[1];
// let resolve = arguments[arguments.length - 1];
@ -63,12 +67,15 @@ class TestFirefoxRefresh(MarionetteTestCase):
PlacesUtils.bookmarks.insert({
parentGuid: PlacesUtils.bookmarks.menuGuid, url, title
}).then(() => resolve(false), resolve);
""", script_args=(self._bookmarkURL, self._bookmarkText))
""",
script_args=(self._bookmarkURL, self._bookmarkText),
)
if error:
print(error)
def createBookmarksOnToolbar(self):
error = self.runAsyncCode("""
error = self.runAsyncCode(
"""
let resolve = arguments[arguments.length - 1];
let children = [];
for (let i = 1; i <= 5; i++) {
@ -78,12 +85,14 @@ class TestFirefoxRefresh(MarionetteTestCase):
guid: PlacesUtils.bookmarks.toolbarGuid,
children
}).then(() => resolve(false), resolve);
""")
"""
)
if error:
print(error)
def createHistory(self):
error = self.runAsyncCode("""
error = self.runAsyncCode(
"""
let resolve = arguments[arguments.length - 1];
PlacesUtils.history.insert({
url: arguments[0],
@ -94,12 +103,15 @@ class TestFirefoxRefresh(MarionetteTestCase):
}]
}).then(() => resolve(false),
ex => resolve("Unexpected error in adding visit: " + ex));
""", script_args=(self._historyURL, self._historyTitle))
""",
script_args=(self._historyURL, self._historyTitle),
)
if error:
print(error)
def createFormHistory(self):
error = self.runAsyncCode("""
error = self.runAsyncCode(
"""
let updateDefinition = {
op: "add",
fieldname: arguments[0],
@ -119,14 +131,17 @@ class TestFirefoxRefresh(MarionetteTestCase):
}
}
});
""", script_args=(self._formHistoryFieldName, self._formHistoryValue))
""",
script_args=(self._formHistoryFieldName, self._formHistoryValue),
)
if error:
print(error)
def createFormAutofill(self):
if not self._formAutofillAvailable:
return
self._formAutofillAddressGuid = self.runAsyncCode("""
self._formAutofillAddressGuid = self.runAsyncCode(
"""
let resolve = arguments[arguments.length - 1];
const TEST_ADDRESS_1 = {
"given-name": "John",
@ -144,19 +159,29 @@ class TestFirefoxRefresh(MarionetteTestCase):
return global.formAutofillStorage.initialize().then(() => {
return global.formAutofillStorage.addresses.add(TEST_ADDRESS_1);
}).then(resolve);
""")
"""
)
def createCookie(self):
self.runCode("""
self.runCode(
"""
// Expire in 15 minutes:
let expireTime = Math.floor(Date.now() / 1000) + 15 * 60;
Services.cookies.add(arguments[0], arguments[1], arguments[2], arguments[3],
true, false, false, expireTime, {},
Ci.nsICookie.SAMESITE_NONE, Ci.nsICookie.SCHEME_UNSET);
""", script_args=(self._cookieHost, self._cookiePath, self._cookieName, self._cookieValue))
""",
script_args=(
self._cookieHost,
self._cookiePath,
self._cookieName,
self._cookieValue,
),
)
def createSession(self):
self.runAsyncCode("""
self.runAsyncCode(
"""
let resolve = arguments[arguments.length - 1];
const COMPLETE_STATE = Ci.nsIWebProgressListener.STATE_STOP +
Ci.nsIWebProgressListener.STATE_IS_NETWORK;
@ -193,66 +218,82 @@ class TestFirefoxRefresh(MarionetteTestCase):
gBrowser.removeTab(tab);
}
}
""", script_args=(self._expectedURLs,)) # NOQA: E501
""", # NOQA: E501
script_args=(self._expectedURLs,),
)
def createFxa(self):
# This script will write an entry to the login manager and create
# a signedInUser.json in the profile dir.
self.runAsyncCode("""
self.runAsyncCode(
"""
let resolve = arguments[arguments.length - 1];
Cu.import("resource://gre/modules/FxAccountsStorage.jsm");
let storage = new FxAccountsStorageManager();
let data = {email: "test@test.com", uid: "uid", keyFetchToken: "top-secret"};
storage.initialize(data);
storage.finalize().then(resolve);
""")
"""
)
def createSync(self):
# This script will write the canonical preference which indicates a user
# is signed into sync.
self.marionette.execute_script("""
self.marionette.execute_script(
"""
Services.prefs.setStringPref("services.sync.username", "test@test.com");
""")
"""
)
def checkPassword(self):
loginInfo = self.marionette.execute_script("""
loginInfo = self.marionette.execute_script(
"""
let ary = Services.logins.findLogins(
"test.marionette.mozilla.com",
"http://test.marionette.mozilla.com/some/form/",
null, {});
return ary.length ? ary : {username: "null", password: "null"};
""")
"""
)
self.assertEqual(len(loginInfo), 1)
self.assertEqual(loginInfo[0]['username'], self._username)
self.assertEqual(loginInfo[0]['password'], self._password)
self.assertEqual(loginInfo[0]["username"], self._username)
self.assertEqual(loginInfo[0]["password"], self._password)
loginCount = self.marionette.execute_script("""
loginCount = self.marionette.execute_script(
"""
return Services.logins.getAllLogins().length;
""")
"""
)
# Note that we expect 2 logins - one from us, one from sync.
self.assertEqual(loginCount, 2, "No other logins are present")
def checkBookmarkInMenu(self):
titleInBookmarks = self.runAsyncCode("""
titleInBookmarks = self.runAsyncCode(
"""
let [url, resolve] = arguments;
PlacesUtils.bookmarks.fetch({url}).then(
bookmark => resolve(bookmark ? bookmark.title : ""),
ex => resolve(ex)
);
""", script_args=(self._bookmarkURL,))
""",
script_args=(self._bookmarkURL,),
)
self.assertEqual(titleInBookmarks, self._bookmarkText)
def checkBookmarkToolbarVisibility(self):
toolbarVisible = self.marionette.execute_script("""
toolbarVisible = self.marionette.execute_script(
"""
const BROWSER_DOCURL = AppConstants.BROWSER_CHROME_URL;
return Services.xulStore.getValue(BROWSER_DOCURL, "PersonalToolbar", "collapsed");
""")
"""
)
if toolbarVisible == "":
toolbarVisible = "false"
self.assertEqual(toolbarVisible, "false")
def checkHistory(self):
historyResult = self.runAsyncCode("""
historyResult = self.runAsyncCode(
"""
let resolve = arguments[arguments.length - 1];
PlacesUtils.history.fetch(arguments[0]).then(pageInfo => {
if (!pageInfo) {
@ -263,15 +304,18 @@ class TestFirefoxRefresh(MarionetteTestCase):
}).catch(e => {
resolve("Unexpected error in fetching page: " + e);
});
""", script_args=(self._historyURL,))
""",
script_args=(self._historyURL,),
)
if type(historyResult) == str:
self.fail(historyResult)
return
self.assertEqual(historyResult['title'], self._historyTitle)
self.assertEqual(historyResult["title"], self._historyTitle)
def checkFormHistory(self):
formFieldResults = self.runAsyncCode("""
formFieldResults = self.runAsyncCode(
"""
let resolve = arguments[arguments.length - 1];
let results = [];
global.FormHistory.search(["value"], {fieldname: arguments[0]}, {
@ -285,20 +329,24 @@ class TestFirefoxRefresh(MarionetteTestCase):
resolve(results);
},
});
""", script_args=(self._formHistoryFieldName,))
""",
script_args=(self._formHistoryFieldName,),
)
if type(formFieldResults) == str:
self.fail(formFieldResults)
return
formFieldResultCount = len(formFieldResults)
self.assertEqual(formFieldResultCount, 1,
"Should have exactly 1 entry for this field, got %d" %
formFieldResultCount)
self.assertEqual(
formFieldResultCount,
1,
"Should have exactly 1 entry for this field, got %d" % formFieldResultCount,
)
if formFieldResultCount == 1:
self.assertEqual(
formFieldResults[0]['value'], self._formHistoryValue)
self.assertEqual(formFieldResults[0]["value"], self._formHistoryValue)
formHistoryCount = self.runAsyncCode("""
formHistoryCount = self.runAsyncCode(
"""
let [resolve] = arguments;
let count;
let callbacks = {
@ -308,33 +356,42 @@ class TestFirefoxRefresh(MarionetteTestCase):
},
};
global.FormHistory.count({}, callbacks);
""")
self.assertEqual(formHistoryCount, 1,
"There should be only 1 entry in the form history")
"""
)
self.assertEqual(
formHistoryCount, 1, "There should be only 1 entry in the form history"
)
def checkFormAutofill(self):
if not self._formAutofillAvailable:
return
formAutofillResults = self.runAsyncCode("""
formAutofillResults = self.runAsyncCode(
"""
let resolve = arguments[arguments.length - 1];
return global.formAutofillStorage.initialize().then(() => {
return global.formAutofillStorage.addresses.getAll()
}).then(resolve);
""",)
""",
)
if type(formAutofillResults) == str:
self.fail(formAutofillResults)
return
formAutofillAddressCount = len(formAutofillResults)
self.assertEqual(formAutofillAddressCount, 1,
"Should have exactly 1 saved address, got %d" % formAutofillAddressCount)
self.assertEqual(
formAutofillAddressCount,
1,
"Should have exactly 1 saved address, got %d" % formAutofillAddressCount,
)
if formAutofillAddressCount == 1:
self.assertEqual(
formAutofillResults[0]['guid'], self._formAutofillAddressGuid)
formAutofillResults[0]["guid"], self._formAutofillAddressGuid
)
def checkCookie(self):
cookieInfo = self.runCode("""
cookieInfo = self.runCode(
"""
try {
let cookies = Services.cookies.getCookiesFromHost(arguments[0], {});
let cookie = null;
@ -351,18 +408,22 @@ class TestFirefoxRefresh(MarionetteTestCase):
} catch (ex) {
return "got exception trying to fetch cookie: " + ex;
}
""", script_args=(self._cookieHost,))
""",
script_args=(self._cookieHost,),
)
if not isinstance(cookieInfo, dict):
self.fail(cookieInfo)
return
self.assertEqual(cookieInfo['path'], self._cookiePath)
self.assertEqual(cookieInfo['value'], self._cookieValue)
self.assertEqual(cookieInfo['name'], self._cookieName)
self.assertEqual(cookieInfo["path"], self._cookiePath)
self.assertEqual(cookieInfo["value"], self._cookieValue)
self.assertEqual(cookieInfo["name"], self._cookieName)
def checkSession(self):
tabURIs = self.runCode("""
tabURIs = self.runCode(
"""
return [... gBrowser.browsers].map(b => b.currentURI && b.currentURI.spec)
""")
"""
)
self.assertSequenceEqual(tabURIs, ["about:welcomeback"])
# Dismiss modal dialog if any. This is mainly to dismiss the check for
@ -373,7 +434,8 @@ class TestFirefoxRefresh(MarionetteTestCase):
except NoAlertPresentException:
pass
tabURIs = self.runAsyncCode("""
tabURIs = self.runAsyncCode(
"""
let resolve = arguments[arguments.length - 1]
let mm = gBrowser.selectedBrowser.messageManager;
@ -396,11 +458,13 @@ class TestFirefoxRefresh(MarionetteTestCase):
};
mm.loadFrameScript("data:application/javascript,(" + fs.toString() + ")()", true);
""") # NOQA: E501
""" # NOQA: E501
)
self.assertSequenceEqual(tabURIs, self._expectedURLs)
def checkFxA(self):
result = self.runAsyncCode("""
result = self.runAsyncCode(
"""
Cu.import("resource://gre/modules/FxAccountsStorage.jsm");
let resolve = arguments[arguments.length - 1];
let storage = new FxAccountsStorageManager();
@ -414,7 +478,8 @@ class TestFirefoxRefresh(MarionetteTestCase):
}).catch(err => {
resolve(err.toString());
});
""")
"""
)
if type(result) != dict:
self.fail(result)
return
@ -423,9 +488,11 @@ class TestFirefoxRefresh(MarionetteTestCase):
self.assertEqual(result["accountData"]["keyFetchToken"], "top-secret")
def checkSync(self, expect_sync_user):
pref_value = self.marionette.execute_script("""
pref_value = self.marionette.execute_script(
"""
return Services.prefs.getStringPref("services.sync.username", null);
""")
"""
)
expected_value = "test@test.com" if expect_sync_user else None
self.assertEqual(pref_value, expected_value)
@ -456,35 +523,35 @@ class TestFirefoxRefresh(MarionetteTestCase):
def setUpScriptData(self):
self.marionette.set_context(self.marionette.CONTEXT_CHROME)
self.runCode("""
self.runCode(
"""
window.global = {};
global.LoginInfo = Components.Constructor("@mozilla.org/login-manager/loginInfo;1", "nsILoginInfo", "init");
global.profSvc = Cc["@mozilla.org/toolkit/profile-service;1"].getService(Ci.nsIToolkitProfileService);
global.Preferences = Cu.import("resource://gre/modules/Preferences.jsm", {}).Preferences;
global.FormHistory = Cu.import("resource://gre/modules/FormHistory.jsm", {}).FormHistory;
""") # NOQA: E501
self._formAutofillAvailable = self.runCode("""
""" # NOQA: E501
)
self._formAutofillAvailable = self.runCode(
"""
try {
global.formAutofillStorage = Cu.import("resource://formautofill/FormAutofillStorage.jsm", {}).formAutofillStorage;
} catch(e) {
return false;
}
return true;
""") # NOQA: E501
""" # NOQA: E501
)
def runCode(self, script, *args, **kwargs):
return self.marionette.execute_script(script,
new_sandbox=False,
sandbox=self._sandbox,
*args,
**kwargs)
return self.marionette.execute_script(
script, new_sandbox=False, sandbox=self._sandbox, *args, **kwargs
)
def runAsyncCode(self, script, *args, **kwargs):
return self.marionette.execute_async_script(script,
new_sandbox=False,
sandbox=self._sandbox,
*args,
**kwargs)
return self.marionette.execute_async_script(
script, new_sandbox=False, sandbox=self._sandbox, *args, **kwargs
)
def setUp(self):
MarionetteTestCase.setUp(self)
@ -511,14 +578,19 @@ class TestFirefoxRefresh(MarionetteTestCase):
if cleanup.reset_profile_path:
# Remove ourselves from profiles.ini
self.runCode("""
self.runCode(
"""
let name = arguments[0];
let profile = global.profSvc.getProfileByName(name);
profile.remove(false)
global.profSvc.flush();
""", script_args=(cleanup.profile_name_to_remove,))
""",
script_args=(cleanup.profile_name_to_remove,),
)
# Remove the local profile dir if it's not the same as the profile dir:
different_path = cleanup.reset_profile_local_path != cleanup.reset_profile_path
different_path = (
cleanup.reset_profile_local_path != cleanup.reset_profile_path
)
if cleanup.reset_profile_local_path and different_path:
mozfile.remove(cleanup.reset_profile_local_path)
@ -528,7 +600,8 @@ class TestFirefoxRefresh(MarionetteTestCase):
def doReset(self):
profileName = "marionette-test-profile-" + str(int(time.time() * 1000))
cleanup = PendingCleanup(profileName)
self.runCode("""
self.runCode(
"""
// Ensure the current (temporary) profile is in profiles.ini:
let profD = Services.dirsvc.get("ProfD", Ci.nsIFile);
let profileName = arguments[1];
@ -546,24 +619,33 @@ class TestFirefoxRefresh(MarionetteTestCase):
env.set("MOZ_MARIONETTE_PREF_STATE_ACROSS_RESTARTS", JSON.stringify(prefObj));
env.set("MOZ_RESET_PROFILE_RESTART", "1");
env.set("XRE_PROFILE_PATH", arguments[0]);
""", script_args=(self.marionette.instance.profile.profile, profileName,))
""",
script_args=(
self.marionette.instance.profile.profile,
profileName,
),
)
profileLeafName = os.path.basename(os.path.normpath(
self.marionette.instance.profile.profile))
profileLeafName = os.path.basename(
os.path.normpath(self.marionette.instance.profile.profile)
)
# Now restart the browser to get it reset:
self.marionette.restart(clean=False, in_app=True)
self.setUpScriptData()
# Determine the new profile path (we'll need to remove it when we're done)
[cleanup.reset_profile_path, cleanup.reset_profile_local_path] = self.runCode("""
[cleanup.reset_profile_path, cleanup.reset_profile_local_path] = self.runCode(
"""
let profD = Services.dirsvc.get("ProfD", Ci.nsIFile);
let localD = Services.dirsvc.get("ProfLD", Ci.nsIFile);
return [profD.path, localD.path];
""")
"""
)
# Determine the backup path
cleanup.desktop_backup_path = self.runCode("""
cleanup.desktop_backup_path = self.runCode(
"""
let container;
try {
container = Services.dirsvc.get("Desk", Ci.nsIFile);
@ -575,12 +657,18 @@ class TestFirefoxRefresh(MarionetteTestCase):
container.append(dirName);
container.append(arguments[0]);
return container.path;
""", script_args=(profileLeafName,)) # NOQA: E501
""", # NOQA: E501
script_args=(profileLeafName,),
)
self.assertTrue(os.path.isdir(cleanup.reset_profile_path),
"Reset profile path should be present")
self.assertTrue(os.path.isdir(cleanup.desktop_backup_path),
"Backup profile path should be present")
self.assertTrue(
os.path.isdir(cleanup.reset_profile_path),
"Reset profile path should be present",
)
self.assertTrue(
os.path.isdir(cleanup.desktop_backup_path),
"Backup profile path should be present",
)
self.assertIn(cleanup.profile_name_to_remove, cleanup.reset_profile_path)
return cleanup


@ -23,7 +23,7 @@ from distutils.spawn import find_executable
dbFiles = [
re.compile("^cert[0-9]+\.db$"),
re.compile("^key[0-9]+\.db$"),
re.compile("^secmod\.db$")
re.compile("^secmod\.db$"),
]
@ -53,10 +53,13 @@ def runUtil(util, args, inputdata=None, outputstream=None):
env[pathvar] = "%s%s%s" % (app_path, os.pathsep, env[pathvar])
else:
env[pathvar] = app_path
proc = subprocess.Popen([util] + args, env=env,
stdin=subprocess.PIPE if inputdata else None,
stdout=outputstream,
universal_newlines=True)
proc = subprocess.Popen(
[util] + args,
env=env,
stdin=subprocess.PIPE if inputdata else None,
stdout=outputstream,
universal_newlines=True,
)
proc.communicate(inputdata)
return proc.returncode
@ -67,11 +70,13 @@ def createRandomFile(randomFile):
def writeCertspecForServerLocations(fd):
locations = ServerLocations(os.path.join(build.topsrcdir,
"build", "pgo",
"server-locations.txt"))
locations = ServerLocations(
os.path.join(build.topsrcdir, "build", "pgo", "server-locations.txt")
)
SAN = []
for loc in [i for i in iter(locations) if i.scheme == "https" and "nocert" not in i.options]:
for loc in [
i for i in iter(locations) if i.scheme == "https" and "nocert" not in i.options
]:
customCertOption = False
customCertRE = re.compile("^cert=(?:\w+)")
for _ in [i for i in loc.options if customCertRE.match(i)]:
@ -84,7 +89,9 @@ def writeCertspecForServerLocations(fd):
if not customCertOption:
SAN.append(loc.host)
fd.write("issuer:printableString/CN=Temporary Certificate Authority/O=Mozilla Testing/OU=Profile Guided Optimization\n") # NOQA: E501
fd.write(
"issuer:printableString/CN=Temporary Certificate Authority/O=Mozilla Testing/OU=Profile Guided Optimization\n" # NOQA: E501
)
fd.write("subject:{}\n".format(SAN[0]))
fd.write("extension:subjectAlternativeName:{}\n".format(",".join(SAN)))
@ -94,13 +101,15 @@ def constructCertDatabase(build, srcDir):
certutil = build.get_binary_path(what="certutil")
pk12util = build.get_binary_path(what="pk12util")
except BinaryNotFoundException as e:
print('{}\n\n{}\n'.format(e, e.help()))
print("{}\n\n{}\n".format(e, e.help()))
return 1
openssl = find_executable("openssl")
pycert = os.path.join(build.topsrcdir, "security", "manager", "ssl", "tests",
"unit", "pycert.py")
pykey = os.path.join(build.topsrcdir, "security", "manager", "ssl", "tests",
"unit", "pykey.py")
pycert = os.path.join(
build.topsrcdir, "security", "manager", "ssl", "tests", "unit", "pycert.py"
)
pykey = os.path.join(
build.topsrcdir, "security", "manager", "ssl", "tests", "unit", "pykey.py"
)
with NamedTemporaryFile(mode="wt+") as pwfile, TemporaryDirectory() as pemfolder:
pwfile.write("\n")
@ -112,15 +121,17 @@ def constructCertDatabase(build, srcDir):
# Copy all .certspec and .keyspec files to a temporary directory
for root, dirs, files in os.walk(srcDir):
for spec in [i for i in files if i.endswith(".certspec") or i.endswith(".keyspec")]:
shutil.copyfile(os.path.join(root, spec),
os.path.join(pemfolder, spec))
for spec in [
i for i in files if i.endswith(".certspec") or i.endswith(".keyspec")
]:
shutil.copyfile(os.path.join(root, spec), os.path.join(pemfolder, spec))
# Write a certspec for the "server-locations.txt" file to that temporary directory
pgoserver_certspec = os.path.join(pemfolder, "pgoserver.certspec")
if os.path.exists(pgoserver_certspec):
raise Exception(
"{} already exists, which isn't allowed".format(pgoserver_certspec))
"{} already exists, which isn't allowed".format(pgoserver_certspec)
)
with open(pgoserver_certspec, "w") as fd:
writeCertspecForServerLocations(fd)
@ -136,14 +147,27 @@ def constructCertDatabase(build, srcDir):
certspec_data = certspec_file.read()
with open(pem, "w") as pem_file:
status = runUtil(
pycert, [], inputdata=certspec_data, outputstream=pem_file)
pycert, [], inputdata=certspec_data, outputstream=pem_file
)
if status:
return status
status = runUtil(certutil, [
"-A", "-n", name, "-t", "P,,", "-i", pem,
"-d", srcDir, "-f", pwfile.name
])
status = runUtil(
certutil,
[
"-A",
"-n",
name,
"-t",
"P,,",
"-i",
pem,
"-d",
srcDir,
"-f",
pwfile.name,
],
)
if status:
return status
@ -152,9 +176,10 @@ def constructCertDatabase(build, srcDir):
name = parts[0]
key_type = parts[1]
if key_type not in ["ca", "client", "server"]:
raise Exception("{}: keyspec filenames must be of the form XXX.client.keyspec "
"or XXX.ca.keyspec (key_type={})".format(
keyspec, key_type))
raise Exception(
"{}: keyspec filenames must be of the form XXX.client.keyspec "
"or XXX.ca.keyspec (key_type={})".format(keyspec, key_type)
)
key_pem = os.path.join(pemfolder, "{}.key.pem".format(name))
print("Generating private key {} (pem={})".format(name, key_pem))
@ -163,42 +188,62 @@ def constructCertDatabase(build, srcDir):
keyspec_data = keyspec_file.read()
with open(key_pem, "w") as pem_file:
status = runUtil(
pykey, [], inputdata=keyspec_data, outputstream=pem_file)
pykey, [], inputdata=keyspec_data, outputstream=pem_file
)
if status:
return status
cert_pem = os.path.join(pemfolder, "{}.cert.pem".format(name))
if not os.path.exists(cert_pem):
raise Exception("There has to be a corresponding certificate named {} for "
"the keyspec {}".format(
cert_pem, keyspec))
raise Exception(
"There has to be a corresponding certificate named {} for "
"the keyspec {}".format(cert_pem, keyspec)
)
p12 = os.path.join(pemfolder, "{}.key.p12".format(name))
print("Converting private key {} to PKCS12 (p12={})".format(
key_pem, p12))
status = runUtil(openssl, ["pkcs12", "-export", "-inkey", key_pem, "-in",
cert_pem, "-name", name, "-out", p12, "-passout",
"file:"+pwfile.name])
print(
"Converting private key {} to PKCS12 (p12={})".format(key_pem, p12)
)
status = runUtil(
openssl,
[
"pkcs12",
"-export",
"-inkey",
key_pem,
"-in",
cert_pem,
"-name",
name,
"-out",
p12,
"-passout",
"file:" + pwfile.name,
],
)
if status:
return status
print("Importing private key {} to database".format(key_pem))
status = runUtil(
pk12util, ["-i", p12, "-d", srcDir, "-w", pwfile.name, "-k", pwfile.name])
pk12util,
["-i", p12, "-d", srcDir, "-w", pwfile.name, "-k", pwfile.name],
)
if status:
return status
if key_type == "ca":
shutil.copyfile(cert_pem, os.path.join(
srcDir, "{}.ca".format(name)))
shutil.copyfile(
cert_pem, os.path.join(srcDir, "{}.ca".format(name))
)
elif key_type == "client":
shutil.copyfile(p12, os.path.join(
srcDir, "{}.client".format(name)))
shutil.copyfile(p12, os.path.join(srcDir, "{}.client".format(name)))
elif key_type == "server":
pass # Nothing to do for server keys
else:
raise Exception(
"State error: Unknown keyspec key_type: {}".format(key_type))
"State error: Unknown keyspec key_type: {}".format(key_type)
)
return 0


@ -28,56 +28,56 @@ import os
import re
import sys
architecture_independent = set(['generic'])
all_unsupported_architectures_names = set(['mips32', 'mips64', 'mips_shared'])
all_architecture_names = set(['x86', 'x64', 'arm', 'arm64'])
all_shared_architecture_names = set(['x86_shared', 'arm', 'arm64'])
architecture_independent = set(["generic"])
all_unsupported_architectures_names = set(["mips32", "mips64", "mips_shared"])
all_architecture_names = set(["x86", "x64", "arm", "arm64"])
all_shared_architecture_names = set(["x86_shared", "arm", "arm64"])
reBeforeArg = "(?<=[(,\s])"
reArgType = "(?P<type>[\w\s:*&]+)"
reArgName = "(?P<name>\s\w+)"
reArgDefault = "(?P<default>(?:\s=[^,)]+)?)"
reAfterArg = "(?=[,)])"
reMatchArg = re.compile(reBeforeArg + reArgType +
reArgName + reArgDefault + reAfterArg)
reMatchArg = re.compile(reBeforeArg + reArgType + reArgName + reArgDefault + reAfterArg)
def get_normalized_signatures(signature, fileAnnot=None):
# Remove static
signature = signature.replace('static', '')
signature = signature.replace("static", "")
# Remove semicolon.
signature = signature.replace(';', ' ')
signature = signature.replace(";", " ")
# Normalize spaces.
signature = re.sub(r'\s+', ' ', signature).strip()
signature = re.sub(r"\s+", " ", signature).strip()
# Remove new-line induced spaces after opening braces.
signature = re.sub(r'\(\s+', '(', signature).strip()
signature = re.sub(r"\(\s+", "(", signature).strip()
# Match arguments, and keep only the type.
signature = reMatchArg.sub('\g<type>', signature)
signature = reMatchArg.sub("\g<type>", signature)
# Remove class name
signature = signature.replace('MacroAssembler::', '')
signature = signature.replace("MacroAssembler::", "")
# Extract list of architectures
archs = ['generic']
archs = ["generic"]
if fileAnnot:
archs = [fileAnnot['arch']]
archs = [fileAnnot["arch"]]
if 'DEFINED_ON(' in signature:
if "DEFINED_ON(" in signature:
archs = re.sub(
r'.*DEFINED_ON\((?P<archs>[^()]*)\).*', '\g<archs>', signature).split(',')
r".*DEFINED_ON\((?P<archs>[^()]*)\).*", "\g<archs>", signature
).split(",")
archs = [a.strip() for a in archs]
signature = re.sub(r'\s+DEFINED_ON\([^()]*\)', '', signature)
signature = re.sub(r"\s+DEFINED_ON\([^()]*\)", "", signature)
elif 'PER_ARCH' in signature:
elif "PER_ARCH" in signature:
archs = all_architecture_names
signature = re.sub(r'\s+PER_ARCH', '', signature)
signature = re.sub(r"\s+PER_ARCH", "", signature)
elif 'PER_SHARED_ARCH' in signature:
elif "PER_SHARED_ARCH" in signature:
archs = all_shared_architecture_names
signature = re.sub(r'\s+PER_SHARED_ARCH', '', signature)
signature = re.sub(r"\s+PER_SHARED_ARCH", "", signature)
elif 'OOL_IN_HEADER' in signature:
assert archs == ['generic']
signature = re.sub(r'\s+OOL_IN_HEADER', '', signature)
elif "OOL_IN_HEADER" in signature:
assert archs == ["generic"]
signature = re.sub(r"\s+OOL_IN_HEADER", "", signature)
else:
# No signature annotation, the list of architectures remains unchanged.
@ -86,58 +86,55 @@ def get_normalized_signatures(signature, fileAnnot=None):
# Extract inline annotation
inline = False
if fileAnnot:
inline = fileAnnot['inline']
inline = fileAnnot["inline"]
if 'inline ' in signature:
signature = re.sub(r'inline\s+', '', signature)
if "inline " in signature:
signature = re.sub(r"inline\s+", "", signature)
inline = True
inlinePrefx = ''
inlinePrefx = ""
if inline:
inlinePrefx = 'inline '
signatures = [
{'arch': a, 'sig': inlinePrefx + signature}
for a in archs
]
inlinePrefx = "inline "
signatures = [{"arch": a, "sig": inlinePrefx + signature} for a in archs]
return signatures
file_suffixes = set([
a.replace('_', '-') for a in
all_architecture_names.union(all_shared_architecture_names)
.union(all_unsupported_architectures_names)
])
file_suffixes = set(
[
a.replace("_", "-")
for a in all_architecture_names.union(all_shared_architecture_names).union(
all_unsupported_architectures_names
)
]
)
def get_file_annotation(filename):
origFilename = filename
filename = filename.split('/')[-1]
filename = filename.split("/")[-1]
inline = False
if filename.endswith('.cpp'):
filename = filename[:-len('.cpp')]
elif filename.endswith('-inl.h'):
if filename.endswith(".cpp"):
filename = filename[: -len(".cpp")]
elif filename.endswith("-inl.h"):
inline = True
filename = filename[:-len('-inl.h')]
elif filename.endswith('.h'):
filename = filename[: -len("-inl.h")]
elif filename.endswith(".h"):
# This allows the definitions block in MacroAssembler.h to be
# style-checked.
inline = True
filename = filename[:-len('.h')]
filename = filename[: -len(".h")]
else:
raise Exception('unknown file name', origFilename)
raise Exception("unknown file name", origFilename)
arch = 'generic'
arch = "generic"
for suffix in file_suffixes:
if filename == 'MacroAssembler-' + suffix:
if filename == "MacroAssembler-" + suffix:
arch = suffix
break
return {
'inline': inline,
'arch': arch.replace('-', '_')
}
return {"inline": inline, "arch": arch.replace("-", "_")}
def get_macroassembler_definitions(filename):
@ -147,46 +144,45 @@ def get_macroassembler_definitions(filename):
return []
style_section = False
lines = ''
lines = ""
signatures = []
with open(filename) as f:
for line in f:
if '//{{{ check_macroassembler_style' in line:
if "//{{{ check_macroassembler_style" in line:
if style_section:
raise 'check_macroassembler_style section already opened.'
raise "check_macroassembler_style section already opened."
style_section = True
braces_depth = 0
elif '//}}} check_macroassembler_style' in line:
elif "//}}} check_macroassembler_style" in line:
style_section = False
if not style_section:
continue
# Ignore preprocessor directives.
if line.startswith('#'):
if line.startswith("#"):
continue
# Remove comments from the processed line.
line = re.sub(r'//.*', '', line)
line = re.sub(r"//.*", "", line)
# Locate and count curly braces.
open_curly_brace = line.find('{')
open_curly_brace = line.find("{")
was_braces_depth = braces_depth
braces_depth = braces_depth + line.count('{') - line.count('}')
braces_depth = braces_depth + line.count("{") - line.count("}")
# Raise an error if the check_macroassembler_style macro is used
# across namespaces / classes scopes.
if braces_depth < 0:
raise 'check_macroassembler_style annotations are not well scoped.'
raise "check_macroassembler_style annotations are not well scoped."
# If the current line contains an opening curly brace, check if
# this line combines with the previous one can be identified as a
# MacroAssembler function signature.
if open_curly_brace != -1 and was_braces_depth == 0:
lines = lines + line[:open_curly_brace]
if 'MacroAssembler::' in lines:
signatures.extend(
get_normalized_signatures(lines, fileAnnot))
lines = ''
if "MacroAssembler::" in lines:
signatures.extend(get_normalized_signatures(lines, fileAnnot))
lines = ""
continue
# We do not aggregate any lines if we are scanning lines which are
@ -194,15 +190,15 @@ def get_macroassembler_definitions(filename):
if braces_depth > 0:
continue
if was_braces_depth != 0:
line = line[line.rfind('}') + 1:]
line = line[line.rfind("}") + 1 :]
# This logic is used to remove template instantiation, static
# variable definitions and function declaration from the next
# function definition.
last_semi_colon = line.rfind(';')
last_semi_colon = line.rfind(";")
if last_semi_colon != -1:
lines = ''
line = line[last_semi_colon + 1:]
lines = ""
line = line[last_semi_colon + 1 :]
# Aggregate lines of non-braced text, which corresponds to the space
# where we are expecting to find function definitions.
@ -213,49 +209,49 @@ def get_macroassembler_definitions(filename):
def get_macroassembler_declaration(filename):
style_section = False
lines = ''
lines = ""
signatures = []
with open(filename) as f:
for line in f:
if '//{{{ check_macroassembler_decl_style' in line:
if "//{{{ check_macroassembler_decl_style" in line:
style_section = True
elif '//}}} check_macroassembler_decl_style' in line:
elif "//}}} check_macroassembler_decl_style" in line:
style_section = False
if not style_section:
continue
# Ignore preprocessor directives.
if line.startswith('#'):
if line.startswith("#"):
continue
line = re.sub(r'//.*', '', line)
if len(line.strip()) == 0 or 'public:' in line or 'private:' in line:
lines = ''
line = re.sub(r"//.*", "", line)
if len(line.strip()) == 0 or "public:" in line or "private:" in line:
lines = ""
continue
lines = lines + line
# Continue until we have a complete declaration
if ';' not in lines:
if ";" not in lines:
continue
# Skip member declarations: which are lines ending with a
# semi-colon without any list of arguments.
if ')' not in lines:
lines = ''
if ")" not in lines:
lines = ""
continue
signatures.extend(get_normalized_signatures(lines))
lines = ''
lines = ""
return signatures
def append_signatures(d, sigs):
for s in sigs:
if s['sig'] not in d:
d[s['sig']] = []
d[s['sig']].append(s['arch'])
if s["sig"] not in d:
d[s["sig"]] = []
d[s["sig"]].append(s["arch"])
return d
@ -265,65 +261,66 @@ def generate_file_content(signatures):
archs = set(sorted(signatures[s]))
archs -= all_unsupported_architectures_names
if len(archs.symmetric_difference(architecture_independent)) == 0:
output.append(s + ';\n')
if s.startswith('inline'):
output.append(s + ";\n")
if s.startswith("inline"):
# TODO, bug 1432600: This is mistaken for OOL_IN_HEADER
# functions. (Such annotation is already removed by the time
# this function sees the signature here.)
output.append(' is defined in MacroAssembler-inl.h\n')
output.append(" is defined in MacroAssembler-inl.h\n")
else:
output.append(' is defined in MacroAssembler.cpp\n')
output.append(" is defined in MacroAssembler.cpp\n")
else:
if len(archs.symmetric_difference(all_architecture_names)) == 0:
output.append(s + ' PER_ARCH;\n')
output.append(s + " PER_ARCH;\n")
elif len(archs.symmetric_difference(all_shared_architecture_names)) == 0:
output.append(s + ' PER_SHARED_ARCH;\n')
output.append(s + " PER_SHARED_ARCH;\n")
else:
output.append(
s + ' DEFINED_ON(' + ', '.join(sorted(archs)) + ');\n')
output.append(s + " DEFINED_ON(" + ", ".join(sorted(archs)) + ");\n")
for a in sorted(archs):
a = a.replace('_', '-')
masm = '%s/MacroAssembler-%s' % (a, a)
if s.startswith('inline'):
output.append(' is defined in %s-inl.h\n' % masm)
a = a.replace("_", "-")
masm = "%s/MacroAssembler-%s" % (a, a)
if s.startswith("inline"):
output.append(" is defined in %s-inl.h\n" % masm)
else:
output.append(' is defined in %s.cpp\n' % masm)
output.append(" is defined in %s.cpp\n" % masm)
return output
def check_style():
# We read from the header file the signature of each function.
decls = dict() # type: dict(signature => ['x86', 'x64'])
decls = dict() # type: dict(signature => ['x86', 'x64'])
# We infer from each file the signature of each MacroAssembler function.
defs = dict() # type: dict(signature => ['x86', 'x64'])
defs = dict() # type: dict(signature => ['x86', 'x64'])
root_dir = os.path.join('js', 'src', 'jit')
root_dir = os.path.join("js", "src", "jit")
for dirpath, dirnames, filenames in os.walk(root_dir):
for filename in filenames:
if 'MacroAssembler' not in filename:
if "MacroAssembler" not in filename:
continue
filepath = os.path.join(dirpath, filename).replace('\\', '/')
filepath = os.path.join(dirpath, filename).replace("\\", "/")
if filepath.endswith('MacroAssembler.h'):
if filepath.endswith("MacroAssembler.h"):
decls = append_signatures(
decls, get_macroassembler_declaration(filepath))
defs = append_signatures(
defs, get_macroassembler_definitions(filepath))
decls, get_macroassembler_declaration(filepath)
)
defs = append_signatures(defs, get_macroassembler_definitions(filepath))
if not decls or not defs:
raise Exception("Did not find any definitions or declarations")
# Compare declarations and definitions output.
difflines = difflib.unified_diff(generate_file_content(decls),
generate_file_content(defs),
fromfile='check_macroassembler_style.py declared syntax',
tofile='check_macroassembler_style.py found definitions')
difflines = difflib.unified_diff(
generate_file_content(decls),
generate_file_content(defs),
fromfile="check_macroassembler_style.py declared syntax",
tofile="check_macroassembler_style.py found definitions",
)
ok = True
for diffline in difflines:
ok = False
print(diffline, end='')
print(diffline, end="")
return ok
@ -332,12 +329,14 @@ def main():
ok = check_style()
if ok:
print('TEST-PASS | check_macroassembler_style.py | ok')
print("TEST-PASS | check_macroassembler_style.py | ok")
else:
print('TEST-UNEXPECTED-FAIL | check_macroassembler_style.py | actual output does not match expected output; diff is above') # noqa: E501
print(
"TEST-UNEXPECTED-FAIL | check_macroassembler_style.py | actual output does not match expected output; diff is above" # noqa: E501
)
sys.exit(0 if ok else 1)
if __name__ == '__main__':
if __name__ == "__main__":
main()


@ -8,23 +8,24 @@ from collections import defaultdict, namedtuple
scriptdir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
HazardSummary = namedtuple('HazardSummary', [
'function',
'variable',
'type',
'GCFunction',
'location'])
HazardSummary = namedtuple(
"HazardSummary", ["function", "variable", "type", "GCFunction", "location"]
)
Callgraph = namedtuple('Callgraph', [
'functionNames',
'nameToId',
'mangledToUnmangled',
'unmangledToMangled',
'calleesOf',
'callersOf',
'tags',
'calleeGraph',
'callerGraph'])
Callgraph = namedtuple(
"Callgraph",
[
"functionNames",
"nameToId",
"mangledToUnmangled",
"unmangledToMangled",
"calleesOf",
"callersOf",
"tags",
"calleeGraph",
"callerGraph",
],
)
def equal(got, expected):
@ -33,7 +34,7 @@ def equal(got, expected):
def extract_unmangled(func):
return func.split('$')[-1]
return func.split("$")[-1]
class Test(object):
@ -49,24 +50,27 @@ class Test(object):
def binpath(self, prog):
return os.path.join(self.cfg.sixgill_bin, prog)
def compile(self, source, options=''):
def compile(self, source, options=""):
env = os.environ
env['CCACHE_DISABLE'] = '1'
env["CCACHE_DISABLE"] = "1"
cmd = "{CXX} -c {source} -O3 -std=c++11 -fplugin={sixgill} -fplugin-arg-xgill-mangle=1 {options}".format( # NOQA: E501
source=self.infile(source),
CXX=self.cfg.cxx, sixgill=self.cfg.sixgill_plugin,
options=options)
CXX=self.cfg.cxx,
sixgill=self.cfg.sixgill_plugin,
options=options,
)
if self.cfg.verbose:
print("Running %s" % cmd)
subprocess.check_call(["sh", "-c", cmd])
def load_db_entry(self, dbname, pattern):
'''Look up an entry from an XDB database file, 'pattern' may be an exact
matching string, or an re pattern object matching a single entry.'''
"""Look up an entry from an XDB database file, 'pattern' may be an exact
matching string, or an re pattern object matching a single entry."""
if hasattr(pattern, 'match'):
output = subprocess.check_output([self.binpath("xdbkeys"), dbname + ".xdb"],
universal_newlines=True)
if hasattr(pattern, "match"):
output = subprocess.check_output(
[self.binpath("xdbkeys"), dbname + ".xdb"], universal_newlines=True
)
matches = list(filter(lambda _: re.search(pattern, _), output.splitlines()))
if len(matches) == 0:
raise Exception("entry not found")
@ -74,17 +78,26 @@ class Test(object):
raise Exception("multiple entries found")
pattern = matches[0]
output = subprocess.check_output([self.binpath("xdbfind"), "-json", dbname + ".xdb",
pattern],
universal_newlines=True)
output = subprocess.check_output(
[self.binpath("xdbfind"), "-json", dbname + ".xdb", pattern],
universal_newlines=True,
)
return json.loads(output)
def run_analysis_script(self, phase, upto=None):
open("defaults.py", "w").write('''\
open("defaults.py", "w").write(
"""\
analysis_scriptdir = '{scriptdir}'
sixgill_bin = '{bindir}'
'''.format(scriptdir=scriptdir, bindir=self.cfg.sixgill_bin))
cmd = [os.path.join(scriptdir, "analyze.py"), '-v' if self.verbose else '-q', phase]
""".format(
scriptdir=scriptdir, bindir=self.cfg.sixgill_bin
)
)
cmd = [
os.path.join(scriptdir, "analyze.py"),
"-v" if self.verbose else "-q",
phase,
]
if upto:
cmd += ["--upto", upto]
cmd.append("--source=%s" % self.indir)
@ -107,17 +120,23 @@ sixgill_bin = '{bindir}'
return list(filter(lambda _: _ is not None, values))
def load_suppressed_functions(self):
return set(self.load_text_file("limitedFunctions.lst", extract=lambda l: l.split(' ')[1]))
return set(
self.load_text_file(
"limitedFunctions.lst", extract=lambda l: l.split(" ")[1]
)
)
def load_gcTypes(self):
def grab_type(line):
m = re.match(r'^(GC\w+): (.*)', line)
m = re.match(r"^(GC\w+): (.*)", line)
if m:
return (m.group(1) + 's', m.group(2))
return (m.group(1) + "s", m.group(2))
return None
gctypes = defaultdict(list)
for collection, typename in self.load_text_file('gcTypes.txt', extract=grab_type):
for collection, typename in self.load_text_file(
"gcTypes.txt", extract=grab_type
):
gctypes[collection].append(typename)
return gctypes
@ -126,11 +145,11 @@ sixgill_bin = '{bindir}'
return json.load(fh)
def load_gcFunctions(self):
return self.load_text_file('gcFunctions.lst', extract=extract_unmangled)
return self.load_text_file("gcFunctions.lst", extract=extract_unmangled)
def load_callgraph(self):
data = Callgraph(
functionNames=['dummy'],
functionNames=["dummy"],
nameToId={},
mangledToUnmangled={},
unmangledToMangled={},
@ -152,14 +171,14 @@ sixgill_bin = '{bindir}'
data.callerGraph[callee][caller] = True
def process(line):
if line.startswith('#'):
if line.startswith("#"):
name = line.split(" ", 1)[1]
data.nameToId[name] = len(data.functionNames)
data.functionNames.append(name)
return
if line.startswith('='):
m = re.match(r'^= (\d+) (.*)', line)
if line.startswith("="):
m = re.match(r"^= (\d+) (.*)", line)
mangled = data.functionNames[int(m.group(1))]
unmangled = m.group(2)
data.nameToId[unmangled] = id
@ -168,32 +187,34 @@ sixgill_bin = '{bindir}'
return
limit = 0
m = re.match(r'^\w (?:/(\d+))? ', line)
m = re.match(r"^\w (?:/(\d+))? ", line)
if m:
limit = int(m[1])
tokens = line.split(' ')
if tokens[0] in ('D', 'R'):
tokens = line.split(" ")
if tokens[0] in ("D", "R"):
_, caller, callee = tokens
add_call(lookup(caller), lookup(callee), limit)
elif tokens[0] == 'T':
data.tags[tokens[1]].add(line.split(' ', 2)[2])
elif tokens[0] in ('F', 'V'):
m = re.match(r'^[FV] (\d+) (\d+) CLASS (.*?) FIELD (.*)', line)
elif tokens[0] == "T":
data.tags[tokens[1]].add(line.split(" ", 2)[2])
elif tokens[0] in ("F", "V"):
m = re.match(r"^[FV] (\d+) (\d+) CLASS (.*?) FIELD (.*)", line)
caller, callee, csu, field = m.groups()
add_call(lookup(caller), lookup(callee), limit)
elif tokens[0] == 'I':
m = re.match(r'^I (\d+) VARIABLE ([^\,]*)', line)
elif tokens[0] == "I":
m = re.match(r"^I (\d+) VARIABLE ([^\,]*)", line)
pass
self.load_text_file('callgraph.txt', extract=process)
self.load_text_file("callgraph.txt", extract=process)
return data
def load_hazards(self):
def grab_hazard(line):
m = re.match(
r"Function '(.*?)' has unrooted '(.*?)' of type '(.*?)' live across GC call '(.*?)' at (.*)", line) # NOQA: E501
r"Function '(.*?)' has unrooted '(.*?)' of type '(.*?)' live across GC call '(.*?)' at (.*)", # NOQA: E501
line,
)
if m:
info = list(m.groups())
info[0] = info[0].split("$")[-1]
@ -201,7 +222,7 @@ sixgill_bin = '{bindir}'
return HazardSummary(*info)
return None
return self.load_text_file('rootingHazards.txt', extract=grab_hazard)
return self.load_text_file("rootingHazards.txt", extract=grab_hazard)
def process_body(self, body):
return Body(body)

File diff suppressed because it is too large


@ -51,150 +51,223 @@ def REMOVED(cls):
@CommandProvider
class MachCommands(MachCommandBase):
@Command('android', category='devenv',
description='Run Android-specific commands.',
conditions=[conditions.is_android])
@Command(
"android",
category="devenv",
description="Run Android-specific commands.",
conditions=[conditions.is_android],
)
def android(self):
pass
@SubCommand('android', 'assemble-app',
"""Assemble Firefox for Android.
See http://firefox-source-docs.mozilla.org/build/buildsystem/toolchains.html#firefox-for-android-with-gradle""") # NOQA: E501
@CommandArgument('args', nargs=argparse.REMAINDER)
@SubCommand(
"android",
"assemble-app",
"""Assemble Firefox for Android.
See http://firefox-source-docs.mozilla.org/build/buildsystem/toolchains.html#firefox-for-android-with-gradle""", # NOQA: E501
)
@CommandArgument("args", nargs=argparse.REMAINDER)
def android_assemble_app(self, args):
ret = self.gradle(self.substs['GRADLE_ANDROID_APP_TASKS'] +
['-x', 'lint'] + args, verbose=True)
ret = self.gradle(
self.substs["GRADLE_ANDROID_APP_TASKS"] + ["-x", "lint"] + args,
verbose=True,
)
return ret
@SubCommand('android', 'generate-sdk-bindings',
"""Generate SDK bindings used when building GeckoView.""")
@CommandArgument('inputs', nargs='+', help='config files, '
'like [/path/to/ClassName-classes.txt]+')
@CommandArgument('args', nargs=argparse.REMAINDER)
@SubCommand(
"android",
"generate-sdk-bindings",
"""Generate SDK bindings used when building GeckoView.""",
)
@CommandArgument(
"inputs",
nargs="+",
help="config files, " "like [/path/to/ClassName-classes.txt]+",
)
@CommandArgument("args", nargs=argparse.REMAINDER)
def android_generate_sdk_bindings(self, inputs, args):
import itertools
def stem(input):
# Turn "/path/to/ClassName-classes.txt" into "ClassName".
return os.path.basename(input).rsplit('-classes.txt', 1)[0]
return os.path.basename(input).rsplit("-classes.txt", 1)[0]
bindings_inputs = list(itertools.chain(*((input, stem(input)) for input in inputs)))
bindings_args = '-Pgenerate_sdk_bindings_args={}'.format(';'.join(bindings_inputs))
bindings_inputs = list(
itertools.chain(*((input, stem(input)) for input in inputs))
)
bindings_args = "-Pgenerate_sdk_bindings_args={}".format(
";".join(bindings_inputs)
)
ret = self.gradle(
self.substs['GRADLE_ANDROID_GENERATE_SDK_BINDINGS_TASKS'] + [bindings_args] + args,
verbose=True)
self.substs["GRADLE_ANDROID_GENERATE_SDK_BINDINGS_TASKS"]
+ [bindings_args]
+ args,
verbose=True,
)
return ret
@SubCommand('android', 'generate-generated-jni-wrappers',
"""Generate GeckoView JNI wrappers used when building GeckoView.""")
@CommandArgument('args', nargs=argparse.REMAINDER)
@SubCommand(
"android",
"generate-generated-jni-wrappers",
"""Generate GeckoView JNI wrappers used when building GeckoView.""",
)
@CommandArgument("args", nargs=argparse.REMAINDER)
def android_generate_generated_jni_wrappers(self, args):
ret = self.gradle(
self.substs['GRADLE_ANDROID_GENERATE_GENERATED_JNI_WRAPPERS_TASKS'] + args,
verbose=True)
self.substs["GRADLE_ANDROID_GENERATE_GENERATED_JNI_WRAPPERS_TASKS"] + args,
verbose=True,
)
return ret
@SubCommand('android', 'api-lint',
"""Run Android api-lint.
REMOVED/DEPRECATED: Use 'mach lint --linter android-api-lint'.""")
@SubCommand(
"android",
"api-lint",
"""Run Android api-lint.
REMOVED/DEPRECATED: Use 'mach lint --linter android-api-lint'.""",
)
def android_apilint_REMOVED(self):
print(LINT_DEPRECATION_MESSAGE)
return 1
@SubCommand('android', 'test',
"""Run Android test.
REMOVED/DEPRECATED: Use 'mach lint --linter android-test'.""")
@SubCommand(
"android",
"test",
"""Run Android test.
REMOVED/DEPRECATED: Use 'mach lint --linter android-test'.""",
)
def android_test_REMOVED(self):
print(LINT_DEPRECATION_MESSAGE)
return 1
@SubCommand('android', 'lint',
"""Run Android lint.
REMOVED/DEPRECATED: Use 'mach lint --linter android-lint'.""")
@SubCommand(
"android",
"lint",
"""Run Android lint.
REMOVED/DEPRECATED: Use 'mach lint --linter android-lint'.""",
)
def android_lint_REMOVED(self):
print(LINT_DEPRECATION_MESSAGE)
return 1
@SubCommand('android', 'checkstyle',
"""Run Android checkstyle.
REMOVED/DEPRECATED: Use 'mach lint --linter android-checkstyle'.""")
@SubCommand(
"android",
"checkstyle",
"""Run Android checkstyle.
REMOVED/DEPRECATED: Use 'mach lint --linter android-checkstyle'.""",
)
def android_checkstyle_REMOVED(self):
print(LINT_DEPRECATION_MESSAGE)
return 1
@SubCommand('android', 'gradle-dependencies',
"""Collect Android Gradle dependencies.
See http://firefox-source-docs.mozilla.org/build/buildsystem/toolchains.html#firefox-for-android-with-gradle""") # NOQA: E501
@CommandArgument('args', nargs=argparse.REMAINDER)
@SubCommand(
"android",
"gradle-dependencies",
"""Collect Android Gradle dependencies.
See http://firefox-source-docs.mozilla.org/build/buildsystem/toolchains.html#firefox-for-android-with-gradle""", # NOQA: E501
)
@CommandArgument("args", nargs=argparse.REMAINDER)
def android_gradle_dependencies(self, args):
# We don't want to gate producing dependency archives on clean
# lint or checkstyle, particularly because toolchain versions
# can change the outputs for those processes.
self.gradle(self.substs['GRADLE_ANDROID_DEPENDENCIES_TASKS'] +
["--continue"] + args, verbose=True)
self.gradle(
self.substs["GRADLE_ANDROID_DEPENDENCIES_TASKS"] + ["--continue"] + args,
verbose=True,
)
return 0
@SubCommand('android', 'archive-geckoview',
"""Create GeckoView archives.
See http://firefox-source-docs.mozilla.org/build/buildsystem/toolchains.html#firefox-for-android-with-gradle""") # NOQA: E501
@CommandArgument('args', nargs=argparse.REMAINDER)
@SubCommand(
"android",
"archive-geckoview",
"""Create GeckoView archives.
See http://firefox-source-docs.mozilla.org/build/buildsystem/toolchains.html#firefox-for-android-with-gradle""", # NOQA: E501
)
@CommandArgument("args", nargs=argparse.REMAINDER)
def android_archive_geckoview(self, args):
ret = self.gradle(
self.substs['GRADLE_ANDROID_ARCHIVE_GECKOVIEW_TASKS'] + args,
verbose=True)
self.substs["GRADLE_ANDROID_ARCHIVE_GECKOVIEW_TASKS"] + args, verbose=True
)
return ret
@SubCommand('android', 'build-geckoview_example',
"""Build geckoview_example """)
@CommandArgument('args', nargs=argparse.REMAINDER)
@SubCommand("android", "build-geckoview_example", """Build geckoview_example """)
@CommandArgument("args", nargs=argparse.REMAINDER)
def android_build_geckoview_example(self, args):
self.gradle(self.substs['GRADLE_ANDROID_BUILD_GECKOVIEW_EXAMPLE_TASKS'] + args,
verbose=True)
self.gradle(
self.substs["GRADLE_ANDROID_BUILD_GECKOVIEW_EXAMPLE_TASKS"] + args,
verbose=True,
)
print('Execute `mach android install-geckoview_example` '
'to push the geckoview_example and test APKs to a device.')
print(
"Execute `mach android install-geckoview_example` "
"to push the geckoview_example and test APKs to a device."
)
return 0
@SubCommand('android', 'install-geckoview_example',
"""Install geckoview_example """)
@CommandArgument('args', nargs=argparse.REMAINDER)
@SubCommand(
"android", "install-geckoview_example", """Install geckoview_example """
)
@CommandArgument("args", nargs=argparse.REMAINDER)
def android_install_geckoview_example(self, args):
self.gradle(self.substs['GRADLE_ANDROID_INSTALL_GECKOVIEW_EXAMPLE_TASKS'] + args,
verbose=True)
self.gradle(
self.substs["GRADLE_ANDROID_INSTALL_GECKOVIEW_EXAMPLE_TASKS"] + args,
verbose=True,
)
print('Execute `mach android build-geckoview_example` '
'to just build the geckoview_example and test APKs.')
print(
"Execute `mach android build-geckoview_example` "
"to just build the geckoview_example and test APKs."
)
return 0
@SubCommand('android', 'geckoview-docs',
"""Create GeckoView javadoc and optionally upload to Github""")
@CommandArgument('--archive', action='store_true',
help='Generate a javadoc archive.')
@CommandArgument('--upload', metavar='USER/REPO',
help='Upload geckoview documentation to Github, '
'using the specified USER/REPO.')
@CommandArgument('--upload-branch', metavar='BRANCH[/PATH]',
default='gh-pages',
help='Use the specified branch/path for documentation commits.')
@CommandArgument('--javadoc-path', metavar='/PATH',
default='javadoc',
help='Use the specified path for javadoc commits.')
@CommandArgument('--upload-message', metavar='MSG',
default='GeckoView docs upload',
help='Use the specified message for commits.')
def android_geckoview_docs(self, archive, upload, upload_branch, javadoc_path,
upload_message):
@SubCommand(
"android",
"geckoview-docs",
"""Create GeckoView javadoc and optionally upload to Github""",
)
@CommandArgument(
"--archive", action="store_true", help="Generate a javadoc archive."
)
@CommandArgument(
"--upload",
metavar="USER/REPO",
help="Upload geckoview documentation to Github, "
"using the specified USER/REPO.",
)
@CommandArgument(
"--upload-branch",
metavar="BRANCH[/PATH]",
default="gh-pages",
help="Use the specified branch/path for documentation commits.",
)
@CommandArgument(
"--javadoc-path",
metavar="/PATH",
default="javadoc",
help="Use the specified path for javadoc commits.",
)
@CommandArgument(
"--upload-message",
metavar="MSG",
default="GeckoView docs upload",
help="Use the specified message for commits.",
)
def android_geckoview_docs(
self, archive, upload, upload_branch, javadoc_path, upload_message
):
tasks = (self.substs['GRADLE_ANDROID_GECKOVIEW_DOCS_ARCHIVE_TASKS'] if archive or upload
else self.substs['GRADLE_ANDROID_GECKOVIEW_DOCS_TASKS'])
tasks = (
self.substs["GRADLE_ANDROID_GECKOVIEW_DOCS_ARCHIVE_TASKS"]
if archive or upload
else self.substs["GRADLE_ANDROID_GECKOVIEW_DOCS_TASKS"]
)
ret = self.gradle(tasks, verbose=True)
if ret or not upload:
@ -202,9 +275,9 @@ REMOVED/DEPRECATED: Use 'mach lint --linter android-checkstyle'.""")
# Upload to Github.
fmt = {
'level': os.environ.get('MOZ_SCM_LEVEL', '0'),
'project': os.environ.get('MH_BRANCH', 'unknown'),
'revision': os.environ.get('GECKO_HEAD_REV', 'tip'),
"level": os.environ.get("MOZ_SCM_LEVEL", "0"),
"project": os.environ.get("MH_BRANCH", "unknown"),
"revision": os.environ.get("GECKO_HEAD_REV", "tip"),
}
env = {}
@ -212,66 +285,104 @@ REMOVED/DEPRECATED: Use 'mach lint --linter android-checkstyle'.""")
# in the TaskCluster secrets store in the format {"content": "<KEY>"},
# and the corresponding public key as a writable deploy key for the
# destination repo on GitHub.
secret = os.environ.get('GECKOVIEW_DOCS_UPLOAD_SECRET', '').format(**fmt)
secret = os.environ.get("GECKOVIEW_DOCS_UPLOAD_SECRET", "").format(**fmt)
if secret:
# Set up a private key from the secrets store if applicable.
import requests
req = requests.get('http://taskcluster/secrets/v1/secret/' + secret)
req = requests.get("http://taskcluster/secrets/v1/secret/" + secret)
req.raise_for_status()
keyfile = mozpath.abspath('gv-docs-upload-key')
with open(keyfile, 'w') as f:
keyfile = mozpath.abspath("gv-docs-upload-key")
with open(keyfile, "w") as f:
os.chmod(keyfile, 0o600)
f.write(req.json()['secret']['content'])
f.write(req.json()["secret"]["content"])
# Turn off strict host key checking so ssh does not complain about
# unknown github.com host. We're not pushing anything sensitive, so
# it's okay to not check GitHub's host keys.
env['GIT_SSH_COMMAND'] = 'ssh -i "%s" -o StrictHostKeyChecking=no' % keyfile
env["GIT_SSH_COMMAND"] = 'ssh -i "%s" -o StrictHostKeyChecking=no' % keyfile
# Clone remote repo.
branch = upload_branch.format(**fmt)
repo_url = 'git@github.com:%s.git' % upload
repo_path = mozpath.abspath('gv-docs-repo')
self.run_process(['git', 'clone', '--branch', upload_branch, '--depth', '1',
repo_url, repo_path], append_env=env, pass_thru=True)
env['GIT_DIR'] = mozpath.join(repo_path, '.git')
env['GIT_WORK_TREE'] = repo_path
env['GIT_AUTHOR_NAME'] = env['GIT_COMMITTER_NAME'] = 'GeckoView Docs Bot'
env['GIT_AUTHOR_EMAIL'] = env['GIT_COMMITTER_EMAIL'] = 'nobody@mozilla.com'
repo_url = "git@github.com:%s.git" % upload
repo_path = mozpath.abspath("gv-docs-repo")
self.run_process(
[
"git",
"clone",
"--branch",
upload_branch,
"--depth",
"1",
repo_url,
repo_path,
],
append_env=env,
pass_thru=True,
)
env["GIT_DIR"] = mozpath.join(repo_path, ".git")
env["GIT_WORK_TREE"] = repo_path
env["GIT_AUTHOR_NAME"] = env["GIT_COMMITTER_NAME"] = "GeckoView Docs Bot"
env["GIT_AUTHOR_EMAIL"] = env["GIT_COMMITTER_EMAIL"] = "nobody@mozilla.com"
# Copy over user documentation.
import mozfile
# Extract new javadoc to specified directory inside repo.
src_tar = mozpath.join(self.topobjdir, 'gradle', 'build', 'mobile', 'android',
'geckoview', 'libs', 'geckoview-javadoc.jar')
src_tar = mozpath.join(
self.topobjdir,
"gradle",
"build",
"mobile",
"android",
"geckoview",
"libs",
"geckoview-javadoc.jar",
)
dst_path = mozpath.join(repo_path, javadoc_path.format(**fmt))
mozfile.remove(dst_path)
mozfile.extract_zip(src_tar, dst_path)
# Commit and push.
self.run_process(['git', 'add', '--all'], append_env=env, pass_thru=True)
if self.run_process(['git', 'diff', '--cached', '--quiet'],
append_env=env, pass_thru=True, ensure_exit_code=False) != 0:
self.run_process(["git", "add", "--all"], append_env=env, pass_thru=True)
if (
self.run_process(
["git", "diff", "--cached", "--quiet"],
append_env=env,
pass_thru=True,
ensure_exit_code=False,
)
!= 0
):
# We have something to commit.
self.run_process(['git', 'commit',
'--message', upload_message.format(**fmt)],
append_env=env, pass_thru=True)
self.run_process(['git', 'push', 'origin', branch],
append_env=env, pass_thru=True)
self.run_process(
["git", "commit", "--message", upload_message.format(**fmt)],
append_env=env,
pass_thru=True,
)
self.run_process(
["git", "push", "origin", branch], append_env=env, pass_thru=True
)
mozfile.remove(repo_path)
if secret:
mozfile.remove(keyfile)
return 0
@Command('gradle', category='devenv',
description='Run gradle.',
conditions=[conditions.is_android])
@CommandArgument('-v', '--verbose', action='store_true',
help='Verbose output for what commands the build is running.')
@CommandArgument('args', nargs=argparse.REMAINDER)
@Command(
"gradle",
category="devenv",
description="Run gradle.",
conditions=[conditions.is_android],
)
@CommandArgument(
"-v",
"--verbose",
action="store_true",
help="Verbose output for what commands the build is running.",
)
@CommandArgument("args", nargs=argparse.REMAINDER)
def gradle(self, args, verbose=False):
if not verbose:
# Avoid logging the command
@@ -280,10 +391,11 @@ REMOVED/DEPRECATED: Use 'mach lint --linter android-checkstyle'.""")
# In automation, JAVA_HOME is set via mozconfig, which needs
# to be specially handled in each mach command. This turns
# $JAVA_HOME/bin/java into $JAVA_HOME.
java_home = os.path.dirname(os.path.dirname(self.substs['JAVA']))
java_home = os.path.dirname(os.path.dirname(self.substs["JAVA"]))
gradle_flags = self.substs.get('GRADLE_FLAGS', '') or \
os.environ.get('GRADLE_FLAGS', '')
gradle_flags = self.substs.get("GRADLE_FLAGS", "") or os.environ.get(
"GRADLE_FLAGS", ""
)
gradle_flags = shell_split(gradle_flags)
# We force the Gradle JVM to run with the UTF-8 encoding, since we
@@ -304,30 +416,32 @@ REMOVED/DEPRECATED: Use 'mach lint --linter android-checkstyle'.""")
# https://discuss.gradle.org/t/unmappable-character-for-encoding-ascii-when-building-a-utf-8-project/10692/11 # NOQA: E501
# and especially https://stackoverflow.com/a/21755671.
if self.substs.get('MOZ_AUTOMATION'):
gradle_flags += ['--console=plain']
if self.substs.get("MOZ_AUTOMATION"):
gradle_flags += ["--console=plain"]
env = os.environ.copy()
env.update({
'GRADLE_OPTS': '-Dfile.encoding=utf-8',
'JAVA_HOME': java_home,
'JAVA_TOOL_OPTIONS': '-Dfile.encoding=utf-8',
})
env.update(
{
"GRADLE_OPTS": "-Dfile.encoding=utf-8",
"JAVA_HOME": java_home,
"JAVA_TOOL_OPTIONS": "-Dfile.encoding=utf-8",
}
)
# Set ANDROID_SDK_ROOT if --with-android-sdk was set.
# See https://bugzilla.mozilla.org/show_bug.cgi?id=1576471
android_sdk_root = self.substs.get('ANDROID_SDK_ROOT', '')
android_sdk_root = self.substs.get("ANDROID_SDK_ROOT", "")
if android_sdk_root:
env['ANDROID_SDK_ROOT'] = android_sdk_root
env["ANDROID_SDK_ROOT"] = android_sdk_root
return self.run_process(
[self.substs['GRADLE']] + gradle_flags + args,
[self.substs["GRADLE"]] + gradle_flags + args,
explicit_env=env,
pass_thru=True, # Allow user to run gradle interactively.
ensure_exit_code=False, # Don't throw on non-zero exit code.
cwd=mozpath.join(self.topsrcdir))
cwd=mozpath.join(self.topsrcdir),
)
@Command('gradle-install', category='devenv',
conditions=[REMOVED])
@Command("gradle-install", category="devenv", conditions=[REMOVED])
def gradle_install_REMOVED(self):
pass
@@ -335,36 +449,50 @@ REMOVED/DEPRECATED: Use 'mach lint --linter android-checkstyle'.""")
@CommandProvider
class AndroidEmulatorCommands(MachCommandBase):
"""
Run the Android emulator with one of the AVDs used in the Mozilla
automated test environment. If necessary, the AVD is fetched from
the tooltool server and installed.
Run the Android emulator with one of the AVDs used in the Mozilla
automated test environment. If necessary, the AVD is fetched from
the tooltool server and installed.
"""
@Command('android-emulator', category='devenv',
conditions=[],
description='Run the Android emulator with an AVD from test automation. '
'Environment variable MOZ_EMULATOR_COMMAND_ARGS, if present, will '
'over-ride the command line arguments used to launch the emulator.')
@CommandArgument('--version', metavar='VERSION',
choices=['arm-4.3', 'x86-7.0'],
help='Specify which AVD to run in emulator. '
'One of "arm-4.3" (Android 4.3 supporting armv7 binaries), or '
'"x86-7.0" (Android 7.0 supporting x86 or x86_64 binaries, '
'recommended for most applications). '
'By default, "arm-4.3" will be used if the current build environment '
'architecture is arm; otherwise "x86-7.0".')
@CommandArgument('--wait', action='store_true',
help='Wait for emulator to be closed.')
@CommandArgument('--force-update', action='store_true',
help='Update AVD definition even when AVD is already installed.')
@CommandArgument('--gpu',
help='Over-ride the emulator -gpu argument.')
@CommandArgument('--verbose', action='store_true',
help='Log informative status messages.')
def emulator(self, version, wait=False, force_update=False, gpu=None, verbose=False):
@Command(
"android-emulator",
category="devenv",
conditions=[],
description="Run the Android emulator with an AVD from test automation. "
"Environment variable MOZ_EMULATOR_COMMAND_ARGS, if present, will "
"over-ride the command line arguments used to launch the emulator.",
)
@CommandArgument(
"--version",
metavar="VERSION",
choices=["arm-4.3", "x86-7.0"],
help="Specify which AVD to run in emulator. "
'One of "arm-4.3" (Android 4.3 supporting armv7 binaries), or '
'"x86-7.0" (Android 7.0 supporting x86 or x86_64 binaries, '
"recommended for most applications). "
'By default, "arm-4.3" will be used if the current build environment '
'architecture is arm; otherwise "x86-7.0".',
)
@CommandArgument(
"--wait", action="store_true", help="Wait for emulator to be closed."
)
@CommandArgument(
"--force-update",
action="store_true",
help="Update AVD definition even when AVD is already installed.",
)
@CommandArgument("--gpu", help="Over-ride the emulator -gpu argument.")
@CommandArgument(
"--verbose", action="store_true", help="Log informative status messages."
)
def emulator(
self, version, wait=False, force_update=False, gpu=None, verbose=False
):
from mozrunner.devices.android_device import AndroidEmulator
emulator = AndroidEmulator(version, verbose, substs=self.substs,
device_serial='emulator-5554')
emulator = AndroidEmulator(
version, verbose, substs=self.substs, device_serial="emulator-5554"
)
if emulator.is_running():
# It is possible to run multiple emulators simultaneously, but:
# - if more than one emulator is using the same avd, errors may
@@ -372,51 +500,86 @@ class AndroidEmulatorCommands(MachCommandBase):
# - additional parameters must be specified when running tests,
# to select a specific device.
# To avoid these complications, allow just one emulator at a time.
self.log(logging.ERROR, "emulator", {},
"An Android emulator is already running.\n"
"Close the existing emulator and re-run this command.")
self.log(
logging.ERROR,
"emulator",
{},
"An Android emulator is already running.\n"
"Close the existing emulator and re-run this command.",
)
return 1
if not emulator.is_available():
self.log(logging.WARN, "emulator", {},
"Emulator binary not found.\n"
"Install the Android SDK and make sure 'emulator' is in your PATH.")
self.log(
logging.WARN,
"emulator",
{},
"Emulator binary not found.\n"
"Install the Android SDK and make sure 'emulator' is in your PATH.",
)
return 2
if not emulator.check_avd(force_update):
self.log(logging.INFO, "emulator", {},
"Fetching and installing AVD. This may take a few minutes...")
self.log(
logging.INFO,
"emulator",
{},
"Fetching and installing AVD. This may take a few minutes...",
)
emulator.update_avd(force_update)
self.log(logging.INFO, "emulator", {},
"Starting Android emulator running %s..." %
emulator.get_avd_description())
self.log(
logging.INFO,
"emulator",
{},
"Starting Android emulator running %s..." % emulator.get_avd_description(),
)
emulator.start(gpu)
if emulator.wait_for_start():
self.log(logging.INFO, "emulator", {},
"Android emulator is running.")
self.log(logging.INFO, "emulator", {}, "Android emulator is running.")
else:
# This is unusual but the emulator may still function.
self.log(logging.WARN, "emulator", {},
"Unable to verify that emulator is running.")
self.log(
logging.WARN,
"emulator",
{},
"Unable to verify that emulator is running.",
)
if conditions.is_android(self):
self.log(logging.INFO, "emulator", {},
"Use 'mach install' to install or update Firefox on your emulator.")
self.log(
logging.INFO,
"emulator",
{},
"Use 'mach install' to install or update Firefox on your emulator.",
)
else:
self.log(logging.WARN, "emulator", {},
"No Firefox for Android build detected.\n"
"Switch to a Firefox for Android build context or use 'mach bootstrap'\n"
"to setup an Android build environment.")
self.log(
logging.WARN,
"emulator",
{},
"No Firefox for Android build detected.\n"
"Switch to a Firefox for Android build context or use 'mach bootstrap'\n"
"to setup an Android build environment.",
)
if wait:
self.log(logging.INFO, "emulator", {},
"Waiting for Android emulator to close...")
self.log(
logging.INFO, "emulator", {}, "Waiting for Android emulator to close..."
)
rc = emulator.wait()
if rc is not None:
self.log(logging.INFO, "emulator", {},
"Android emulator completed with return code %d." % rc)
self.log(
logging.INFO,
"emulator",
{},
"Android emulator completed with return code %d." % rc,
)
else:
self.log(logging.WARN, "emulator", {},
"Unable to retrieve Android emulator return code.")
self.log(
logging.WARN,
"emulator",
{},
"Unable to retrieve Android emulator return code.",
)
return 0


@@ -4,10 +4,10 @@
from __future__ import division, absolute_import, print_function, unicode_literals
'''
"""
This file contains a voluptuous schema definition for build system telemetry, and functions
to fill an instance of that schema for a single mach invocation.
'''
"""
import json
import os
@@ -31,104 +31,150 @@ import mozpack.path as mozpath
from .base import BuildEnvironmentNotFoundException
from .configure.constants import CompilerType
schema = Schema({
Required('client_id', description='A UUID to uniquely identify a client'): Any(*string_types),
Required('time', description='Time at which this event happened'): Datetime(),
Required('command', description='The mach command that was invoked'): Any(*string_types),
Required('argv', description=(
'Full mach commandline. ' +
'If the commandline contains ' +
'absolute paths they will be sanitized.')): [Any(*string_types)],
Required('success', description='true if the command succeeded'): bool,
Optional('exception', description=(
'If a Python exception was encountered during the execution ' +
'of the command, this value contains the result of calling `repr` ' +
'on the exception object.')): Any(*string_types),
Optional('file_types_changed', description=(
'This array contains a list of objects with {ext, count} properties giving the count ' +
'of files changed since the last invocation grouped by file type')): [
schema = Schema(
{
Required("client_id", description="A UUID to uniquely identify a client"): Any(
*string_types
),
Required("time", description="Time at which this event happened"): Datetime(),
Required("command", description="The mach command that was invoked"): Any(
*string_types
),
Required(
"argv",
description=(
"Full mach commandline. "
+ "If the commandline contains "
+ "absolute paths they will be sanitized."
),
): [Any(*string_types)],
Required("success", description="true if the command succeeded"): bool,
Optional(
"exception",
description=(
"If a Python exception was encountered during the execution "
+ "of the command, this value contains the result of calling `repr` "
+ "on the exception object."
),
): Any(*string_types),
Optional(
"file_types_changed",
description=(
"This array contains a list of objects with {ext, count} properties giving the "
+ "count of files changed since the last invocation grouped by file type"
),
): [
{
Required('ext', description='File extension'): Any(*string_types),
Required('count', description='Count of changed files with this extension'): int,
Required("ext", description="File extension"): Any(*string_types),
Required(
"count", description="Count of changed files with this extension"
): int,
}
],
Required('duration_ms', description='Command duration in milliseconds'): int,
Required('build_opts', description='Selected build options'): {
Optional('compiler', description='The compiler type in use (CC_TYPE)'):
Any(*CompilerType.POSSIBLE_VALUES),
Optional('artifact', description='true if --enable-artifact-builds'): bool,
Optional('debug', description='true if build is debug (--enable-debug)'): bool,
Optional('opt', description='true if build is optimized (--enable-optimize)'): bool,
Optional('ccache', description='true if ccache is in use (--with-ccache)'): bool,
Optional('sccache', description='true if ccache in use is sccache'): bool,
Optional('icecream', description='true if icecream in use'): bool,
},
Optional('build_attrs', description='Attributes characterizing a build'): {
Optional('cpu_percent', description='cpu utilization observed during a build'): int,
Optional('clobber', description='true if the build was a clobber/full build'): bool,
},
Required('system'): {
# We don't need perfect granularity here.
Required('os', description='Operating system'): Any('windows', 'macos', 'linux', 'other'),
Optional('cpu_brand', description='CPU brand string from CPUID'): Any(*string_types),
Optional('logical_cores', description='Number of logical CPU cores present'): int,
Optional('physical_cores', description='Number of physical CPU cores present'): int,
Optional('memory_gb', description='System memory in GB'): int,
Optional('drive_is_ssd',
description='true if the source directory is on a solid-state disk'): bool,
Optional('virtual_machine',
description='true if the OS appears to be running in a virtual machine'): bool,
},
})
Required("duration_ms", description="Command duration in milliseconds"): int,
Required("build_opts", description="Selected build options"): {
Optional("compiler", description="The compiler type in use (CC_TYPE)"): Any(
*CompilerType.POSSIBLE_VALUES
),
Optional("artifact", description="true if --enable-artifact-builds"): bool,
Optional(
"debug", description="true if build is debug (--enable-debug)"
): bool,
Optional(
"opt", description="true if build is optimized (--enable-optimize)"
): bool,
Optional(
"ccache", description="true if ccache is in use (--with-ccache)"
): bool,
Optional("sccache", description="true if ccache in use is sccache"): bool,
Optional("icecream", description="true if icecream in use"): bool,
},
Optional("build_attrs", description="Attributes characterizing a build"): {
Optional(
"cpu_percent", description="cpu utilization observed during a build"
): int,
Optional(
"clobber", description="true if the build was a clobber/full build"
): bool,
},
Required("system"): {
# We don't need perfect granularity here.
Required("os", description="Operating system"): Any(
"windows", "macos", "linux", "other"
),
Optional("cpu_brand", description="CPU brand string from CPUID"): Any(
*string_types
),
Optional(
"logical_cores", description="Number of logical CPU cores present"
): int,
Optional(
"physical_cores", description="Number of physical CPU cores present"
): int,
Optional("memory_gb", description="System memory in GB"): int,
Optional(
"drive_is_ssd",
description="true if the source directory is on a solid-state disk",
): bool,
Optional(
"virtual_machine",
description="true if the OS appears to be running in a virtual machine",
): bool,
},
}
)
def get_client_id(state_dir):
'''
"""
Get a client id, which is a UUID, from a file in the state directory. If the file doesn't
exist, generate a UUID and save it to a file.
'''
path = os.path.join(state_dir, 'telemetry_client_id.json')
"""
path = os.path.join(state_dir, "telemetry_client_id.json")
if os.path.exists(path):
with open(path, 'r') as f:
return json.load(f)['client_id']
with open(path, "r") as f:
return json.load(f)["client_id"]
import uuid
# uuid4 is random, other uuid types may include identifiers from the local system.
client_id = str(uuid.uuid4())
if PY3:
file_mode = 'w'
file_mode = "w"
else:
file_mode = 'wb'
file_mode = "wb"
with open(path, file_mode) as f:
json.dump({'client_id': client_id}, f)
json.dump({"client_id": client_id}, f)
return client_id
def cpu_brand_linux():
'''
"""
Read the CPU brand string out of /proc/cpuinfo on Linux.
'''
with open('/proc/cpuinfo', 'r') as f:
"""
with open("/proc/cpuinfo", "r") as f:
for line in f:
if line.startswith('model name'):
_, brand = line.split(': ', 1)
if line.startswith("model name"):
_, brand = line.split(": ", 1)
return brand.rstrip()
# not found?
return None
def cpu_brand_windows():
'''
"""
Read the CPU brand string from the registry on Windows.
'''
"""
try:
import _winreg
except ImportError:
import winreg as _winreg
try:
h = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,
r'HARDWARE\DESCRIPTION\System\CentralProcessor\0')
(brand, ty) = _winreg.QueryValueEx(h, 'ProcessorNameString')
h = _winreg.OpenKey(
_winreg.HKEY_LOCAL_MACHINE,
r"HARDWARE\DESCRIPTION\System\CentralProcessor\0",
)
(brand, ty) = _winreg.QueryValueEx(h, "ProcessorNameString")
if ty == _winreg.REG_SZ:
return brand
except WindowsError:
@@ -137,23 +183,26 @@ def cpu_brand_windows():
def cpu_brand_mac():
'''
"""
Get the CPU brand string via sysctl on macos.
'''
"""
import ctypes
import ctypes.util
libc = ctypes.cdll.LoadLibrary(ctypes.util.find_library("c"))
# First, find the required buffer size.
bufsize = ctypes.c_size_t(0)
result = libc.sysctlbyname(b'machdep.cpu.brand_string', None, ctypes.byref(bufsize),
None, 0)
result = libc.sysctlbyname(
b"machdep.cpu.brand_string", None, ctypes.byref(bufsize), None, 0
)
if result != 0:
return None
bufsize.value += 1
buf = ctypes.create_string_buffer(bufsize.value)
# Now actually get the value.
result = libc.sysctlbyname(b'machdep.cpu.brand_string', buf, ctypes.byref(bufsize), None, 0)
result = libc.sysctlbyname(
b"machdep.cpu.brand_string", buf, ctypes.byref(bufsize), None, 0
)
if result != 0:
return None
@@ -161,30 +210,30 @@ def cpu_brand_mac():
def get_cpu_brand():
'''
"""
Get the CPU brand string as returned by CPUID.
'''
"""
return {
'Linux': cpu_brand_linux,
'Windows': cpu_brand_windows,
'Darwin': cpu_brand_mac,
"Linux": cpu_brand_linux,
"Windows": cpu_brand_windows,
"Darwin": cpu_brand_mac,
}.get(platform.system(), lambda: None)()
def get_os_name():
return {
'Linux': 'linux',
'Windows': 'windows',
'Darwin': 'macos',
}.get(platform.system(), 'other')
"Linux": "linux",
"Windows": "windows",
"Darwin": "macos",
}.get(platform.system(), "other")
def get_psutil_stats():
'''Return whether psutil exists and its associated stats.
"""Return whether psutil exists and its associated stats.
@returns (bool, int, int, int) whether psutil exists, the logical CPU count,
physical CPU count, and total number of bytes of memory.
'''
"""
try:
import psutil
@@ -192,91 +241,93 @@ def get_psutil_stats():
True,
psutil.cpu_count(),
psutil.cpu_count(logical=False),
psutil.virtual_memory().total)
psutil.virtual_memory().total,
)
except ImportError:
return False, None, None, None
def get_system_info():
'''
"""
Gather info to fill the `system` keys in the schema.
'''
"""
# Normalize OS names a bit, and bucket non-tier-1 platforms into "other".
has_psutil, logical_cores, physical_cores, memory_total = get_psutil_stats()
info = {
'os': get_os_name(),
"os": get_os_name(),
}
if has_psutil:
# `total` on Linux is gathered from /proc/meminfo's `MemTotal`, which is the
# total amount of physical memory minus some kernel usage, so round up to the
# nearest GB to get a sensible answer.
info['memory_gb'] = int(math.ceil(float(memory_total) / (1024 * 1024 * 1024)))
info['logical_cores'] = logical_cores
info["memory_gb"] = int(math.ceil(float(memory_total) / (1024 * 1024 * 1024)))
info["logical_cores"] = logical_cores
if physical_cores is not None:
info['physical_cores'] = physical_cores
info["physical_cores"] = physical_cores
cpu_brand = get_cpu_brand()
if cpu_brand is not None:
info['cpu_brand'] = cpu_brand
info["cpu_brand"] = cpu_brand
# TODO: drive_is_ssd, virtual_machine: https://bugzilla.mozilla.org/show_bug.cgi?id=1481613
return info
def get_build_opts(substs):
'''
"""
Translate selected items from `substs` into `build_opts` keys in the schema.
'''
"""
try:
opts = {
k: ty(substs.get(s, None)) for (k, s, ty) in (
k: ty(substs.get(s, None))
for (k, s, ty) in (
# Selected substitutions.
('artifact', 'MOZ_ARTIFACT_BUILDS', bool),
('debug', 'MOZ_DEBUG', bool),
('opt', 'MOZ_OPTIMIZE', bool),
('ccache', 'CCACHE', bool),
('sccache', 'MOZ_USING_SCCACHE', bool),
("artifact", "MOZ_ARTIFACT_BUILDS", bool),
("debug", "MOZ_DEBUG", bool),
("opt", "MOZ_OPTIMIZE", bool),
("ccache", "CCACHE", bool),
("sccache", "MOZ_USING_SCCACHE", bool),
)
}
compiler = substs.get('CC_TYPE', None)
compiler = substs.get("CC_TYPE", None)
if compiler:
opts['compiler'] = str(compiler)
if substs.get('CXX_IS_ICECREAM', None):
opts['icecream'] = True
opts["compiler"] = str(compiler)
if substs.get("CXX_IS_ICECREAM", None):
opts["icecream"] = True
return opts
except BuildEnvironmentNotFoundException:
return {}
def get_build_attrs(attrs):
'''
"""
Extracts clobber and cpu usage info from command attributes.
'''
"""
res = {}
clobber = attrs.get('clobber')
clobber = attrs.get("clobber")
if clobber:
res['clobber'] = clobber
usage = attrs.get('usage')
res["clobber"] = clobber
usage = attrs.get("usage")
if usage:
cpu_percent = usage.get('cpu_percent')
cpu_percent = usage.get("cpu_percent")
if cpu_percent:
res['cpu_percent'] = int(round(cpu_percent))
res["cpu_percent"] = int(round(cpu_percent))
return res
def filter_args(command, argv, instance):
'''
"""
Given the full list of command-line arguments, remove anything up to and including `command`,
and attempt to filter absolute pathnames out of any arguments after that.
'''
"""
# Each key is a pathname and the values are replacement sigils
paths = {
instance.topsrcdir: '$topsrcdir/',
instance.topobjdir: '$topobjdir/',
mozpath.normpath(os.path.expanduser('~')): '$HOME/',
instance.topsrcdir: "$topsrcdir/",
instance.topobjdir: "$topobjdir/",
mozpath.normpath(os.path.expanduser("~")): "$HOME/",
# This might override one of the existing entries, that's OK.
# We don't use a sigil here because we treat all arguments as potentially relative
# paths, so we'd like to get them back as they were specified.
mozpath.normpath(os.getcwd()): '',
mozpath.normpath(os.getcwd()): "",
}
args = list(argv)
@@ -291,36 +342,38 @@ def filter_args(command, argv, instance):
if base:
return paths[base] + mozpath.relpath(p, base)
# Best-effort.
return '<path omitted>'
return "<path omitted>"
return [filter_path(arg) for arg in args]
def gather_telemetry(command, success, start_time, end_time, mach_context,
instance, command_attrs):
'''
def gather_telemetry(
command, success, start_time, end_time, mach_context, instance, command_attrs
):
"""
Gather telemetry about the build and the user's system and pass it to the telemetry
handler to be stored for later submission.
Any absolute paths on the command line will be made relative to a relevant base path
or replaced with a placeholder to avoid including paths from developer's machines.
'''
"""
try:
substs = instance.substs
except BuildEnvironmentNotFoundException:
substs = {}
data = {
'client_id': get_client_id(mach_context.state_dir),
"client_id": get_client_id(mach_context.state_dir),
# Get an rfc3339 datetime string.
'time': datetime.utcfromtimestamp(start_time).strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
'command': command,
'argv': filter_args(command, sys.argv, instance),
'success': success,
"time": datetime.utcfromtimestamp(start_time).strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
"command": command,
"argv": filter_args(command, sys.argv, instance),
"success": success,
# TODO: use a monotonic clock: https://bugzilla.mozilla.org/show_bug.cgi?id=1481624
'duration_ms': int((end_time - start_time) * 1000),
'build_opts': get_build_opts(substs),
'build_attrs': get_build_attrs(command_attrs),
'system': get_system_info(),
"duration_ms": int((end_time - start_time) * 1000),
"build_opts": get_build_opts(substs),
"build_attrs": get_build_attrs(command_attrs),
"system": get_system_info(),
# TODO: exception: https://bugzilla.mozilla.org/show_bug.cgi?id=1481617
# TODO: file_types_changed: https://bugzilla.mozilla.org/show_bug.cgi?id=1481774
}
@@ -329,15 +382,15 @@ def gather_telemetry(command, success, start_time, end_time, mach_context,
schema(data)
return data
except MultipleInvalid as exc:
msg = ['Build telemetry is invalid:']
msg = ["Build telemetry is invalid:"]
for error in exc.errors:
msg.append(str(error))
print('\n'.join(msg) + '\n' + pprint.pformat(data))
print("\n".join(msg) + "\n" + pprint.pformat(data))
return None
def verify_statedir(statedir):
'''
"""
Verifies the statedir is structured correctly. Returns the outgoing,
submitted and log paths.
@@ -347,18 +400,18 @@ def verify_statedir(statedir):
Creates the following directories and files if absent (first submission):
- statedir/telemetry/submitted
'''
"""
telemetry_dir = os.path.join(statedir, 'telemetry')
outgoing = os.path.join(telemetry_dir, 'outgoing')
submitted = os.path.join(telemetry_dir, 'submitted')
telemetry_log = os.path.join(telemetry_dir, 'telemetry.log')
telemetry_dir = os.path.join(statedir, "telemetry")
outgoing = os.path.join(telemetry_dir, "outgoing")
submitted = os.path.join(telemetry_dir, "submitted")
telemetry_log = os.path.join(telemetry_dir, "telemetry.log")
if not os.path.isdir(telemetry_dir):
raise Exception('{} does not exist'.format(telemetry_dir))
raise Exception("{} does not exist".format(telemetry_dir))
if not os.path.isdir(outgoing):
raise Exception('{} does not exist'.format(outgoing))
raise Exception("{} does not exist".format(outgoing))
if not os.path.isdir(submitted):
os.mkdir(submitted)

View File

@@ -26,18 +26,18 @@ from tempfile import mkdtemp
BASE_SUBSTS = [
('PYTHON', mozpath.normsep(sys.executable)),
('PYTHON3', mozpath.normsep(sys.executable)),
('MOZ_UI_LOCALE', 'en-US'),
("PYTHON", mozpath.normsep(sys.executable)),
("PYTHON3", mozpath.normsep(sys.executable)),
("MOZ_UI_LOCALE", "en-US"),
]
class TestBuild(unittest.TestCase):
def setUp(self):
self._old_env = dict(os.environ)
os.environ.pop('MOZCONFIG', None)
os.environ.pop('MOZ_OBJDIR', None)
os.environ.pop('MOZ_PGO', None)
os.environ.pop("MOZCONFIG", None)
os.environ.pop("MOZ_OBJDIR", None)
os.environ.pop("MOZ_PGO", None)
def tearDown(self):
os.environ.clear()
@@ -49,13 +49,11 @@ class TestBuild(unittest.TestCase):
# the same drive on Windows.
topobjdir = mkdtemp(dir=buildconfig.topsrcdir)
try:
config = ConfigEnvironment(buildconfig.topsrcdir, topobjdir,
**kwargs)
config = ConfigEnvironment(buildconfig.topsrcdir, topobjdir, **kwargs)
reader = BuildReader(config)
emitter = TreeMetadataEmitter(config)
moz_build = mozpath.join(config.topsrcdir, 'test.mozbuild')
definitions = list(emitter.emit(
reader.read_mozbuild(moz_build, config)))
moz_build = mozpath.join(config.topsrcdir, "test.mozbuild")
definitions = list(emitter.emit(reader.read_mozbuild(moz_build, config)))
for backend in backends:
backend(config).consume(definitions)
@@ -63,7 +61,7 @@ class TestBuild(unittest.TestCase):
except Exception:
raise
finally:
if not os.environ.get('MOZ_NO_CLEANUP'):
if not os.environ.get("MOZ_NO_CLEANUP"):
shutil.rmtree(topobjdir)
@contextmanager
@@ -76,162 +74,172 @@ class TestBuild(unittest.TestCase):
try:
yield handle_make_line
except Exception:
print('\n'.join(lines))
print("\n".join(lines))
raise
if os.environ.get('MOZ_VERBOSE_MAKE'):
print('\n'.join(lines))
if os.environ.get("MOZ_VERBOSE_MAKE"):
print("\n".join(lines))
def test_recursive_make(self):
substs = list(BASE_SUBSTS)
with self.do_test_backend(RecursiveMakeBackend,
substs=substs) as config:
build = MozbuildObject(config.topsrcdir, None, None,
config.topobjdir)
with self.do_test_backend(RecursiveMakeBackend, substs=substs) as config:
build = MozbuildObject(config.topsrcdir, None, None, config.topobjdir)
overrides = [
'install_manifest_depends=',
'MOZ_JAR_MAKER_FILE_FORMAT=flat',
'TEST_MOZBUILD=1',
"install_manifest_depends=",
"MOZ_JAR_MAKER_FILE_FORMAT=flat",
"TEST_MOZBUILD=1",
]
with self.line_handler() as handle_make_line:
build._run_make(directory=config.topobjdir, target=overrides,
silent=False, line_handler=handle_make_line)
build._run_make(
directory=config.topobjdir,
target=overrides,
silent=False,
line_handler=handle_make_line,
)
self.validate(config)
def test_faster_recursive_make(self):
substs = list(BASE_SUBSTS) + [
('BUILD_BACKENDS', 'FasterMake+RecursiveMake'),
("BUILD_BACKENDS", "FasterMake+RecursiveMake"),
]
with self.do_test_backend(get_backend_class(
'FasterMake+RecursiveMake'), substs=substs) as config:
buildid = mozpath.join(config.topobjdir, 'config', 'buildid')
with self.do_test_backend(
get_backend_class("FasterMake+RecursiveMake"), substs=substs
) as config:
buildid = mozpath.join(config.topobjdir, "config", "buildid")
ensureParentDir(buildid)
with open(buildid, 'w') as fh:
fh.write('20100101012345\n')
with open(buildid, "w") as fh:
fh.write("20100101012345\n")
build = MozbuildObject(config.topsrcdir, None, None,
config.topobjdir)
build = MozbuildObject(config.topsrcdir, None, None, config.topobjdir)
overrides = [
'install_manifest_depends=',
'MOZ_JAR_MAKER_FILE_FORMAT=flat',
'TEST_MOZBUILD=1',
"install_manifest_depends=",
"MOZ_JAR_MAKER_FILE_FORMAT=flat",
"TEST_MOZBUILD=1",
]
with self.line_handler() as handle_make_line:
build._run_make(directory=config.topobjdir, target=overrides,
silent=False, line_handler=handle_make_line)
build._run_make(
directory=config.topobjdir,
target=overrides,
silent=False,
line_handler=handle_make_line,
)
self.validate(config)
def test_faster_make(self):
substs = list(BASE_SUBSTS) + [
('MOZ_BUILD_APP', 'dummy_app'),
('MOZ_WIDGET_TOOLKIT', 'dummy_widget'),
("MOZ_BUILD_APP", "dummy_app"),
("MOZ_WIDGET_TOOLKIT", "dummy_widget"),
]
with self.do_test_backend(RecursiveMakeBackend, FasterMakeBackend,
substs=substs) as config:
buildid = mozpath.join(config.topobjdir, 'config', 'buildid')
with self.do_test_backend(
RecursiveMakeBackend, FasterMakeBackend, substs=substs
) as config:
buildid = mozpath.join(config.topobjdir, "config", "buildid")
ensureParentDir(buildid)
with open(buildid, 'w') as fh:
fh.write('20100101012345\n')
with open(buildid, "w") as fh:
fh.write("20100101012345\n")
build = MozbuildObject(config.topsrcdir, None, None,
config.topobjdir)
build = MozbuildObject(config.topsrcdir, None, None, config.topobjdir)
overrides = [
'TEST_MOZBUILD=1',
"TEST_MOZBUILD=1",
]
with self.line_handler() as handle_make_line:
build._run_make(directory=mozpath.join(config.topobjdir,
'faster'),
target=overrides, silent=False,
line_handler=handle_make_line)
build._run_make(
directory=mozpath.join(config.topobjdir, "faster"),
target=overrides,
silent=False,
line_handler=handle_make_line,
)
self.validate(config)
def validate(self, config):
self.maxDiff = None
test_path = mozpath.join('$SRCDIR', 'python', 'mozbuild', 'mozbuild',
'test', 'backend', 'data', 'build')
test_path = mozpath.join(
"$SRCDIR",
"python",
"mozbuild",
"mozbuild",
"test",
"backend",
"data",
"build",
)
result = {
p: six.ensure_text(f.open().read())
for p, f in FileFinder(mozpath.join(config.topobjdir, 'dist'))
for p, f in FileFinder(mozpath.join(config.topobjdir, "dist"))
}
self.assertTrue(len(result))
self.assertEqual(result, {
'bin/baz.ini': 'baz.ini: FOO is foo\n',
'bin/child/bar.ini': 'bar.ini\n',
'bin/child2/foo.css': 'foo.css: FOO is foo\n',
'bin/child2/qux.ini': 'qux.ini: BAR is not defined\n',
'bin/chrome.manifest':
'manifest chrome/foo.manifest\n'
'manifest components/components.manifest\n',
'bin/chrome/foo.manifest':
'content bar foo/child/\n'
'content foo foo/\n'
'override chrome://foo/bar.svg#hello '
'chrome://bar/bar.svg#hello\n',
'bin/chrome/foo/bar.js': 'bar.js\n',
'bin/chrome/foo/child/baz.jsm':
'//@line 2 "%s/baz.jsm"\nbaz.jsm: FOO is foo\n' % (test_path),
'bin/chrome/foo/child/hoge.js':
'//@line 2 "%s/bar.js"\nbar.js: FOO is foo\n' % (test_path),
'bin/chrome/foo/foo.css': 'foo.css: FOO is foo\n',
'bin/chrome/foo/foo.js': 'foo.js\n',
'bin/chrome/foo/qux.js': 'bar.js\n',
'bin/components/bar.js':
'//@line 2 "%s/bar.js"\nbar.js: FOO is foo\n' % (test_path),
'bin/components/components.manifest':
'component {foo} foo.js\ncomponent {bar} bar.js\n',
'bin/components/foo.js': 'foo.js\n',
'bin/defaults/pref/prefs.js': 'prefs.js\n',
'bin/foo.ini': 'foo.ini\n',
'bin/modules/baz.jsm':
'//@line 2 "%s/baz.jsm"\nbaz.jsm: FOO is foo\n' % (test_path),
'bin/modules/child/bar.jsm': 'bar.jsm\n',
'bin/modules/child2/qux.jsm':
'//@line 4 "%s/qux.jsm"\nqux.jsm: BAR is not defined\n'
self.assertEqual(
result,
{
"bin/baz.ini": "baz.ini: FOO is foo\n",
"bin/child/bar.ini": "bar.ini\n",
"bin/child2/foo.css": "foo.css: FOO is foo\n",
"bin/child2/qux.ini": "qux.ini: BAR is not defined\n",
"bin/chrome.manifest": "manifest chrome/foo.manifest\n"
"manifest components/components.manifest\n",
"bin/chrome/foo.manifest": "content bar foo/child/\n"
"content foo foo/\n"
"override chrome://foo/bar.svg#hello "
"chrome://bar/bar.svg#hello\n",
"bin/chrome/foo/bar.js": "bar.js\n",
"bin/chrome/foo/child/baz.jsm": '//@line 2 "%s/baz.jsm"\nbaz.jsm: FOO is foo\n'
% (test_path),
'bin/modules/foo.jsm': 'foo.jsm\n',
'bin/res/resource': 'resource\n',
'bin/res/child/resource2': 'resource2\n',
'bin/app/baz.ini': 'baz.ini: FOO is bar\n',
'bin/app/child/bar.ini': 'bar.ini\n',
'bin/app/child2/qux.ini': 'qux.ini: BAR is defined\n',
'bin/app/chrome.manifest':
'manifest chrome/foo.manifest\n'
'manifest components/components.manifest\n',
'bin/app/chrome/foo.manifest':
'content bar foo/child/\n'
'content foo foo/\n'
'override chrome://foo/bar.svg#hello '
'chrome://bar/bar.svg#hello\n',
'bin/app/chrome/foo/bar.js': 'bar.js\n',
'bin/app/chrome/foo/child/baz.jsm':
'//@line 2 "%s/baz.jsm"\nbaz.jsm: FOO is bar\n' % (test_path),
'bin/app/chrome/foo/child/hoge.js':
'//@line 2 "%s/bar.js"\nbar.js: FOO is bar\n' % (test_path),
'bin/app/chrome/foo/foo.css': 'foo.css: FOO is bar\n',
'bin/app/chrome/foo/foo.js': 'foo.js\n',
'bin/app/chrome/foo/qux.js': 'bar.js\n',
'bin/app/components/bar.js':
'//@line 2 "%s/bar.js"\nbar.js: FOO is bar\n' % (test_path),
'bin/app/components/components.manifest':
'component {foo} foo.js\ncomponent {bar} bar.js\n',
'bin/app/components/foo.js': 'foo.js\n',
'bin/app/defaults/preferences/prefs.js': 'prefs.js\n',
'bin/app/foo.css': 'foo.css: FOO is bar\n',
'bin/app/foo.ini': 'foo.ini\n',
'bin/app/modules/baz.jsm':
'//@line 2 "%s/baz.jsm"\nbaz.jsm: FOO is bar\n' % (test_path),
'bin/app/modules/child/bar.jsm': 'bar.jsm\n',
'bin/app/modules/child2/qux.jsm':
'//@line 2 "%s/qux.jsm"\nqux.jsm: BAR is defined\n'
"bin/chrome/foo/child/hoge.js": '//@line 2 "%s/bar.js"\nbar.js: FOO is foo\n'
% (test_path),
'bin/app/modules/foo.jsm': 'foo.jsm\n',
})
"bin/chrome/foo/foo.css": "foo.css: FOO is foo\n",
"bin/chrome/foo/foo.js": "foo.js\n",
"bin/chrome/foo/qux.js": "bar.js\n",
"bin/components/bar.js": '//@line 2 "%s/bar.js"\nbar.js: FOO is foo\n'
% (test_path),
"bin/components/components.manifest": "component {foo} foo.js\ncomponent {bar} bar.js\n", # NOQA: E501
"bin/components/foo.js": "foo.js\n",
"bin/defaults/pref/prefs.js": "prefs.js\n",
"bin/foo.ini": "foo.ini\n",
"bin/modules/baz.jsm": '//@line 2 "%s/baz.jsm"\nbaz.jsm: FOO is foo\n'
% (test_path),
"bin/modules/child/bar.jsm": "bar.jsm\n",
"bin/modules/child2/qux.jsm": '//@line 4 "%s/qux.jsm"\nqux.jsm: BAR is not defined\n' # NOQA: E501
% (test_path),
"bin/modules/foo.jsm": "foo.jsm\n",
"bin/res/resource": "resource\n",
"bin/res/child/resource2": "resource2\n",
"bin/app/baz.ini": "baz.ini: FOO is bar\n",
"bin/app/child/bar.ini": "bar.ini\n",
"bin/app/child2/qux.ini": "qux.ini: BAR is defined\n",
"bin/app/chrome.manifest": "manifest chrome/foo.manifest\n"
"manifest components/components.manifest\n",
"bin/app/chrome/foo.manifest": "content bar foo/child/\n"
"content foo foo/\n"
"override chrome://foo/bar.svg#hello "
"chrome://bar/bar.svg#hello\n",
"bin/app/chrome/foo/bar.js": "bar.js\n",
"bin/app/chrome/foo/child/baz.jsm": '//@line 2 "%s/baz.jsm"\nbaz.jsm: FOO is bar\n'
% (test_path),
"bin/app/chrome/foo/child/hoge.js": '//@line 2 "%s/bar.js"\nbar.js: FOO is bar\n'
% (test_path),
"bin/app/chrome/foo/foo.css": "foo.css: FOO is bar\n",
"bin/app/chrome/foo/foo.js": "foo.js\n",
"bin/app/chrome/foo/qux.js": "bar.js\n",
"bin/app/components/bar.js": '//@line 2 "%s/bar.js"\nbar.js: FOO is bar\n'
% (test_path),
"bin/app/components/components.manifest": "component {foo} foo.js\ncomponent {bar} bar.js\n", # NOQA: E501
"bin/app/components/foo.js": "foo.js\n",
"bin/app/defaults/preferences/prefs.js": "prefs.js\n",
"bin/app/foo.css": "foo.css: FOO is bar\n",
"bin/app/foo.ini": "foo.ini\n",
"bin/app/modules/baz.jsm": '//@line 2 "%s/baz.jsm"\nbaz.jsm: FOO is bar\n'
% (test_path),
"bin/app/modules/child/bar.jsm": "bar.jsm\n",
"bin/app/modules/child2/qux.jsm": '//@line 2 "%s/qux.jsm"\nqux.jsm: BAR is defined\n' # NOQA: E501
% (test_path),
"bin/app/modules/foo.jsm": "foo.jsm\n",
},
)
if __name__ == '__main__':
if __name__ == "__main__":
main()


@@ -19,10 +19,10 @@ import mozunit
here = os.path.dirname(__file__)
BUILDCONFIG = {
'topobjdir': buildconfig.topobjdir,
'MOZ_APP_NAME': buildconfig.substs.get('MOZ_APP_NAME', 'nightly'),
'OMNIJAR_NAME': buildconfig.substs.get('OMNIJAR_NAME', 'omni.ja'),
'MOZ_MACBUNDLE_NAME': buildconfig.substs.get('MOZ_MACBUNDLE_NAME', 'Nightly.app'),
"topobjdir": buildconfig.topobjdir,
"MOZ_APP_NAME": buildconfig.substs.get("MOZ_APP_NAME", "nightly"),
"OMNIJAR_NAME": buildconfig.substs.get("OMNIJAR_NAME", "omni.ja"),
"MOZ_MACBUNDLE_NAME": buildconfig.substs.get("MOZ_MACBUNDLE_NAME", "Nightly.app"),
}
basic_file = """TN:Compartment_5f7f5c30251800
@@ -110,9 +110,9 @@ end_of_record
"""
class TempFile():
class TempFile:
def __init__(self, content):
self.file = NamedTemporaryFile(mode='w', delete=False, encoding='utf-8')
self.file = NamedTemporaryFile(mode="w", delete=False, encoding="utf-8")
self.file.write(content)
self.file.close()
@@ -124,7 +124,6 @@ class TempFile():
class TestLcovParser(unittest.TestCase):
def parser_roundtrip(self, lcov_string):
with TempFile(lcov_string) as fname:
file_obj = lcov_rewriter.LcovFile([fname])
@@ -172,21 +171,24 @@ bazbarfoo
class TestLineRemapping(unittest.TestCase):
def setUp(self):
chrome_map_file = os.path.join(buildconfig.topobjdir, 'chrome-map.json')
chrome_map_file = os.path.join(buildconfig.topobjdir, "chrome-map.json")
self._old_chrome_info_file = None
if os.path.isfile(chrome_map_file):
backup_file = os.path.join(buildconfig.topobjdir, 'chrome-map-backup.json')
backup_file = os.path.join(buildconfig.topobjdir, "chrome-map-backup.json")
self._old_chrome_info_file = backup_file
self._chrome_map_file = chrome_map_file
shutil.move(chrome_map_file, backup_file)
empty_chrome_info = [
{}, {}, {}, BUILDCONFIG,
{},
{},
{},
BUILDCONFIG,
]
with open(chrome_map_file, 'w') as fh:
with open(chrome_map_file, "w") as fh:
json.dump(empty_chrome_info, fh)
self.lcov_rewriter = lcov_rewriter.LcovFileRewriter(chrome_map_file, '', '', [])
self.lcov_rewriter = lcov_rewriter.LcovFileRewriter(chrome_map_file, "", "", [])
self.pp_rewriter = self.lcov_rewriter.pp_rewriter
def tearDown(self):
@@ -195,52 +197,52 @@ class TestLineRemapping(unittest.TestCase):
def test_map_multiple_included(self):
with TempFile(multiple_included_files) as fname:
actual = chrome_map.generate_pp_info(fname, '/src/dir')
actual = chrome_map.generate_pp_info(fname, "/src/dir")
expected = {
"2,3": ('foo.js', 1),
"4,5": ('path/bar.js', 2),
"6,7": ('foo.js', 3),
"8,9": ('path/bar.js', 2),
"10,11": ('path2/test.js', 3),
"12,13": ('path/baz.js', 1),
"14,15": ('f.js', 6),
"2,3": ("foo.js", 1),
"4,5": ("path/bar.js", 2),
"6,7": ("foo.js", 3),
"8,9": ("path/bar.js", 2),
"10,11": ("path2/test.js", 3),
"12,13": ("path/baz.js", 1),
"14,15": ("f.js", 6),
}
self.assertEqual(actual, expected)
def test_map_srcdir_prefix(self):
with TempFile(srcdir_prefix_files) as fname:
actual = chrome_map.generate_pp_info(fname, '/src/dir')
actual = chrome_map.generate_pp_info(fname, "/src/dir")
expected = {
"2,3": ('foo.js', 1),
"4,5": ('path/file.js', 2),
"6,7": ('foo.js', 3),
"2,3": ("foo.js", 1),
"4,5": ("path/file.js", 2),
"6,7": ("foo.js", 3),
}
self.assertEqual(actual, expected)
def test_remap_lcov(self):
pp_remap = {
"1941,2158": ('dropPreview.js', 6),
"2159,2331": ('updater.js', 6),
"2584,2674": ('intro.js', 6),
"2332,2443": ('undo.js', 6),
"864,985": ('cells.js', 6),
"2444,2454": ('search.js', 6),
"1567,1712": ('drop.js', 6),
"2455,2583": ('customize.js', 6),
"1713,1940": ('dropTargetShim.js', 6),
"1402,1548": ('drag.js', 6),
"1549,1566": ('dragDataHelper.js', 6),
"453,602": ('page.js', 141),
"2675,2678": ('newTab.js', 70),
"56,321": ('transformations.js', 6),
"603,863": ('grid.js', 6),
"322,452": ('page.js', 6),
"986,1401": ('sites.js', 6)
"1941,2158": ("dropPreview.js", 6),
"2159,2331": ("updater.js", 6),
"2584,2674": ("intro.js", 6),
"2332,2443": ("undo.js", 6),
"864,985": ("cells.js", 6),
"2444,2454": ("search.js", 6),
"1567,1712": ("drop.js", 6),
"2455,2583": ("customize.js", 6),
"1713,1940": ("dropTargetShim.js", 6),
"1402,1548": ("drag.js", 6),
"1549,1566": ("dragDataHelper.js", 6),
"453,602": ("page.js", 141),
"2675,2678": ("newTab.js", 70),
"56,321": ("transformations.js", 6),
"603,863": ("grid.js", 6),
"322,452": ("page.js", 6),
"986,1401": ("sites.js", 6),
}
fpath = os.path.join(here, 'sample_lcov.info')
fpath = os.path.join(here, "sample_lcov.info")
# Read original records
lcov_file = lcov_rewriter.LcovFile([fpath])
@@ -272,78 +274,76 @@ class TestLineRemapping(unittest.TestCase):
# Read rewritten lcov.
with TempFile(out.getvalue()) as fname:
lcov_file = lcov_rewriter.LcovFile([fname])
records = [lcov_file.parse_record(r) for _, _, r in lcov_file.iterate_records()]
records = [
lcov_file.parse_record(r) for _, _, r in lcov_file.iterate_records()
]
self.assertEqual(len(records), 17)
# Lines/functions are only "moved" between records, not duplicated or omited.
self.assertEqual(original_line_count,
sum(r.line_count for r in records))
self.assertEqual(original_covered_line_count,
sum(r.covered_line_count for r in records))
self.assertEqual(original_function_count,
sum(r.function_count for r in records))
self.assertEqual(original_covered_function_count,
sum(r.covered_function_count for r in records))
self.assertEqual(original_line_count, sum(r.line_count for r in records))
self.assertEqual(
original_covered_line_count, sum(r.covered_line_count for r in records)
)
self.assertEqual(
original_function_count, sum(r.function_count for r in records)
)
self.assertEqual(
original_covered_function_count,
sum(r.covered_function_count for r in records),
)
class TestUrlFinder(unittest.TestCase):
def setUp(self):
chrome_map_file = os.path.join(buildconfig.topobjdir, 'chrome-map.json')
chrome_map_file = os.path.join(buildconfig.topobjdir, "chrome-map.json")
self._old_chrome_info_file = None
if os.path.isfile(chrome_map_file):
backup_file = os.path.join(buildconfig.topobjdir, 'chrome-map-backup.json')
backup_file = os.path.join(buildconfig.topobjdir, "chrome-map-backup.json")
self._old_chrome_info_file = backup_file
self._chrome_map_file = chrome_map_file
shutil.move(chrome_map_file, backup_file)
dummy_chrome_info = [
{
'resource://activity-stream/': [
'dist/bin/browser/chrome/browser/res/activity-stream',
"resource://activity-stream/": [
"dist/bin/browser/chrome/browser/res/activity-stream",
],
'chrome://browser/content/': [
'dist/bin/browser/chrome/browser/content/browser',
"chrome://browser/content/": [
"dist/bin/browser/chrome/browser/content/browser",
],
},
{
'chrome://global/content/netError.xhtml':
'chrome://browser/content/aboutNetError.xhtml',
"chrome://global/content/netError.xhtml": "chrome://browser/content/aboutNetError.xhtml", # NOQA: E501
},
{
'dist/bin/components/MainProcessSingleton.js': [
'path1',
None
"dist/bin/components/MainProcessSingleton.js": ["path1", None],
"dist/bin/browser/features/firefox@getpocket.com/bootstrap.js": [
"path4",
None,
],
'dist/bin/browser/features/firefox@getpocket.com/bootstrap.js': [
'path4',
None
"dist/bin/modules/osfile/osfile_async_worker.js": [
"toolkit/components/osfile/modules/osfile_async_worker.js",
None,
],
'dist/bin/modules/osfile/osfile_async_worker.js': [
'toolkit/components/osfile/modules/osfile_async_worker.js',
None
"dist/bin/browser/chrome/browser/res/activity-stream/lib/": [
"browser/components/newtab/lib/*",
None,
],
'dist/bin/browser/chrome/browser/res/activity-stream/lib/': [
'browser/components/newtab/lib/*',
None
"dist/bin/browser/chrome/browser/content/browser/aboutNetError.xhtml": [
"browser/base/content/aboutNetError.xhtml",
None,
],
'dist/bin/browser/chrome/browser/content/browser/aboutNetError.xhtml': [
'browser/base/content/aboutNetError.xhtml',
None
],
'dist/bin/modules/AppConstants.jsm': [
'toolkit/modules/AppConstants.jsm',
"dist/bin/modules/AppConstants.jsm": [
"toolkit/modules/AppConstants.jsm",
{
'101,102': [
'toolkit/modules/AppConstants.jsm',
135
],
}
"101,102": ["toolkit/modules/AppConstants.jsm", 135],
},
],
},
BUILDCONFIG,
]
with open(chrome_map_file, 'w') as fh:
with open(chrome_map_file, "w") as fh:
json.dump(dummy_chrome_info, fh)
def tearDown(self):
@@ -351,67 +351,96 @@ class TestUrlFinder(unittest.TestCase):
shutil.move(self._old_chrome_info_file, self._chrome_map_file)
def test_jar_paths(self):
app_name = BUILDCONFIG['MOZ_APP_NAME']
omnijar_name = BUILDCONFIG['OMNIJAR_NAME']
app_name = BUILDCONFIG["MOZ_APP_NAME"]
omnijar_name = BUILDCONFIG["OMNIJAR_NAME"]
paths = [
('jar:file:///home/worker/workspace/build/application/' + app_name +
'/' + omnijar_name + '!/components/MainProcessSingleton.js', 'path1'),
('jar:file:///home/worker/workspace/build/application/' + app_name +
'/browser/features/firefox@getpocket.com.xpi!/bootstrap.js', 'path4'),
(
"jar:file:///home/worker/workspace/build/application/"
+ app_name
+ "/"
+ omnijar_name
+ "!/components/MainProcessSingleton.js",
"path1",
),
(
"jar:file:///home/worker/workspace/build/application/"
+ app_name
+ "/browser/features/firefox@getpocket.com.xpi!/bootstrap.js",
"path4",
),
]
url_finder = lcov_rewriter.UrlFinder(self._chrome_map_file, '', '', [])
url_finder = lcov_rewriter.UrlFinder(self._chrome_map_file, "", "", [])
for path, expected in paths:
self.assertEqual(url_finder.rewrite_url(path)[0], expected)
def test_wrong_scheme_paths(self):
paths = [
'http://www.mozilla.org/aFile.js',
'https://www.mozilla.org/aFile.js',
'data:something',
'about:newtab',
'javascript:something',
"http://www.mozilla.org/aFile.js",
"https://www.mozilla.org/aFile.js",
"data:something",
"about:newtab",
"javascript:something",
]
url_finder = lcov_rewriter.UrlFinder(self._chrome_map_file, '', '', [])
url_finder = lcov_rewriter.UrlFinder(self._chrome_map_file, "", "", [])
for path in paths:
self.assertIsNone(url_finder.rewrite_url(path))
def test_chrome_resource_paths(self):
paths = [
# Path with default url prefix
('resource://gre/modules/osfile/osfile_async_worker.js',
('toolkit/components/osfile/modules/osfile_async_worker.js', None)),
(
"resource://gre/modules/osfile/osfile_async_worker.js",
("toolkit/components/osfile/modules/osfile_async_worker.js", None),
),
# Path with url prefix that is in chrome map
('resource://activity-stream/lib/PrefsFeed.jsm',
('browser/components/newtab/lib/PrefsFeed.jsm', None)),
(
"resource://activity-stream/lib/PrefsFeed.jsm",
("browser/components/newtab/lib/PrefsFeed.jsm", None),
),
# Path which is in url overrides
('chrome://global/content/netError.xhtml',
('browser/base/content/aboutNetError.xhtml', None)),
(
"chrome://global/content/netError.xhtml",
("browser/base/content/aboutNetError.xhtml", None),
),
# Path which ends with > eval
('resource://gre/modules/osfile/osfile_async_worker.js line 3 > eval', None),
(
"resource://gre/modules/osfile/osfile_async_worker.js line 3 > eval",
None,
),
# Path which ends with > Function
('resource://gre/modules/osfile/osfile_async_worker.js line 3 > Function', None),
(
"resource://gre/modules/osfile/osfile_async_worker.js line 3 > Function",
None,
),
# Path which contains "->"
('resource://gre/modules/addons/XPIProvider.jsm -> resource://gre/modules/osfile/osfile_async_worker.js', # noqa
('toolkit/components/osfile/modules/osfile_async_worker.js', None)),
(
"resource://gre/modules/addons/XPIProvider.jsm -> resource://gre/modules/osfile/osfile_async_worker.js", # noqa
("toolkit/components/osfile/modules/osfile_async_worker.js", None),
),
# Path with pp_info
('resource://gre/modules/AppConstants.jsm', ('toolkit/modules/AppConstants.jsm', {
'101,102': [
'toolkit/modules/AppConstants.jsm',
135
],
})),
(
"resource://gre/modules/AppConstants.jsm",
(
"toolkit/modules/AppConstants.jsm",
{
"101,102": ["toolkit/modules/AppConstants.jsm", 135],
},
),
),
# Path with query
('resource://activity-stream/lib/PrefsFeed.jsm?q=0.9098419174803978',
('browser/components/newtab/lib/PrefsFeed.jsm', None)),
(
"resource://activity-stream/lib/PrefsFeed.jsm?q=0.9098419174803978",
("browser/components/newtab/lib/PrefsFeed.jsm", None),
),
]
url_finder = lcov_rewriter.UrlFinder(self._chrome_map_file, '', 'dist/bin/', [])
url_finder = lcov_rewriter.UrlFinder(self._chrome_map_file, "", "dist/bin/", [])
for path, expected in paths:
self.assertEqual(url_finder.rewrite_url(path), expected)
if __name__ == '__main__':
if __name__ == "__main__":
mozunit.main()


@@ -11,7 +11,9 @@ import logging
from taskgraph.transforms.base import TransformSequence
from taskgraph.transforms.bouncer_submission import craft_bouncer_product_name
from taskgraph.transforms.bouncer_submission_partners import craft_partner_bouncer_product_name
from taskgraph.transforms.bouncer_submission_partners import (
craft_partner_bouncer_product_name,
)
from taskgraph.util.partners import get_partners_to_be_published
from taskgraph.util.schema import resolve_keyed_by
from taskgraph.util.scriptworker import get_release_config
@@ -25,59 +27,81 @@ transforms = TransformSequence()
def make_task_worker(config, jobs):
for job in jobs:
resolve_keyed_by(
job, 'worker-type', item_name=job['name'],
**{'release-level': config.params.release_level()}
job,
"worker-type",
item_name=job["name"],
**{"release-level": config.params.release_level()}
)
resolve_keyed_by(
job, 'scopes', item_name=job['name'],
**{'release-level': config.params.release_level()}
job,
"scopes",
item_name=job["name"],
**{"release-level": config.params.release_level()}
)
resolve_keyed_by(
job, 'bouncer-products-per-alias',
item_name=job['name'], project=config.params['project']
job,
"bouncer-products-per-alias",
item_name=job["name"],
project=config.params["project"],
)
if 'partner-bouncer-products-per-alias' in job:
if "partner-bouncer-products-per-alias" in job:
resolve_keyed_by(
job, 'partner-bouncer-products-per-alias',
item_name=job['name'], project=config.params['project']
job,
"partner-bouncer-products-per-alias",
item_name=job["name"],
project=config.params["project"],
)
job['worker']['entries'] = craft_bouncer_entries(config, job)
job["worker"]["entries"] = craft_bouncer_entries(config, job)
del job['bouncer-products-per-alias']
if 'partner-bouncer-products-per-alias' in job:
del job['partner-bouncer-products-per-alias']
del job["bouncer-products-per-alias"]
if "partner-bouncer-products-per-alias" in job:
del job["partner-bouncer-products-per-alias"]
if job['worker']['entries']:
if job["worker"]["entries"]:
yield job
else:
logger.warn('No bouncer entries defined in bouncer submission task for "{}". \
Job deleted.'.format(job['name']))
logger.warn(
'No bouncer entries defined in bouncer submission task for "{}". \
Job deleted.'.format(
job["name"]
)
)
def craft_bouncer_entries(config, job):
release_config = get_release_config(config)
product = job['shipping-product']
current_version = release_config['version']
bouncer_products_per_alias = job['bouncer-products-per-alias']
product = job["shipping-product"]
current_version = release_config["version"]
bouncer_products_per_alias = job["bouncer-products-per-alias"]
entries = {
bouncer_alias: craft_bouncer_product_name(
product, bouncer_product, current_version,
product,
bouncer_product,
current_version,
)
for bouncer_alias, bouncer_product in bouncer_products_per_alias.items()
}
partner_bouncer_products_per_alias = job.get('partner-bouncer-products-per-alias')
partner_bouncer_products_per_alias = job.get("partner-bouncer-products-per-alias")
if partner_bouncer_products_per_alias:
partners = get_partners_to_be_published(config)
for partner, sub_config_name, _ in partners:
entries.update({
bouncer_alias.replace('PARTNER', '{}-{}'.format(partner, sub_config_name)):
craft_partner_bouncer_product_name(
product, bouncer_product, current_version, partner, sub_config_name)
for bouncer_alias, bouncer_product in partner_bouncer_products_per_alias.items()
})
entries.update(
{
bouncer_alias.replace(
"PARTNER", "{}-{}".format(partner, sub_config_name)
): craft_partner_bouncer_product_name(
product,
bouncer_product,
current_version,
partner,
sub_config_name,
)
for bouncer_alias, bouncer_product in partner_bouncer_products_per_alias.items() # NOQA: E501
}
)
return entries


@@ -9,7 +9,10 @@ from __future__ import absolute_import, print_function, unicode_literals
import os
from taskgraph.transforms.base import TransformSequence
from taskgraph.util.attributes import copy_attributes_from_dependent_job, sorted_unique_list
from taskgraph.util.attributes import (
copy_attributes_from_dependent_job,
sorted_unique_list,
)
from taskgraph.util.scriptworker import (
get_signing_cert_scope_per_platform,
)
@@ -18,14 +21,15 @@ from taskgraph.util.taskcluster import get_artifact_prefix
from taskgraph.util.treeherder import join_symbol, inherit_treeherder_from_dep
import logging
logger = logging.getLogger(__name__)
SIGNING_FORMATS = {
'mar-signing-autograph-stage': {
'target.complete.mar': ['autograph_stage_mar384'],
"mar-signing-autograph-stage": {
"target.complete.mar": ["autograph_stage_mar384"],
},
'default': {
'target.complete.mar': ['autograph_hash_only_mar384'],
"default": {
"target.complete.mar": ["autograph_hash_only_mar384"],
},
}
@@ -35,34 +39,36 @@ transforms = TransformSequence()
def generate_partials_artifacts(job, release_history, platform, locale=None):
artifact_prefix = get_artifact_prefix(job)
if locale:
artifact_prefix = '{}/{}'.format(artifact_prefix, locale)
artifact_prefix = "{}/{}".format(artifact_prefix, locale)
else:
locale = 'en-US'
locale = "en-US"
artifacts = get_partials_artifacts_from_params(release_history, platform, locale)
upstream_artifacts = [{
"taskId": {"task-reference": '<partials>'},
"taskType": 'partials',
"paths": [
"{}/{}".format(artifact_prefix, path)
for path, version in artifacts
# TODO Use mozilla-version to avoid comparing strings. Otherwise Firefox 100 will be
# considered smaller than Firefox 56
if version is None or version >= '56'
],
"formats": ["autograph_hash_only_mar384"],
}]
upstream_artifacts = [
{
"taskId": {"task-reference": "<partials>"},
"taskType": "partials",
"paths": [
"{}/{}".format(artifact_prefix, path)
for path, version in artifacts
# TODO Use mozilla-version to avoid comparing strings. Otherwise Firefox 100 will
# be considered smaller than Firefox 56
if version is None or version >= "56"
],
"formats": ["autograph_hash_only_mar384"],
}
]
old_mar_upstream_artifacts = {
"taskId": {"task-reference": '<partials>'},
"taskType": 'partials',
"taskId": {"task-reference": "<partials>"},
"taskType": "partials",
"paths": [
"{}/{}".format(artifact_prefix, path)
for path, version in artifacts
# TODO Use mozilla-version to avoid comparing strings. Otherwise Firefox 100 will be
# considered smaller than Firefox 56
if version is not None and version < '56'
if version is not None and version < "56"
],
"formats": ["mar"],
}
@@ -76,16 +82,18 @@ def generate_partials_artifacts(job, release_history, platform, locale=None):
def generate_complete_artifacts(job, kind):
upstream_artifacts = []
if kind not in SIGNING_FORMATS:
kind = 'default'
kind = "default"
for artifact in job.release_artifacts:
basename = os.path.basename(artifact)
if basename in SIGNING_FORMATS[kind]:
upstream_artifacts.append({
"taskId": {"task-reference": '<{}>'.format(job.kind)},
"taskType": 'build',
"paths": [artifact],
"formats": SIGNING_FORMATS[kind][basename],
})
upstream_artifacts.append(
{
"taskId": {"task-reference": "<{}>".format(job.kind)},
"taskType": "build",
"paths": [artifact],
"formats": SIGNING_FORMATS[kind][basename],
}
)
return upstream_artifacts
@@ -93,15 +101,15 @@ def generate_complete_artifacts(job, kind):
@transforms.add
def make_task_description(config, jobs):
for job in jobs:
dep_job = job['primary-dependency']
locale = dep_job.attributes.get('locale')
dep_job = job["primary-dependency"]
locale = dep_job.attributes.get("locale")
treeherder = inherit_treeherder_from_dep(job, dep_job)
treeherder.setdefault(
'symbol', join_symbol(job.get('treeherder-group', 'ms'), locale or 'N')
"symbol", join_symbol(job.get("treeherder-group", "ms"), locale or "N")
)
label = job.get('label', "{}-{}".format(config.kind, dep_job.label))
label = job.get("label", "{}-{}".format(config.kind, dep_job.label))
dependencies = {dep_job.kind: dep_job.label}
signing_dependencies = dep_job.dependencies
@@ -110,25 +118,24 @@ def make_task_description(config, jobs):
dependencies.update(signing_dependencies)
attributes = copy_attributes_from_dependent_job(dep_job)
attributes['required_signoffs'] = sorted_unique_list(
attributes.get('required_signoffs', []),
job.pop('required_signoffs')
attributes["required_signoffs"] = sorted_unique_list(
attributes.get("required_signoffs", []), job.pop("required_signoffs")
)
attributes['shipping_phase'] = job['shipping-phase']
attributes["shipping_phase"] = job["shipping-phase"]
if locale:
attributes['locale'] = locale
attributes["locale"] = locale
build_platform = attributes.get('build_platform')
if config.kind == 'partials-signing':
build_platform = attributes.get("build_platform")
if config.kind == "partials-signing":
upstream_artifacts = generate_partials_artifacts(
dep_job, config.params['release_history'], build_platform, locale)
dep_job, config.params["release_history"], build_platform, locale
)
else:
upstream_artifacts = generate_complete_artifacts(dep_job, config.kind)
is_shippable = job.get(
'shippable', # First check current job
dep_job.attributes.get(
'shippable')) # Then dep job for 'shippable'
"shippable", dep_job.attributes.get("shippable") # First check current job
) # Then dep job for 'shippable'
signing_cert_scope = get_signing_cert_scope_per_platform(
build_platform, is_shippable, config
)
@ -136,19 +143,23 @@ def make_task_description(config, jobs):
scopes = [signing_cert_scope]
task = {
'label': label,
'description': "{} {}".format(
dep_job.description, job['description-suffix']),
'worker-type': job.get('worker-type', 'linux-signing'),
'worker': {'implementation': 'scriptworker-signing',
'upstream-artifacts': upstream_artifacts,
'max-run-time': 3600},
'dependencies': dependencies,
'attributes': attributes,
'scopes': scopes,
'run-on-projects': job.get('run-on-projects',
dep_job.attributes.get('run_on_projects')),
'treeherder': treeherder,
"label": label,
"description": "{} {}".format(
dep_job.description, job["description-suffix"]
),
"worker-type": job.get("worker-type", "linux-signing"),
"worker": {
"implementation": "scriptworker-signing",
"upstream-artifacts": upstream_artifacts,
"max-run-time": 3600,
},
"dependencies": dependencies,
"attributes": attributes,
"scopes": scopes,
"run-on-projects": job.get(
"run-on-projects", dep_job.attributes.get("run_on_projects")
),
"treeherder": treeherder,
}
yield task


@ -12,22 +12,22 @@ from taskgraph.loader.single_dep import schema
from taskgraph.transforms.base import TransformSequence
from taskgraph.util.attributes import copy_attributes_from_dependent_job
from taskgraph.util.partners import get_partner_config_by_kind
from taskgraph.util.scriptworker import (
get_signing_cert_scope_per_platform,
)
from taskgraph.util.scriptworker import get_signing_cert_scope_per_platform
from taskgraph.util.taskcluster import get_artifact_path
from taskgraph.transforms.task import task_description_schema
from voluptuous import Optional
transforms = TransformSequence()
repackage_signing_description_schema = schema.extend({
Optional('label'): text_type,
Optional('extra'): object,
Optional('shipping-product'): task_description_schema['shipping-product'],
Optional('shipping-phase'): task_description_schema['shipping-phase'],
Optional('priority'): task_description_schema['priority'],
})
repackage_signing_description_schema = schema.extend(
{
Optional("label"): text_type,
Optional("extra"): object,
Optional("shipping-product"): task_description_schema["shipping-product"],
Optional("shipping-phase"): task_description_schema["shipping-phase"],
Optional("priority"): task_description_schema["priority"],
}
)
transforms.add_validate(repackage_signing_description_schema)
@ -35,29 +35,26 @@ transforms.add_validate(repackage_signing_description_schema)
@transforms.add
def make_repackage_signing_description(config, jobs):
for job in jobs:
dep_job = job['primary-dependency']
repack_id = dep_job.task['extra']['repack_id']
dep_job = job["primary-dependency"]
repack_id = dep_job.task["extra"]["repack_id"]
attributes = dep_job.attributes
build_platform = dep_job.attributes.get('build_platform')
is_shippable = dep_job.attributes.get('shippable')
build_platform = dep_job.attributes.get("build_platform")
is_shippable = dep_job.attributes.get("shippable")
# Mac & windows
label = dep_job.label.replace("repackage-", "repackage-signing-")
# Linux
label = label.replace("chunking-dummy-", "repackage-signing-")
description = (
"Signing of repackaged artifacts for partner repack id '{repack_id}' for build '"
"{build_platform}/{build_type}'".format(
repack_id=repack_id,
build_platform=attributes.get('build_platform'),
build_type=attributes.get('build_type')
)
description = "Signing of repackaged artifacts for partner repack id '{repack_id}' for build '" "{build_platform}/{build_type}'".format( # NOQA: E501
repack_id=repack_id,
build_platform=attributes.get("build_platform"),
build_type=attributes.get("build_type"),
)
if 'linux' in build_platform:
if "linux" in build_platform:
# we want the repack job, via the dependencies for the chunking-dummy dep_job
for dep in dep_job.dependencies.values():
if dep.startswith('release-partner-repack'):
if dep.startswith("release-partner-repack"):
dependencies = {"repack": dep}
break
else:
@ -65,73 +62,90 @@ def make_repackage_signing_description(config, jobs):
dependencies = {"repackage": dep_job.label}
attributes = copy_attributes_from_dependent_job(dep_job)
attributes['repackage_type'] = 'repackage-signing'
attributes["repackage_type"] = "repackage-signing"
signing_cert_scope = get_signing_cert_scope_per_platform(
build_platform, is_shippable, config
)
scopes = [signing_cert_scope]
if 'win' in build_platform:
upstream_artifacts = [{
"taskId": {"task-reference": "<repackage>"},
"taskType": "repackage",
"paths": [
get_artifact_path(dep_job, "{}/target.installer.exe".format(repack_id)),
],
"formats": ["autograph_authenticode", "autograph_gpg"]
}]
partner_config = get_partner_config_by_kind(config, config.kind)
partner, subpartner, _ = repack_id.split('/')
repack_stub_installer = partner_config[partner][subpartner].get(
'repack_stub_installer')
if build_platform.startswith('win32') and repack_stub_installer:
upstream_artifacts.append({
if "win" in build_platform:
upstream_artifacts = [
{
"taskId": {"task-reference": "<repackage>"},
"taskType": "repackage",
"paths": [
get_artifact_path(dep_job, "{}/target.stub-installer.exe".format(
repack_id)),
get_artifact_path(
dep_job, "{}/target.installer.exe".format(repack_id)
),
],
"formats": ["autograph_authenticode", "autograph_gpg"]
})
elif 'mac' in build_platform:
upstream_artifacts = [{
"taskId": {"task-reference": "<repackage>"},
"taskType": "repackage",
"paths": [
get_artifact_path(dep_job, "{}/target.dmg".format(repack_id)),
],
"formats": ["autograph_gpg"]
}]
elif 'linux' in build_platform:
upstream_artifacts = [{
"taskId": {"task-reference": "<repack>"},
"taskType": "repackage",
"paths": [
get_artifact_path(dep_job, "{}/target.tar.bz2".format(repack_id)),
],
"formats": ["autograph_gpg"]
}]
"formats": ["autograph_authenticode", "autograph_gpg"],
}
]
partner_config = get_partner_config_by_kind(config, config.kind)
partner, subpartner, _ = repack_id.split("/")
repack_stub_installer = partner_config[partner][subpartner].get(
"repack_stub_installer"
)
if build_platform.startswith("win32") and repack_stub_installer:
upstream_artifacts.append(
{
"taskId": {"task-reference": "<repackage>"},
"taskType": "repackage",
"paths": [
get_artifact_path(
dep_job,
"{}/target.stub-installer.exe".format(repack_id),
),
],
"formats": ["autograph_authenticode", "autograph_gpg"],
}
)
elif "mac" in build_platform:
upstream_artifacts = [
{
"taskId": {"task-reference": "<repackage>"},
"taskType": "repackage",
"paths": [
get_artifact_path(dep_job, "{}/target.dmg".format(repack_id)),
],
"formats": ["autograph_gpg"],
}
]
elif "linux" in build_platform:
upstream_artifacts = [
{
"taskId": {"task-reference": "<repack>"},
"taskType": "repackage",
"paths": [
get_artifact_path(
dep_job, "{}/target.tar.bz2".format(repack_id)
),
],
"formats": ["autograph_gpg"],
}
]
task = {
'label': label,
'description': description,
'worker-type': 'linux-signing',
'worker': {'implementation': 'scriptworker-signing',
'upstream-artifacts': upstream_artifacts,
'max-run-time': 3600},
'scopes': scopes,
'dependencies': dependencies,
'attributes': attributes,
'run-on-projects': dep_job.attributes.get('run_on_projects'),
'extra': {
'repack_id': repack_id,
}
"label": label,
"description": description,
"worker-type": "linux-signing",
"worker": {
"implementation": "scriptworker-signing",
"upstream-artifacts": upstream_artifacts,
"max-run-time": 3600,
},
"scopes": scopes,
"dependencies": dependencies,
"attributes": attributes,
"run-on-projects": dep_job.attributes.get("run_on_projects"),
"extra": {
"repack_id": repack_id,
},
}
# we may have reduced the priority for partner jobs, otherwise task.py will set it
if job.get('priority'):
task['priority'] = job['priority']
if job.get("priority"):
task["priority"] = job["priority"]
yield task
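# Illustrative sketch (not part of this patch): several reformatted lines above
# end in "# NOQA: E501" because, once black lays a long .format() call out on a
# single line, flake8's line-length check has to be suppressed on that exact
# line. A standalone example of the same pattern, with made-up values:
description = "Signing of repackaged artifacts for partner repack id '{repack_id}' for build '{build_platform}/{build_type}'".format(  # NOQA: E501
    repack_id="acme/desktop", build_platform="win64", build_type="opt"
)
assert "acme/desktop" in description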


@ -1,4 +1,3 @@
from __future__ import absolute_import, unicode_literals, print_function
import io
@ -50,7 +49,7 @@ add_task(async function test_TODO() {
filename = os.path.basename(self.test)
if not os.path.isfile(manifest_file):
print('Could not open manifest file {}'.format(manifest_file))
print("Could not open manifest file {}".format(manifest_file))
return
write_to_ini_file(manifest_file, filename)
@ -64,22 +63,29 @@ class MochitestCreator(Creator):
def _get_template_contents(self):
mochitest_templates = os.path.abspath(
os.path.join(os.path.dirname(__file__), 'mochitest', 'static')
os.path.join(os.path.dirname(__file__), "mochitest", "static")
)
template_file_name = None
template_file_name = self.templates.get(self.suite)
if template_file_name is None:
print("Sorry, `addtest` doesn't currently know how to add {}".format(self.suite))
print(
"Sorry, `addtest` doesn't currently know how to add {}".format(
self.suite
)
)
return None
template_file_name = template_file_name % {"doc": self.doc}
template_file = os.path.join(mochitest_templates, template_file_name)
if not os.path.isfile(template_file):
print("Sorry, `addtest` doesn't currently know how to add {} with document type {}"
.format(self.suite, self.doc))
print(
"Sorry, `addtest` doesn't currently know how to add {} with document type {}".format( # NOQA: E501
self.suite, self.doc
)
)
return None
with open(template_file) as f:
@ -90,13 +96,13 @@ class MochitestCreator(Creator):
guessed_ini = {
"mochitest-plain": "mochitest.ini",
"mochitest-chrome": "chrome.ini",
"mochitest-browser-chrome": "browser.ini"
"mochitest-browser-chrome": "browser.ini",
}[self.suite]
manifest_file = os.path.join(os.path.dirname(self.test), guessed_ini)
filename = os.path.basename(self.test)
if not os.path.isfile(manifest_file):
print('Could not open manifest file {}'.format(manifest_file))
print("Could not open manifest file {}".format(manifest_file))
return
write_to_ini_file(manifest_file, filename)
@ -135,20 +141,32 @@ class WebPlatformTestsCreator(Creator):
@classmethod
def get_parser(cls, parser):
parser.add_argument("--long-timeout", action="store_true",
help="Test should be given a long timeout "
"(typically 60s rather than 10s, but varies depending on environment)")
parser.add_argument("-m", "--reference", dest="ref", help="Path to the reference file")
parser.add_argument("--mismatch", action="store_true",
help="Create a mismatch reftest")
parser.add_argument("--wait", action="store_true",
help="Create a reftest that waits until takeScreenshot() is called")
parser.add_argument(
"--long-timeout",
action="store_true",
help="Test should be given a long timeout "
"(typically 60s rather than 10s, but varies depending on environment)",
)
parser.add_argument(
"-m", "--reference", dest="ref", help="Path to the reference file"
)
parser.add_argument(
"--mismatch", action="store_true", help="Create a mismatch reftest"
)
parser.add_argument(
"--wait",
action="store_true",
help="Create a reftest that waits until takeScreenshot() is called",
)
def check_args(self):
if self.wpt_type(self.test) is None:
print("""Test path %s is not in wpt directories:
print(
"""Test path %s is not in wpt directories:
testing/web-platform/tests for tests that may be shared
testing/web-platform/mozilla/tests for Gecko-only tests""" % self.test)
testing/web-platform/mozilla/tests for Gecko-only tests"""
% self.test
)
return False
if not self.reftest:
@ -178,8 +196,11 @@ testing/web-platform/mozilla/tests for Gecko-only tests""" % self.test)
yield (ref_path, self._get_template_contents(reference=True))
def _get_template_contents(self, reference=False):
args = {"documentElement": "<html class=reftest-wait>\n"
if self.kwargs["wait"] else ""}
args = {
"documentElement": "<html class=reftest-wait>\n"
if self.kwargs["wait"]
else ""
}
if self.test.rsplit(".", 1)[1] == "js":
template = self.template_js
@ -192,9 +213,14 @@ testing/web-platform/mozilla/tests for Gecko-only tests""" % self.test)
if self.reftest:
if not reference:
args = {"match": "match" if not self.kwargs["mismatch"] else "mismatch",
"ref": (self.ref_url(self.kwargs["ref"])
if self.kwargs["ref"] else '""')}
args = {
"match": "match" if not self.kwargs["mismatch"] else "mismatch",
"ref": (
self.ref_url(self.kwargs["ref"])
if self.kwargs["ref"]
else '""'
),
}
template += self.template_body_reftest % args
if self.kwargs["wait"]:
template += self.template_body_reftest_wait
@ -236,7 +262,7 @@ testing/web-platform/mozilla/tests for Gecko-only tests""" % self.test)
# Path is an absolute URL relative to the tests root
if path.startswith("/_mozilla/"):
base = self.local_path
path = path[len("/_mozilla/"):]
path = path[len("/_mozilla/") :]
else:
base = self.upstream_path
path = path[1:]
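# Illustrative note (not part of this patch): black puts a space before the ":"
# in slices whose bound is an expression, as in the `path[len("/_mozilla/") :]`
# line above. The result is the same slice either way; projects formatted with
# black typically disable flake8's E203 check so the two tools do not fight.
prefix = "/_mozilla/"
path = "/_mozilla/dom/test.html"
assert path[len(prefix) :] == path[len(prefix):] == "dom/test.html"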
@ -249,7 +275,8 @@ testing/web-platform/mozilla/tests for Gecko-only tests""" % self.test)
return path
else:
test_rel_path = self.src_rel_path(
os.path.join(os.path.dirname(self.test), path))
os.path.join(os.path.dirname(self.test), path)
)
if self.wpt_type(test_rel_path) is not None:
return test_rel_path
# Returning None indicates that the path wasn't valid
@ -288,36 +315,38 @@ def write_to_ini_file(manifest_file, filename):
manifest = manifestparser.TestManifest(manifests=[manifest_file])
insert_before = None
if any(t['name'] == filename for t in manifest.tests):
if any(t["name"] == filename for t in manifest.tests):
print("{} is already in the manifest.".format(filename))
return
for test in manifest.tests:
if test.get('name') > filename:
insert_before = test.get('name')
if test.get("name") > filename:
insert_before = test.get("name")
break
with open(manifest_file, "r") as f:
contents = f.readlines()
filename = '[{}]\n'.format(filename)
filename = "[{}]\n".format(filename)
if not insert_before:
contents.append(filename)
else:
insert_before = '[{}]'.format(insert_before)
insert_before = "[{}]".format(insert_before)
for i in range(len(contents)):
if contents[i].startswith(insert_before):
contents.insert(i, filename)
break
with io.open(manifest_file, "w", newline='\n') as f:
with io.open(manifest_file, "w", newline="\n") as f:
f.write("".join(contents))
TEST_CREATORS = {"mochitest": MochitestCreator,
"web-platform-tests": WebPlatformTestsCreator,
"xpcshell": XpcshellCreator}
TEST_CREATORS = {
"mochitest": MochitestCreator,
"web-platform-tests": WebPlatformTestsCreator,
"xpcshell": XpcshellCreator,
}
def creator_for_suite(suite):
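# Illustrative sketch (not part of this patch): write_to_ini_file() above adds
# a "[test]" section to a manifest while keeping entries in name order. A
# simplified standalone version of its insert_before logic, with made-up names:
contents = ["[browser_a.js]\n", "[browser_c.js]\n"]
new_entry = "[browser_b.js]\n"
for i, line in enumerate(contents):
    if line > new_entry:  # first existing entry that sorts after the new one
        contents.insert(i, new_entry)
        break
else:
    contents.append(new_entry)  # nothing sorts after it, so append at the end
assert contents == ["[browser_a.js]\n", "[browser_b.js]\n", "[browser_c.js]\n"]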

File diff suppressed because it is too large


@ -8,12 +8,11 @@ import time
def test_macintelpower_init(macintelpower_obj):
"""Tests that the MacIntelPower object is correctly initialized.
"""
"""Tests that the MacIntelPower object is correctly initialized."""
assert macintelpower_obj.ipg_path
assert macintelpower_obj.ipg
assert macintelpower_obj._os == 'darwin'
assert macintelpower_obj._cpu == 'intel'
assert macintelpower_obj._os == "darwin"
assert macintelpower_obj._cpu == "intel"
def test_macintelpower_measuring(macintelpower_obj):
@ -43,12 +42,12 @@ def test_macintelpower_measuring(macintelpower_obj):
return test_data
with mock.patch(
'mozpower.intel_power_gadget.IPGResultsHandler.clean_ipg_data'
) as _:
"mozpower.intel_power_gadget.IPGResultsHandler.clean_ipg_data"
) as _:
with mock.patch(
'mozpower.intel_power_gadget.IPGResultsHandler.'
'format_ipg_data_to_partial_perfherder'
) as formatter:
"mozpower.intel_power_gadget.IPGResultsHandler."
"format_ipg_data_to_partial_perfherder"
) as formatter:
formatter.side_effect = formatter_side_effect
macintelpower_obj.finalize_power_measurements(wait_interval=2, timeout=30)
@ -63,17 +62,16 @@ def test_macintelpower_measuring(macintelpower_obj):
# Check that the IPGResultHandler's methods were
# called
macintelpower_obj.ipg_results_handler. \
clean_ipg_data.assert_called()
macintelpower_obj.ipg_results_handler. \
format_ipg_data_to_partial_perfherder.assert_called_once_with(
macintelpower_obj.end_time - macintelpower_obj.start_time, 'power-testing'
)
macintelpower_obj.ipg_results_handler.clean_ipg_data.assert_called()
macintelpower_obj.ipg_results_handler.format_ipg_data_to_partial_perfherder.assert_called_once_with( # NOQA: E501
macintelpower_obj.end_time - macintelpower_obj.start_time,
"power-testing",
)
# Make sure we can get the expected perfherder data
# after formatting
assert macintelpower_obj.get_perfherder_data() == test_data
if __name__ == '__main__':
if __name__ == "__main__":
mozunit.main()

File diff suppressed because it is too large


@ -19,17 +19,21 @@ from mozharness.base.log import INFO, WARNING, ERROR
# ErrorLists {{{1
_mochitest_summary = {
'regex': re.compile(r'''(\d+ INFO (Passed|Failed|Todo):\ +(\d+)|\t(Passed|Failed|Todo): (\d+))'''), # NOQA: E501
'pass_group': "Passed",
'fail_group': "Failed",
'known_fail_group': "Todo",
"regex": re.compile(
r"""(\d+ INFO (Passed|Failed|Todo):\ +(\d+)|\t(Passed|Failed|Todo): (\d+))"""
), # NOQA: E501
"pass_group": "Passed",
"fail_group": "Failed",
"known_fail_group": "Todo",
}
_reftest_summary = {
'regex': re.compile(r'''REFTEST INFO \| (Successful|Unexpected|Known problems): (\d+) \('''), # NOQA: E501
'pass_group': "Successful",
'fail_group': "Unexpected",
'known_fail_group': "Known problems",
"regex": re.compile(
r"""REFTEST INFO \| (Successful|Unexpected|Known problems): (\d+) \("""
), # NOQA: E501
"pass_group": "Successful",
"fail_group": "Unexpected",
"known_fail_group": "Known problems",
}
TinderBoxPrintRe = {
@ -44,93 +48,129 @@ TinderBoxPrintRe = {
"mochitest-plain_summary": _mochitest_summary,
"mochitest-plain-gpu_summary": _mochitest_summary,
"marionette_summary": {
'regex': re.compile(r'''(passed|failed|todo):\ +(\d+)'''),
'pass_group': "passed",
'fail_group': "failed",
'known_fail_group': "todo",
"regex": re.compile(r"""(passed|failed|todo):\ +(\d+)"""),
"pass_group": "passed",
"fail_group": "failed",
"known_fail_group": "todo",
},
"reftest_summary": _reftest_summary,
"reftest-qr_summary": _reftest_summary,
"crashtest_summary": _reftest_summary,
"crashtest-qr_summary": _reftest_summary,
"xpcshell_summary": {
'regex': re.compile(r'''INFO \| (Passed|Failed|Todo): (\d+)'''),
'pass_group': "Passed",
'fail_group': "Failed",
'known_fail_group': "Todo",
"regex": re.compile(r"""INFO \| (Passed|Failed|Todo): (\d+)"""),
"pass_group": "Passed",
"fail_group": "Failed",
"known_fail_group": "Todo",
},
"jsreftest_summary": _reftest_summary,
"instrumentation_summary": _mochitest_summary,
"cppunittest_summary": {
'regex': re.compile(r'''cppunittests INFO \| (Passed|Failed): (\d+)'''),
'pass_group': "Passed",
'fail_group': "Failed",
'known_fail_group': None,
"regex": re.compile(r"""cppunittests INFO \| (Passed|Failed): (\d+)"""),
"pass_group": "Passed",
"fail_group": "Failed",
"known_fail_group": None,
},
"gtest_summary": {
'regex': re.compile(r'''(Passed|Failed): (\d+)'''),
'pass_group': "Passed",
'fail_group': "Failed",
'known_fail_group': None,
"regex": re.compile(r"""(Passed|Failed): (\d+)"""),
"pass_group": "Passed",
"fail_group": "Failed",
"known_fail_group": None,
},
"jittest_summary": {
'regex': re.compile(r'''(Passed|Failed): (\d+)'''),
'pass_group': "Passed",
'fail_group': "Failed",
'known_fail_group': None,
"regex": re.compile(r"""(Passed|Failed): (\d+)"""),
"pass_group": "Passed",
"fail_group": "Failed",
"known_fail_group": None,
},
"mozbase_summary": {
'regex': re.compile(r'''(OK)|(FAILED) \(errors=(\d+)'''),
'pass_group': "OK",
'fail_group': "FAILED",
'known_fail_group': None,
"regex": re.compile(r"""(OK)|(FAILED) \(errors=(\d+)"""),
"pass_group": "OK",
"fail_group": "FAILED",
"known_fail_group": None,
},
"geckoview_summary": {
'regex': re.compile(r'''(Passed|Failed): (\d+)'''),
'pass_group': "Passed",
'fail_group': "Failed",
'known_fail_group': None,
"regex": re.compile(r"""(Passed|Failed): (\d+)"""),
"pass_group": "Passed",
"fail_group": "Failed",
"known_fail_group": None,
},
"geckoview-junit_summary": {
'regex': re.compile(r'''(Passed|Failed): (\d+)'''),
'pass_group': "Passed",
'fail_group': "Failed",
'known_fail_group': None,
"regex": re.compile(r"""(Passed|Failed): (\d+)"""),
"pass_group": "Passed",
"fail_group": "Failed",
"known_fail_group": None,
},
"harness_error": {
'full_regex': re.compile(r"(?:TEST-UNEXPECTED-FAIL|PROCESS-CRASH) \| .* \|[^\|]* (application crashed|missing output line for total leaks!|negative leaks caught!|\d+ bytes leaked)"), # NOQA: E501
'minimum_regex': re.compile(r'''(TEST-UNEXPECTED|PROCESS-CRASH)'''),
'retry_regex': re.compile(r'''(FAIL-SHOULD-RETRY|No space left on device|ADBError|ADBProcessError|ADBTimeoutError|program finished with exit code 80|INFRA-ERROR)''') # NOQA: E501
"full_regex": re.compile(
r"(?:TEST-UNEXPECTED-FAIL|PROCESS-CRASH) \| .* \|[^\|]* (application crashed|missing output line for total leaks!|negative leaks caught!|\d+ bytes leaked)" # NOQA: E501
),
"minimum_regex": re.compile(r"""(TEST-UNEXPECTED|PROCESS-CRASH)"""),
"retry_regex": re.compile(
r"""(FAIL-SHOULD-RETRY|No space left on device|ADBError|ADBProcessError|ADBTimeoutError|program finished with exit code 80|INFRA-ERROR)""" # NOQA: E501
),
},
}
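# Illustrative sketch (not part of this patch): each *_summary entry above
# pairs a compiled regex with group names, and a harness summary line is
# matched against it to pull out pass/fail counts. With the xpcshell-style
# pattern and a made-up log line:
import re

summary = {
    "regex": re.compile(r"""INFO \| (Passed|Failed|Todo): (\d+)"""),
    "pass_group": "Passed",
    "fail_group": "Failed",
    "known_fail_group": "Todo",
}
match = summary["regex"].search("INFO | Passed: 1193")
assert match.group(1) == "Passed" and match.group(2) == "1193"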
TestPassed = [
{'regex': re.compile('''(TEST-INFO|TEST-KNOWN-FAIL|TEST-PASS|INFO \| )'''), 'level': INFO},
{
"regex": re.compile("""(TEST-INFO|TEST-KNOWN-FAIL|TEST-PASS|INFO \| )"""),
"level": INFO,
},
]
BaseHarnessErrorList = [
{'substr': 'TEST-UNEXPECTED', 'level': ERROR, },
{'substr': 'PROCESS-CRASH', 'level': ERROR, },
{'regex': re.compile('''ERROR: (Address|Leak)Sanitizer'''), 'level': ERROR, },
{'regex': re.compile('''thread '([^']+)' panicked'''), 'level': ERROR, },
{'substr': 'pure virtual method called', 'level': ERROR, },
{'substr': 'Pure virtual function called!', 'level': ERROR, },
{
"substr": "TEST-UNEXPECTED",
"level": ERROR,
},
{
"substr": "PROCESS-CRASH",
"level": ERROR,
},
{
"regex": re.compile("""ERROR: (Address|Leak)Sanitizer"""),
"level": ERROR,
},
{
"regex": re.compile("""thread '([^']+)' panicked"""),
"level": ERROR,
},
{
"substr": "pure virtual method called",
"level": ERROR,
},
{
"substr": "Pure virtual function called!",
"level": ERROR,
},
]
HarnessErrorList = BaseHarnessErrorList + [
{'substr': 'A content process crashed', 'level': ERROR, },
{
"substr": "A content process crashed",
"level": ERROR,
},
]
# wpt can have expected crashes so we can't always turn treeherder orange in those cases
WptHarnessErrorList = BaseHarnessErrorList
LogcatErrorList = [
{'substr': 'Fatal signal 11 (SIGSEGV)', 'level': ERROR,
'explanation': 'This usually indicates the B2G process has crashed'},
{'substr': 'Fatal signal 7 (SIGBUS)', 'level': ERROR,
'explanation': 'This usually indicates the B2G process has crashed'},
{'substr': '[JavaScript Error:', 'level': WARNING},
{'substr': 'seccomp sandbox violation', 'level': ERROR,
'explanation': 'A content process has violated the system call sandbox (bug 790923)'},
{
"substr": "Fatal signal 11 (SIGSEGV)",
"level": ERROR,
"explanation": "This usually indicates the B2G process has crashed",
},
{
"substr": "Fatal signal 7 (SIGBUS)",
"level": ERROR,
"explanation": "This usually indicates the B2G process has crashed",
},
{"substr": "[JavaScript Error:", "level": WARNING},
{
"substr": "seccomp sandbox violation",
"level": ERROR,
"explanation": "A content process has violated the system call sandbox (bug 790923)",
},
]
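# Illustrative sketch (not part of this patch): every entry in the error lists
# above is either a {"substr": ...} or a {"regex": ...} rule plus a log level;
# a log parser checks each incoming line against the rules, roughly like this
# (string levels stand in for the INFO/WARNING/ERROR constants):
import re

rules = [
    {"substr": "TEST-UNEXPECTED", "level": "ERROR"},
    {"regex": re.compile(r"thread '([^']+)' panicked"), "level": "ERROR"},
]

def classify(line, rule_list):
    # Return the level of the first rule the line trips, or None.
    for rule in rule_list:
        if "substr" in rule and rule["substr"] in line:
            return rule["level"]
        if "regex" in rule and rule["regex"].search(line):
            return rule["level"]
    return None

assert classify("TEST-UNEXPECTED-FAIL | foo.js | bar", rules) == "ERROR"
assert classify("thread 'main' panicked at 'oops'", rules) == "ERROR"
assert classify("TEST-PASS | foo.js | everything fine", rules) is None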

File diff suppressed because it is too large


@ -24,86 +24,127 @@ from mozharness.mozilla.automation import AutomationMixin, TBPL_WARNING
from mozharness.mozilla.structuredlog import StructuredOutputParser
from mozharness.mozilla.testing.unittest import DesktopUnittestOutputParser
from mozharness.mozilla.testing.try_tools import TryToolsMixin, try_config_options
from mozharness.mozilla.testing.verify_tools import VerifyToolsMixin, verify_config_options
from mozharness.mozilla.testing.verify_tools import (
VerifyToolsMixin,
verify_config_options,
)
from mozharness.mozilla.tooltool import TooltoolMixin
from mozharness.lib.python.authentication import get_credentials
INSTALLER_SUFFIXES = ('.apk', # Android
'.tar.bz2', '.tar.gz', # Linux
'.dmg', # Mac
'.installer-stub.exe', '.installer.exe', '.exe', '.zip', # Windows
)
INSTALLER_SUFFIXES = (
".apk", # Android
".tar.bz2",
".tar.gz", # Linux
".dmg", # Mac
".installer-stub.exe",
".installer.exe",
".exe",
".zip", # Windows
)
# https://searchfox.org/mozilla-central/source/testing/config/tooltool-manifests
TOOLTOOL_PLATFORM_DIR = {
'linux': 'linux32',
'linux64': 'linux64',
'win32': 'win32',
'win64': 'win32',
'macosx': 'macosx64',
"linux": "linux32",
"linux64": "linux64",
"win32": "win32",
"win64": "win32",
"macosx": "macosx64",
}
testing_config_options = [
[["--installer-url"],
{"action": "store",
"dest": "installer_url",
"default": None,
"help": "URL to the installer to install",
}],
[["--installer-path"],
{"action": "store",
"dest": "installer_path",
"default": None,
"help": "Path to the installer to install. "
"This is set automatically if run with --download-and-extract.",
}],
[["--binary-path"],
{"action": "store",
"dest": "binary_path",
"default": None,
"help": "Path to installed binary. This is set automatically if run with --install.",
}],
[["--exe-suffix"],
{"action": "store",
"dest": "exe_suffix",
"default": None,
"help": "Executable suffix for binaries on this platform",
}],
[["--test-url"],
{"action": "store",
"dest": "test_url",
"default": None,
"help": "URL to the zip file containing the actual tests",
}],
[["--test-packages-url"],
{"action": "store",
"dest": "test_packages_url",
"default": None,
"help": "URL to a json file describing which tests archives to download",
}],
[["--jsshell-url"],
{"action": "store",
"dest": "jsshell_url",
"default": None,
"help": "URL to the jsshell to install",
}],
[["--download-symbols"],
{"action": "store",
"dest": "download_symbols",
"type": "choice",
"choices": ['ondemand', 'true'],
"help": "Download and extract crash reporter symbols.",
}],
] + copy.deepcopy(virtualenv_config_options) \
+ copy.deepcopy(try_config_options) \
+ copy.deepcopy(verify_config_options)
testing_config_options = (
[
[
["--installer-url"],
{
"action": "store",
"dest": "installer_url",
"default": None,
"help": "URL to the installer to install",
},
],
[
["--installer-path"],
{
"action": "store",
"dest": "installer_path",
"default": None,
"help": "Path to the installer to install. "
"This is set automatically if run with --download-and-extract.",
},
],
[
["--binary-path"],
{
"action": "store",
"dest": "binary_path",
"default": None,
"help": "Path to installed binary. This is set automatically if run with --install.", # NOQA: E501
},
],
[
["--exe-suffix"],
{
"action": "store",
"dest": "exe_suffix",
"default": None,
"help": "Executable suffix for binaries on this platform",
},
],
[
["--test-url"],
{
"action": "store",
"dest": "test_url",
"default": None,
"help": "URL to the zip file containing the actual tests",
},
],
[
["--test-packages-url"],
{
"action": "store",
"dest": "test_packages_url",
"default": None,
"help": "URL to a json file describing which tests archives to download",
},
],
[
["--jsshell-url"],
{
"action": "store",
"dest": "jsshell_url",
"default": None,
"help": "URL to the jsshell to install",
},
],
[
["--download-symbols"],
{
"action": "store",
"dest": "download_symbols",
"type": "choice",
"choices": ["ondemand", "true"],
"help": "Download and extract crash reporter symbols.",
},
],
]
+ copy.deepcopy(virtualenv_config_options)
+ copy.deepcopy(try_config_options)
+ copy.deepcopy(verify_config_options)
)
# TestingMixin {{{1
class TestingMixin(VirtualenvMixin, AutomationMixin, ResourceMonitoringMixin,
TooltoolMixin, TryToolsMixin, VerifyToolsMixin):
class TestingMixin(
VirtualenvMixin,
AutomationMixin,
ResourceMonitoringMixin,
TooltoolMixin,
TryToolsMixin,
VerifyToolsMixin,
):
"""
The steps to identify + download the proper bits for [browser] unit
tests and Talos.
@ -130,14 +171,16 @@ class TestingMixin(VirtualenvMixin, AutomationMixin, ResourceMonitoringMixin,
elif self.installer_url:
reference_url = self.installer_url
else:
self.fatal("Can't figure out build directory urls without an installer_url "
"or test_packages_url!")
self.fatal(
"Can't figure out build directory urls without an installer_url "
"or test_packages_url!"
)
reference_url = urllib.parse.unquote(reference_url)
parts = list(urlparse(reference_url))
last_slash = parts[2].rfind('/')
parts[2] = '/'.join([parts[2][:last_slash], file_name])
last_slash = parts[2].rfind("/")
parts[2] = "/".join([parts[2][:last_slash], file_name])
url = ParseResult(*parts).geturl()
@ -148,19 +191,21 @@ class TestingMixin(VirtualenvMixin, AutomationMixin, ResourceMonitoringMixin,
in the build upload directory where that file can be found.
"""
if self.test_packages_url:
reference_suffixes = ['.test_packages.json']
reference_suffixes = [".test_packages.json"]
reference_url = self.test_packages_url
elif self.installer_url:
reference_suffixes = INSTALLER_SUFFIXES
reference_url = self.installer_url
else:
self.fatal("Can't figure out build directory urls without an installer_url "
"or test_packages_url!")
self.fatal(
"Can't figure out build directory urls without an installer_url "
"or test_packages_url!"
)
url = None
for reference_suffix in reference_suffixes:
if reference_url.endswith(reference_suffix):
url = reference_url[:-len(reference_suffix)] + suffix
url = reference_url[: -len(reference_suffix)] + suffix
break
return url
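# Illustrative sketch (not part of this patch): the helper above swaps one
# well-known suffix for another on the same build-directory URL. With a
# made-up reference URL, asking for the symbols archive next to a
# .test_packages.json reference works out to:
reference_url = (
    "https://example.com/builds/firefox-99.0.en-US.win64.test_packages.json"
)
reference_suffix = ".test_packages.json"
suffix = ".crashreporter-symbols.zip"
url = reference_url[: -len(reference_suffix)] + suffix
assert url == (
    "https://example.com/builds/firefox-99.0.en-US.win64.crashreporter-symbols.zip"
)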
@ -170,7 +215,9 @@ class TestingMixin(VirtualenvMixin, AutomationMixin, ResourceMonitoringMixin,
return self.symbols_url
elif self.installer_url:
symbols_url = self.query_prefixed_build_dir_url('.crashreporter-symbols.zip')
symbols_url = self.query_prefixed_build_dir_url(
".crashreporter-symbols.zip"
)
# Check if the URL exists. If not, use none to allow mozcrash to auto-check for symbols
try:
@ -178,33 +225,39 @@ class TestingMixin(VirtualenvMixin, AutomationMixin, ResourceMonitoringMixin,
self._urlopen(symbols_url, timeout=120)
self.symbols_url = symbols_url
except Exception as ex:
self.warning("Cannot open symbols url %s (installer url: %s): %s" %
(symbols_url, self.installer_url, ex))
self.warning(
"Cannot open symbols url %s (installer url: %s): %s"
% (symbols_url, self.installer_url, ex)
)
if raise_on_failure:
raise
# If no symbols URL can be determined let minidump_stackwalk query the symbols.
# As of now this only works for Nightly and release builds.
if not self.symbols_url:
self.warning("No symbols_url found. Let minidump_stackwalk query for symbols.")
self.warning(
"No symbols_url found. Let minidump_stackwalk query for symbols."
)
return self.symbols_url
def _pre_config_lock(self, rw_config):
for i, (target_file, target_dict) in enumerate(rw_config.all_cfg_files_and_dicts):
if 'developer_config' in target_file:
for i, (target_file, target_dict) in enumerate(
rw_config.all_cfg_files_and_dicts
):
if "developer_config" in target_file:
self._developer_mode_changes(rw_config)
def _developer_mode_changes(self, rw_config):
""" This function is called when you append the config called
developer_config.py. This allows you to run a job
outside of the Release Engineering infrastructure.
"""This function is called when you append the config called
developer_config.py. This allows you to run a job
outside of the Release Engineering infrastructure.
What this function accomplishes is:
* --installer-url is set
* --test-url is set if needed
* every url is substituted by another external to the
Release Engineering network
What this function accomplishes is:
* --installer-url is set
* --test-url is set if needed
* every url is substituted by another external to the
Release Engineering network
"""
c = self.config
orig_config = copy.deepcopy(c)
@ -221,15 +274,19 @@ class TestingMixin(VirtualenvMixin, AutomationMixin, ResourceMonitoringMixin,
if c.get("installer_url") is None:
self.exception("You must use --installer-url with developer_config.py")
if c.get("require_test_zip"):
if not c.get('test_url') and not c.get('test_packages_url'):
self.exception("You must use --test-url or --test-packages-url with "
"developer_config.py")
if not c.get("test_url") and not c.get("test_packages_url"):
self.exception(
"You must use --test-url or --test-packages-url with "
"developer_config.py"
)
c["installer_url"] = _replace_url(c["installer_url"], c["replace_urls"])
if c.get("test_url"):
c["test_url"] = _replace_url(c["test_url"], c["replace_urls"])
if c.get("test_packages_url"):
c["test_packages_url"] = _replace_url(c["test_packages_url"], c["replace_urls"])
c["test_packages_url"] = _replace_url(
c["test_packages_url"], c["replace_urls"]
)
for key, value in self.config.items():
if type(value) == str and value.startswith("http"):
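# Illustrative sketch (not part of this patch): the docstring above says every
# URL is substituted by one reachable from outside the Release Engineering
# network. The real _replace_url() helper lives elsewhere in mozharness;
# assuming replace_urls is a sequence of (internal, external) prefix pairs,
# the substitution amounts to something like this (URLs made up):
def replace_url(url, replace_urls):
    for old, new in replace_urls:
        url = url.replace(old, new)
    return url

rewrites = [("http://queue.releng.internal/", "https://queue.example.org/")]
assert (
    replace_url("http://queue.releng.internal/artifact.zip", rewrites)
    == "https://queue.example.org/artifact.zip"
)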
@ -240,17 +297,19 @@ class TestingMixin(VirtualenvMixin, AutomationMixin, ResourceMonitoringMixin,
get_credentials()
def _urlopen(self, url, **kwargs):
'''
"""
This function helps dealing with downloading files while outside
of the releng network.
'''
"""
# Code based on http://code.activestate.com/recipes/305288-http-basic-authentication
def _urlopen_basic_auth(url, **kwargs):
self.info("We want to download this file %s" % url)
if not hasattr(self, "https_username"):
self.info("NOTICE: Files downloaded from outside of "
"Release Engineering network require LDAP "
"credentials.")
self.info(
"NOTICE: Files downloaded from outside of "
"Release Engineering network require LDAP "
"credentials."
)
self.https_username, self.https_password = get_credentials()
# This creates a password manager
@ -268,7 +327,9 @@ class TestingMixin(VirtualenvMixin, AutomationMixin, ResourceMonitoringMixin,
return _urlopen_basic_auth(url, **kwargs)
else:
# windows certificates need to be refreshed (https://bugs.python.org/issue36011)
if self.platform_name() in ('win64',) and platform.architecture()[0] in ('x64',):
if self.platform_name() in ("win64",) and platform.architecture()[0] in (
"x64",
):
if self.ssl_context is None:
self.ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS)
self.ssl_context.load_default_certs()
@ -287,9 +348,11 @@ class TestingMixin(VirtualenvMixin, AutomationMixin, ResourceMonitoringMixin,
You can set this by specifying --installer-url URL
"""
if (self.config.get("require_test_zip") and
not self.test_url and
not self.test_packages_url):
if (
self.config.get("require_test_zip")
and not self.test_url
and not self.test_packages_url
):
message += """test_url isn't set!
You can set this by specifying --test-url URL
@ -299,17 +362,18 @@ You can set this by specifying --test-url URL
def _read_packages_manifest(self):
dirs = self.query_abs_dirs()
source = self.download_file(self.test_packages_url,
parent_dir=dirs['abs_work_dir'],
error_level=FATAL)
source = self.download_file(
self.test_packages_url, parent_dir=dirs["abs_work_dir"], error_level=FATAL
)
with self.opened(os.path.realpath(source)) as (fh, err):
package_requirements = json.load(fh)
if not package_requirements or err:
self.fatal("There was an error reading test package requirements from %s "
"requirements: `%s` - error: `%s`" % (source,
package_requirements or 'None',
err or 'No error'))
self.fatal(
"There was an error reading test package requirements from %s "
"requirements: `%s` - error: `%s`"
% (source, package_requirements or "None", err or "No error")
)
return package_requirements
def _download_test_packages(self, suite_categories, extract_dirs):
@ -317,29 +381,30 @@ You can set this by specifying --test-url URL
# This is a difference in the convention of the configs more than
# to how these tests are run, so we pave over these differences here.
aliases = {
'mochitest-chrome': 'mochitest',
'mochitest-media': 'mochitest',
'mochitest-plain': 'mochitest',
'mochitest-plain-gpu': 'mochitest',
'mochitest-webgl1-core': 'mochitest',
'mochitest-webgl1-ext': 'mochitest',
'mochitest-webgl2-core': 'mochitest',
'mochitest-webgl2-ext': 'mochitest',
'mochitest-webgl2-deqp': 'mochitest',
'mochitest-webgpu': 'mochitest',
'geckoview': 'mochitest',
'geckoview-junit': 'mochitest',
'reftest-qr': 'reftest',
'crashtest': 'reftest',
'crashtest-qr': 'reftest',
'reftest-debug': 'reftest',
'crashtest-debug': 'reftest',
"mochitest-chrome": "mochitest",
"mochitest-media": "mochitest",
"mochitest-plain": "mochitest",
"mochitest-plain-gpu": "mochitest",
"mochitest-webgl1-core": "mochitest",
"mochitest-webgl1-ext": "mochitest",
"mochitest-webgl2-core": "mochitest",
"mochitest-webgl2-ext": "mochitest",
"mochitest-webgl2-deqp": "mochitest",
"mochitest-webgpu": "mochitest",
"geckoview": "mochitest",
"geckoview-junit": "mochitest",
"reftest-qr": "reftest",
"crashtest": "reftest",
"crashtest-qr": "reftest",
"reftest-debug": "reftest",
"crashtest-debug": "reftest",
}
suite_categories = [aliases.get(name, name) for name in suite_categories]
dirs = self.query_abs_dirs()
test_install_dir = dirs.get('abs_test_install_dir',
os.path.join(dirs['abs_work_dir'], 'tests'))
test_install_dir = dirs.get(
"abs_test_install_dir", os.path.join(dirs["abs_work_dir"], "tests")
)
self.mkdir_p(test_install_dir)
package_requirements = self._read_packages_manifest()
target_packages = []
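# Illustrative sketch (not part of this patch): the aliases mapping above only
# folds the many mochitest/reftest flavours onto the test package that ships
# their harness; names it does not know pass through unchanged.
aliases = {"mochitest-plain-gpu": "mochitest", "crashtest": "reftest"}
suite_categories = ["mochitest-plain-gpu", "crashtest", "xpcshell"]
assert [aliases.get(name, name) for name in suite_categories] == [
    "mochitest",
    "reftest",
    "xpcshell",
]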
@ -360,83 +425,100 @@ You can set this by specifying --test-url URL
else:
# If we don't harness specific requirements, assume the common zip
# has everything we need to run tests for this suite.
target_packages.extend(package_requirements['common'])
target_packages.extend(package_requirements["common"])
# eliminate duplicates -- no need to download anything twice
target_packages = list(set(target_packages))
self.info("Downloading packages: %s for test suite categories: %s" %
(target_packages, suite_categories))
self.info(
"Downloading packages: %s for test suite categories: %s"
% (target_packages, suite_categories)
)
for file_name in target_packages:
target_dir = test_install_dir
unpack_dirs = extract_dirs
if "common.tests" in file_name and isinstance(unpack_dirs, list):
# Ensure that the following files are always getting extracted
required_files = ["mach",
"mozinfo.json",
]
required_files = [
"mach",
"mozinfo.json",
]
for req_file in required_files:
if req_file not in unpack_dirs:
self.info("Adding '{}' for extraction from common.tests archive"
.format(req_file))
self.info(
"Adding '{}' for extraction from common.tests archive".format(
req_file
)
)
unpack_dirs.append(req_file)
if "jsshell-" in file_name or file_name == "target.jsshell.zip":
self.info("Special-casing the jsshell zip file")
unpack_dirs = None
target_dir = dirs['abs_test_bin_dir']
target_dir = dirs["abs_test_bin_dir"]
if "web-platform" in file_name:
self.info("Extracting everything from web-platform archive")
unpack_dirs = None
url = self.query_build_dir_url(file_name)
self.download_unpack(url, target_dir,
extract_dirs=unpack_dirs)
self.download_unpack(url, target_dir, extract_dirs=unpack_dirs)
def _download_test_zip(self, extract_dirs=None):
dirs = self.query_abs_dirs()
test_install_dir = dirs.get('abs_test_install_dir',
os.path.join(dirs['abs_work_dir'], 'tests'))
self.download_unpack(self.test_url, test_install_dir,
extract_dirs=extract_dirs)
test_install_dir = dirs.get(
"abs_test_install_dir", os.path.join(dirs["abs_work_dir"], "tests")
)
self.download_unpack(self.test_url, test_install_dir, extract_dirs=extract_dirs)
def structured_output(self, suite_category):
"""Defines whether structured logging is in use in this configuration. This
may need to be replaced with data from a different config at the resolution
of bug 1070041 and related bugs.
"""
return ('structured_suites' in self.config and
suite_category in self.config['structured_suites'])
return (
"structured_suites" in self.config
and suite_category in self.config["structured_suites"]
)
def get_test_output_parser(self, suite_category, strict=False,
fallback_parser_class=DesktopUnittestOutputParser,
**kwargs):
def get_test_output_parser(
self,
suite_category,
strict=False,
fallback_parser_class=DesktopUnittestOutputParser,
**kwargs
):
"""Derive and return an appropriate output parser, either the structured
output parser or a fallback based on the type of logging in use as determined by
configuration.
"""
if not self.structured_output(suite_category):
if fallback_parser_class is DesktopUnittestOutputParser:
return DesktopUnittestOutputParser(suite_category=suite_category, **kwargs)
return DesktopUnittestOutputParser(
suite_category=suite_category, **kwargs
)
return fallback_parser_class(**kwargs)
self.info("Structured output parser in use for %s." % suite_category)
return StructuredOutputParser(suite_category=suite_category, strict=strict, **kwargs)
return StructuredOutputParser(
suite_category=suite_category, strict=strict, **kwargs
)
def _download_installer(self):
file_name = None
if self.installer_path:
file_name = self.installer_path
dirs = self.query_abs_dirs()
source = self.download_file(self.installer_url,
file_name=file_name,
parent_dir=dirs['abs_work_dir'],
error_level=FATAL)
source = self.download_file(
self.installer_url,
file_name=file_name,
parent_dir=dirs["abs_work_dir"],
error_level=FATAL,
)
self.installer_path = os.path.realpath(source)
def _download_and_extract_symbols(self):
dirs = self.query_abs_dirs()
if self.config.get('download_symbols') == 'ondemand':
if self.config.get("download_symbols") == "ondemand":
self.symbols_url = self.query_symbols_url()
self.symbols_path = self.symbols_url
return
@ -447,13 +529,13 @@ You can set this by specifying --test-url URL
# before being unable to proceed (e.g. debug tests need symbols)
self.symbols_url = self.retry(
action=self.query_symbols_url,
kwargs={'raise_on_failure': True},
kwargs={"raise_on_failure": True},
sleeptime=20,
error_level=FATAL,
error_message="We can't proceed without downloading symbols.",
)
if not self.symbols_path:
self.symbols_path = os.path.join(dirs['abs_work_dir'], 'symbols')
self.symbols_path = os.path.join(dirs["abs_work_dir"], "symbols")
if self.symbols_url:
self.download_unpack(self.symbols_url, self.symbols_path)
@ -466,20 +548,22 @@ You can set this by specifying --test-url URL
# See bug 957502 and friends
from_ = "http://ftp.mozilla.org"
to_ = "https://ftp-ssl.mozilla.org"
for attr in 'symbols_url', 'installer_url', 'test_packages_url', 'test_url':
for attr in "symbols_url", "installer_url", "test_packages_url", "test_url":
url = getattr(self, attr)
if url and url.startswith(from_):
new_url = url.replace(from_, to_)
self.info("Replacing url %s -> %s" % (url, new_url))
setattr(self, attr, new_url)
if 'test_url' in self.config:
if "test_url" in self.config:
# A user has specified a test_url directly, any test_packages_url will
# be ignored.
if self.test_packages_url:
self.error('Test data will be downloaded from "%s", the specified test '
' package data at "%s" will be ignored.' %
(self.config.get('test_url'), self.test_packages_url))
self.error(
'Test data will be downloaded from "%s", the specified test '
' package data at "%s" will be ignored.'
% (self.config.get("test_url"), self.test_packages_url)
)
self._download_test_zip(extract_dirs)
else:
@ -488,77 +572,84 @@ You can set this by specifying --test-url URL
# where the packages manifest is located. This is the case when the
# test package manifest isn't set as a property, which is true
# for some self-serve jobs and platforms using parse_make_upload.
self.test_packages_url = self.query_prefixed_build_dir_url('.test_packages.json')
self.test_packages_url = self.query_prefixed_build_dir_url(
".test_packages.json"
)
suite_categories = suite_categories or ['common']
suite_categories = suite_categories or ["common"]
self._download_test_packages(suite_categories, extract_dirs)
self._download_installer()
if self.config.get('download_symbols'):
if self.config.get("download_symbols"):
self._download_and_extract_symbols()
# create_virtualenv is in VirtualenvMixin.
def preflight_install(self):
if not self.installer_path:
if self.config.get('installer_path'):
self.installer_path = self.config['installer_path']
if self.config.get("installer_path"):
self.installer_path = self.config["installer_path"]
else:
self.fatal("""installer_path isn't set!
self.fatal(
"""installer_path isn't set!
You can set this by:
1. specifying --installer-path PATH, or
2. running the download-and-extract action
""")
"""
)
if not self.is_python_package_installed("mozInstall"):
self.fatal("""Can't call install() without mozinstall!
Did you run with --create-virtualenv? Is mozinstall in virtualenv_modules?""")
self.fatal(
"""Can't call install() without mozinstall!
Did you run with --create-virtualenv? Is mozinstall in virtualenv_modules?"""
)
def install_app(self, app=None, target_dir=None, installer_path=None):
""" Dependent on mozinstall """
# install the application
cmd = [self.query_python_path("mozinstall")]
if app:
cmd.extend(['--app', app])
cmd.extend(["--app", app])
# Remove the below when we no longer need to support mozinstall 0.3
self.info("Detecting whether we're running mozinstall >=1.0...")
output = self.get_output_from_command(cmd + ['-h'])
if '--source' in output:
cmd.append('--source')
output = self.get_output_from_command(cmd + ["-h"])
if "--source" in output:
cmd.append("--source")
# End remove
dirs = self.query_abs_dirs()
if not target_dir:
target_dir = dirs.get('abs_app_install_dir',
os.path.join(dirs['abs_work_dir'],
'application'))
target_dir = dirs.get(
"abs_app_install_dir", os.path.join(dirs["abs_work_dir"], "application")
)
self.mkdir_p(target_dir)
if not installer_path:
installer_path = self.installer_path
cmd.extend([installer_path,
'--destination', target_dir])
cmd.extend([installer_path, "--destination", target_dir])
# TODO we'll need some error checking here
return self.get_output_from_command(cmd, halt_on_failure=True,
fatal_exit_code=3)
return self.get_output_from_command(
cmd, halt_on_failure=True, fatal_exit_code=3
)
def install(self):
self.binary_path = self.install_app(app=self.config.get('application'))
self.binary_path = self.install_app(app=self.config.get("application"))
def uninstall_app(self, install_dir=None):
""" Dependent on mozinstall """
# uninstall the application
cmd = self.query_exe("mozuninstall",
default=self.query_python_path("mozuninstall"),
return_type="list")
cmd = self.query_exe(
"mozuninstall",
default=self.query_python_path("mozuninstall"),
return_type="list",
)
dirs = self.query_abs_dirs()
if not install_dir:
install_dir = dirs.get('abs_app_install_dir',
os.path.join(dirs['abs_work_dir'],
'application'))
install_dir = dirs.get(
"abs_app_install_dir", os.path.join(dirs["abs_work_dir"], "application")
)
cmd.append(install_dir)
# TODO we'll need some error checking here
self.get_output_from_command(cmd, halt_on_failure=True,
fatal_exit_code=3)
self.get_output_from_command(cmd, halt_on_failure=True, fatal_exit_code=3)
def uninstall(self):
self.uninstall_app()
@ -569,14 +660,15 @@ Did you run with --create-virtualenv? Is mozinstall in virtualenv_modules?""")
minidump_stackwalk_path = None
if 'MOZ_FETCHES_DIR' in os.environ:
if "MOZ_FETCHES_DIR" in os.environ:
minidump_stackwalk_path = os.path.join(
os.environ['MOZ_FETCHES_DIR'],
'minidump_stackwalk',
'minidump_stackwalk')
os.environ["MOZ_FETCHES_DIR"],
"minidump_stackwalk",
"minidump_stackwalk",
)
if self.platform_name() in ('win32', 'win64'):
minidump_stackwalk_path += '.exe'
if self.platform_name() in ("win32", "win64"):
minidump_stackwalk_path += ".exe"
if not minidump_stackwalk_path or not os.path.isfile(minidump_stackwalk_path):
self.error("minidump_stackwalk path was not fetched?")
@ -626,40 +718,46 @@ Did you run with --create-virtualenv? Is mozinstall in virtualenv_modules?""")
# platforms like mac as executable files may be universal
# files containing multiple architectures
# NOTE 'enabled' is only here while we have unconsolidated configs
if not suite['enabled']:
if not suite["enabled"]:
continue
if suite.get('architectures'):
if suite.get("architectures"):
arch = platform.architecture()[0]
if arch not in suite['architectures']:
if arch not in suite["architectures"]:
continue
cmd = suite['cmd']
name = suite['name']
self.info("Running pre test command %(name)s with '%(cmd)s'"
% {'name': name, 'cmd': ' '.join(cmd)})
self.run_command(cmd,
cwd=dirs['abs_work_dir'],
error_list=BaseErrorList,
halt_on_failure=suite['halt_on_failure'],
fatal_exit_code=suite.get('fatal_exit_code', 3))
cmd = suite["cmd"]
name = suite["name"]
self.info(
"Running pre test command %(name)s with '%(cmd)s'"
% {"name": name, "cmd": " ".join(cmd)}
)
self.run_command(
cmd,
cwd=dirs["abs_work_dir"],
error_list=BaseErrorList,
halt_on_failure=suite["halt_on_failure"],
fatal_exit_code=suite.get("fatal_exit_code", 3),
)
def preflight_run_tests(self):
"""preflight commands for all tests"""
c = self.config
if c.get('run_cmd_checks_enabled'):
self._run_cmd_checks(c.get('preflight_run_cmd_suites', []))
elif c.get('preflight_run_cmd_suites'):
self.warning("Proceeding without running prerun test commands."
" These are often OS specific and disabling them may"
" result in spurious test results!")
if c.get("run_cmd_checks_enabled"):
self._run_cmd_checks(c.get("preflight_run_cmd_suites", []))
elif c.get("preflight_run_cmd_suites"):
self.warning(
"Proceeding without running prerun test commands."
" These are often OS specific and disabling them may"
" result in spurious test results!"
)
def postflight_run_tests(self):
"""preflight commands for all tests"""
c = self.config
if c.get('run_cmd_checks_enabled'):
self._run_cmd_checks(c.get('postflight_run_cmd_suites', []))
if c.get("run_cmd_checks_enabled"):
self._run_cmd_checks(c.get("postflight_run_cmd_suites", []))
def query_abs_dirs(self):
abs_dirs = super(TestingMixin, self).query_abs_dirs()
if 'MOZ_FETCHES_DIR' in os.environ:
abs_dirs['abs_fetches_dir'] = os.environ['MOZ_FETCHES_DIR']
if "MOZ_FETCHES_DIR" in os.environ:
abs_dirs["abs_fetches_dir"] = os.environ["MOZ_FETCHES_DIR"]
return abs_dirs

File diff suppressed because it is too large


@ -23,179 +23,224 @@ from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_opt
from mozharness.mozilla.testing.unittest import TestSummaryOutputParserHelper
from mozharness.mozilla.testing.codecoverage import (
CodeCoverageMixin,
code_coverage_config_options
code_coverage_config_options,
)
from mozharness.mozilla.testing.errors import HarnessErrorList
from mozharness.mozilla.structuredlog import StructuredOutputParser
class MarionetteTest(TestingMixin, MercurialScript, TransferMixin,
CodeCoverageMixin):
config_options = [[
["--application"],
{"action": "store",
"dest": "application",
"default": None,
"help": "application name of binary"
}
], [
["--app-arg"],
{"action": "store",
"dest": "app_arg",
"default": None,
"help": "Optional command-line argument to pass to the browser"
}
], [
["--marionette-address"],
{"action": "store",
"dest": "marionette_address",
"default": None,
"help": "The host:port of the Marionette server running inside Gecko. "
"Unused for emulator testing",
}
], [
["--emulator"],
{"action": "store",
"type": "choice",
"choices": ['arm', 'x86'],
"dest": "emulator",
"default": None,
"help": "Use an emulator for testing",
}
], [
["--test-manifest"],
{"action": "store",
"dest": "test_manifest",
"default": "unit-tests.ini",
"help": "Path to test manifest to run relative to the Marionette "
"tests directory",
}
], [
["--total-chunks"],
{"action": "store",
"dest": "total_chunks",
"help": "Number of total chunks",
}
], [
["--this-chunk"],
{"action": "store",
"dest": "this_chunk",
"help": "Number of this chunk",
}
], [
["--setpref"],
{"action": "append",
"metavar": "PREF=VALUE",
"dest": "extra_prefs",
"default": [],
"help": "Extra user prefs.",
}
], [
["--headless"],
{"action": "store_true",
"dest": "headless",
"default": False,
"help": "Run tests in headless mode.",
}
], [
["--headless-width"],
{"action": "store",
"dest": "headless_width",
"default": "1600",
"help": "Specify headless virtual screen width (default: 1600).",
}
], [
["--headless-height"],
{"action": "store",
"dest": "headless_height",
"default": "1200",
"help": "Specify headless virtual screen height (default: 1200).",
}
], [
["--allow-software-gl-layers"],
{"action": "store_true",
"dest": "allow_software_gl_layers",
"default": False,
"help": "Permits a software GL implementation (such as LLVMPipe) to use the GL compositor."
}
], [
["--enable-webrender"],
{"action": "store_true",
"dest": "enable_webrender",
"default": False,
"help": "Enable the WebRender compositor in Gecko."
}
]] + copy.deepcopy(testing_config_options) \
class MarionetteTest(TestingMixin, MercurialScript, TransferMixin, CodeCoverageMixin):
config_options = (
[
[
["--application"],
{
"action": "store",
"dest": "application",
"default": None,
"help": "application name of binary",
},
],
[
["--app-arg"],
{
"action": "store",
"dest": "app_arg",
"default": None,
"help": "Optional command-line argument to pass to the browser",
},
],
[
["--marionette-address"],
{
"action": "store",
"dest": "marionette_address",
"default": None,
"help": "The host:port of the Marionette server running inside Gecko. "
"Unused for emulator testing",
},
],
[
["--emulator"],
{
"action": "store",
"type": "choice",
"choices": ["arm", "x86"],
"dest": "emulator",
"default": None,
"help": "Use an emulator for testing",
},
],
[
["--test-manifest"],
{
"action": "store",
"dest": "test_manifest",
"default": "unit-tests.ini",
"help": "Path to test manifest to run relative to the Marionette "
"tests directory",
},
],
[
["--total-chunks"],
{
"action": "store",
"dest": "total_chunks",
"help": "Number of total chunks",
},
],
[
["--this-chunk"],
{
"action": "store",
"dest": "this_chunk",
"help": "Number of this chunk",
},
],
[
["--setpref"],
{
"action": "append",
"metavar": "PREF=VALUE",
"dest": "extra_prefs",
"default": [],
"help": "Extra user prefs.",
},
],
[
["--headless"],
{
"action": "store_true",
"dest": "headless",
"default": False,
"help": "Run tests in headless mode.",
},
],
[
["--headless-width"],
{
"action": "store",
"dest": "headless_width",
"default": "1600",
"help": "Specify headless virtual screen width (default: 1600).",
},
],
[
["--headless-height"],
{
"action": "store",
"dest": "headless_height",
"default": "1200",
"help": "Specify headless virtual screen height (default: 1200).",
},
],
[
["--allow-software-gl-layers"],
{
"action": "store_true",
"dest": "allow_software_gl_layers",
"default": False,
"help": "Permits a software GL implementation (such as LLVMPipe) to use the GL compositor.", # NOQA: E501
},
],
[
["--enable-webrender"],
{
"action": "store_true",
"dest": "enable_webrender",
"default": False,
"help": "Enable the WebRender compositor in Gecko.",
},
],
]
+ copy.deepcopy(testing_config_options)
+ copy.deepcopy(code_coverage_config_options)
)
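# Illustrative sketch (not part of this patch): config_options is a list of
# (flag list, option-keyword dict) pairs in the mozharness style; the real
# parsing happens inside mozharness, but the data shape is close to what an
# option parser consumes. Entries using optparse-only keywords such as
# type="choice" would need adjusting, so this sketch registers only an
# argparse-compatible entry:
import argparse

def build_parser(options):
    parser = argparse.ArgumentParser()
    for flags, kwargs in options:
        parser.add_argument(*flags, **kwargs)
    return parser

parser = build_parser(
    [
        [
            ["--headless"],
            {
                "action": "store_true",
                "dest": "headless",
                "default": False,
                "help": "Run tests in headless mode.",
            },
        ]
    ]
)
assert parser.parse_args(["--headless"]).headless is True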
repos = []
def __init__(self, require_config_file=False):
super(MarionetteTest, self).__init__(
config_options=self.config_options,
all_actions=['clobber',
'pull',
'download-and-extract',
'create-virtualenv',
'install',
'run-tests'],
default_actions=['clobber',
'pull',
'download-and-extract',
'create-virtualenv',
'install',
'run-tests'],
all_actions=[
"clobber",
"pull",
"download-and-extract",
"create-virtualenv",
"install",
"run-tests",
],
default_actions=[
"clobber",
"pull",
"download-and-extract",
"create-virtualenv",
"install",
"run-tests",
],
require_config_file=require_config_file,
config={'require_test_zip': True})
config={"require_test_zip": True},
)
# these are necessary since self.config is read only
c = self.config
self.installer_url = c.get('installer_url')
self.installer_path = c.get('installer_path')
self.binary_path = c.get('binary_path')
self.test_url = c.get('test_url')
self.test_packages_url = c.get('test_packages_url')
self.installer_url = c.get("installer_url")
self.installer_path = c.get("installer_path")
self.binary_path = c.get("binary_path")
self.test_url = c.get("test_url")
self.test_packages_url = c.get("test_packages_url")
self.test_suite = self._get_test_suite(c.get('emulator'))
self.test_suite = self._get_test_suite(c.get("emulator"))
if self.test_suite not in self.config["suite_definitions"]:
self.fatal("{} is not defined in the config!".format(self.test_suite))
if c.get('structured_output'):
if c.get("structured_output"):
self.parser_class = StructuredOutputParser
else:
self.parser_class = TestSummaryOutputParserHelper
def _pre_config_lock(self, rw_config):
super(MarionetteTest, self)._pre_config_lock(rw_config)
if not self.config.get('emulator') and not self.config.get('marionette_address'):
self.fatal("You need to specify a --marionette-address for non-emulator tests! "
"(Try --marionette-address localhost:2828 )")
if not self.config.get("emulator") and not self.config.get(
"marionette_address"
):
self.fatal(
"You need to specify a --marionette-address for non-emulator tests! "
"(Try --marionette-address localhost:2828 )"
)
def _query_tests_dir(self):
dirs = self.query_abs_dirs()
test_dir = self.config["suite_definitions"][self.test_suite]["testsdir"]
return os.path.join(dirs['abs_test_install_dir'], test_dir)
return os.path.join(dirs["abs_test_install_dir"], test_dir)
def query_abs_dirs(self):
if self.abs_dirs:
return self.abs_dirs
abs_dirs = super(MarionetteTest, self).query_abs_dirs()
dirs = {}
dirs['abs_test_install_dir'] = os.path.join(
abs_dirs['abs_work_dir'], 'tests')
dirs['abs_marionette_dir'] = os.path.join(
dirs['abs_test_install_dir'], 'marionette', 'harness', 'marionette_harness')
dirs['abs_marionette_tests_dir'] = os.path.join(
dirs['abs_test_install_dir'], 'marionette', 'tests', 'testing',
'marionette', 'harness', 'marionette_harness', 'tests')
dirs['abs_gecko_dir'] = os.path.join(
abs_dirs['abs_work_dir'], 'gecko')
dirs['abs_emulator_dir'] = os.path.join(
abs_dirs['abs_work_dir'], 'emulator')
dirs["abs_test_install_dir"] = os.path.join(abs_dirs["abs_work_dir"], "tests")
dirs["abs_marionette_dir"] = os.path.join(
dirs["abs_test_install_dir"], "marionette", "harness", "marionette_harness"
)
dirs["abs_marionette_tests_dir"] = os.path.join(
dirs["abs_test_install_dir"],
"marionette",
"tests",
"testing",
"marionette",
"harness",
"marionette_harness",
"tests",
)
dirs["abs_gecko_dir"] = os.path.join(abs_dirs["abs_work_dir"], "gecko")
dirs["abs_emulator_dir"] = os.path.join(abs_dirs["abs_work_dir"], "emulator")
dirs['abs_blob_upload_dir'] = os.path.join(abs_dirs['abs_work_dir'], 'blobber_upload_dir')
dirs["abs_blob_upload_dir"] = os.path.join(
abs_dirs["abs_work_dir"], "blobber_upload_dir"
)
for key in dirs.keys():
if key not in abs_dirs:
@ -203,12 +248,12 @@ class MarionetteTest(TestingMixin, MercurialScript, TransferMixin,
self.abs_dirs = abs_dirs
return self.abs_dirs
@PreScriptAction('create-virtualenv')
@PreScriptAction("create-virtualenv")
def _configure_marionette_virtualenv(self, action):
dirs = self.query_abs_dirs()
requirements = os.path.join(dirs['abs_test_install_dir'],
'config',
'marionette_requirements.txt')
requirements = os.path.join(
dirs["abs_test_install_dir"], "config", "marionette_requirements.txt"
)
if not os.path.isfile(requirements):
self.fatal(
"Could not find marionette requirements file: {}".format(requirements)
@ -221,27 +266,30 @@ class MarionetteTest(TestingMixin, MercurialScript, TransferMixin,
Determine which in tree options group to use and return the
appropriate key.
"""
platform = 'emulator' if is_emulator else 'desktop'
platform = "emulator" if is_emulator else "desktop"
# Currently running marionette on an emulator means webapi
# tests. This method will need to change if this does.
testsuite = 'webapi' if is_emulator else 'marionette'
return '{}_{}'.format(testsuite, platform)
testsuite = "webapi" if is_emulator else "marionette"
return "{}_{}".format(testsuite, platform)
def download_and_extract(self):
super(MarionetteTest, self).download_and_extract()
if self.config.get('emulator'):
if self.config.get("emulator"):
dirs = self.query_abs_dirs()
self.mkdir_p(dirs['abs_emulator_dir'])
tar = self.query_exe('tar', return_type='list')
self.run_command(tar + ['zxf', self.installer_path],
cwd=dirs['abs_emulator_dir'],
error_list=TarErrorList,
halt_on_failure=True, fatal_exit_code=3)
self.mkdir_p(dirs["abs_emulator_dir"])
tar = self.query_exe("tar", return_type="list")
self.run_command(
tar + ["zxf", self.installer_path],
cwd=dirs["abs_emulator_dir"],
error_list=TarErrorList,
halt_on_failure=True,
fatal_exit_code=3,
)
def install(self):
if self.config.get('emulator'):
if self.config.get("emulator"):
self.info("Emulator tests; skipping.")
else:
super(MarionetteTest, self).install()
@ -252,51 +300,50 @@ class MarionetteTest(TestingMixin, MercurialScript, TransferMixin,
"""
dirs = self.query_abs_dirs()
raw_log_file = os.path.join(dirs['abs_blob_upload_dir'],
'marionette_raw.log')
error_summary_file = os.path.join(dirs['abs_blob_upload_dir'],
'marionette_errorsummary.log')
html_report_file = os.path.join(dirs['abs_blob_upload_dir'],
'report.html')
raw_log_file = os.path.join(dirs["abs_blob_upload_dir"], "marionette_raw.log")
error_summary_file = os.path.join(
dirs["abs_blob_upload_dir"], "marionette_errorsummary.log"
)
html_report_file = os.path.join(dirs["abs_blob_upload_dir"], "report.html")
config_fmt_args = {
# emulator builds require a longer timeout
'timeout': 60000 if self.config.get('emulator') else 10000,
'profile': os.path.join(dirs['abs_work_dir'], 'profile'),
'xml_output': os.path.join(dirs['abs_work_dir'], 'output.xml'),
'html_output': os.path.join(dirs['abs_blob_upload_dir'], 'output.html'),
'logcat_dir': dirs['abs_work_dir'],
'emulator': 'arm',
'symbols_path': self.symbols_path,
'binary': self.binary_path,
'address': self.config.get('marionette_address'),
'raw_log_file': raw_log_file,
'error_summary_file': error_summary_file,
'html_report_file': html_report_file,
'gecko_log': dirs["abs_blob_upload_dir"],
'this_chunk': self.config.get('this_chunk', 1),
'total_chunks': self.config.get('total_chunks', 1)
"timeout": 60000 if self.config.get("emulator") else 10000,
"profile": os.path.join(dirs["abs_work_dir"], "profile"),
"xml_output": os.path.join(dirs["abs_work_dir"], "output.xml"),
"html_output": os.path.join(dirs["abs_blob_upload_dir"], "output.html"),
"logcat_dir": dirs["abs_work_dir"],
"emulator": "arm",
"symbols_path": self.symbols_path,
"binary": self.binary_path,
"address": self.config.get("marionette_address"),
"raw_log_file": raw_log_file,
"error_summary_file": error_summary_file,
"html_report_file": html_report_file,
"gecko_log": dirs["abs_blob_upload_dir"],
"this_chunk": self.config.get("this_chunk", 1),
"total_chunks": self.config.get("total_chunks", 1),
}
self.info("The emulator type: %s" % config_fmt_args["emulator"])
# build the marionette command arguments
python = self.query_python_path('python')
python = self.query_python_path("python")
cmd = [python, '-u', os.path.join(dirs['abs_marionette_dir'],
'runtests.py')]
cmd = [python, "-u", os.path.join(dirs["abs_marionette_dir"], "runtests.py")]
manifest = os.path.join(dirs['abs_marionette_tests_dir'],
self.config['test_manifest'])
manifest = os.path.join(
dirs["abs_marionette_tests_dir"], self.config["test_manifest"]
)
if self.config.get('app_arg'):
config_fmt_args['app_arg'] = self.config['app_arg']
if self.config.get("app_arg"):
config_fmt_args["app_arg"] = self.config["app_arg"]
if self.config['enable_webrender']:
cmd.append('--enable-webrender')
if self.config["enable_webrender"]:
cmd.append("--enable-webrender")
cmd.extend(['--setpref={}'.format(p) for p in self.config['extra_prefs']])
cmd.extend(["--setpref={}".format(p) for p in self.config["extra_prefs"]])
cmd.append('--gecko-log=-')
cmd.append("--gecko-log=-")
if self.config.get("structured_output"):
cmd.append("--log-raw=-")
@ -308,89 +355,98 @@ class MarionetteTest(TestingMixin, MercurialScript, TransferMixin,
# Make sure that the logging directory exists
self.fatal("Could not create blobber upload directory")
test_paths = json.loads(os.environ.get('MOZHARNESS_TEST_PATHS', '""'))
test_paths = json.loads(os.environ.get("MOZHARNESS_TEST_PATHS", '""'))
if test_paths and 'marionette' in test_paths:
paths = [os.path.join(dirs['abs_test_install_dir'], 'marionette', 'tests', p)
for p in test_paths['marionette']]
if test_paths and "marionette" in test_paths:
paths = [
os.path.join(dirs["abs_test_install_dir"], "marionette", "tests", p)
for p in test_paths["marionette"]
]
cmd.extend(paths)
else:
cmd.append(manifest)
try_options, try_tests = self.try_args("marionette")
cmd.extend(self.query_tests_args(try_tests,
str_format_values=config_fmt_args))
cmd.extend(self.query_tests_args(try_tests, str_format_values=config_fmt_args))
env = {}
if self.query_minidump_stackwalk():
env['MINIDUMP_STACKWALK'] = self.minidump_stackwalk_path
env['MOZ_UPLOAD_DIR'] = self.query_abs_dirs()['abs_blob_upload_dir']
env['MINIDUMP_SAVE_PATH'] = self.query_abs_dirs()['abs_blob_upload_dir']
env['RUST_BACKTRACE'] = 'full'
env["MINIDUMP_STACKWALK"] = self.minidump_stackwalk_path
env["MOZ_UPLOAD_DIR"] = self.query_abs_dirs()["abs_blob_upload_dir"]
env["MINIDUMP_SAVE_PATH"] = self.query_abs_dirs()["abs_blob_upload_dir"]
env["RUST_BACKTRACE"] = "full"
if self.config['allow_software_gl_layers']:
env['MOZ_LAYERS_ALLOW_SOFTWARE_GL'] = '1'
if self.config["allow_software_gl_layers"]:
env["MOZ_LAYERS_ALLOW_SOFTWARE_GL"] = "1"
if self.config['headless']:
env['MOZ_HEADLESS'] = '1'
env['MOZ_HEADLESS_WIDTH'] = self.config['headless_width']
env['MOZ_HEADLESS_HEIGHT'] = self.config['headless_height']
if self.config["headless"]:
env["MOZ_HEADLESS"] = "1"
env["MOZ_HEADLESS_WIDTH"] = self.config["headless_width"]
env["MOZ_HEADLESS_HEIGHT"] = self.config["headless_height"]
if not os.path.isdir(env['MOZ_UPLOAD_DIR']):
self.mkdir_p(env['MOZ_UPLOAD_DIR'])
if not os.path.isdir(env["MOZ_UPLOAD_DIR"]):
self.mkdir_p(env["MOZ_UPLOAD_DIR"])
env = self.query_env(partial_env=env)
try:
cwd = self._query_tests_dir()
except Exception as e:
self.fatal("Don't know how to run --test-suite '{0}': {1}!".format(
self.test_suite, e))
self.fatal(
"Don't know how to run --test-suite '{0}': {1}!".format(
self.test_suite, e
)
)
marionette_parser = self.parser_class(config=self.config,
log_obj=self.log_obj,
error_list=BaseErrorList + HarnessErrorList,
strict=False)
return_code = self.run_command(cmd,
cwd=cwd,
output_timeout=1000,
output_parser=marionette_parser,
env=env)
marionette_parser = self.parser_class(
config=self.config,
log_obj=self.log_obj,
error_list=BaseErrorList + HarnessErrorList,
strict=False,
)
return_code = self.run_command(
cmd, cwd=cwd, output_timeout=1000, output_parser=marionette_parser, env=env
)
level = INFO
tbpl_status, log_level, summary = marionette_parser.evaluate_parser(
return_code=return_code)
return_code=return_code
)
marionette_parser.append_tinderboxprint_line("marionette")
qemu = os.path.join(dirs['abs_work_dir'], 'qemu.log')
qemu = os.path.join(dirs["abs_work_dir"], "qemu.log")
if os.path.isfile(qemu):
self.copyfile(qemu, os.path.join(dirs['abs_blob_upload_dir'],
'qemu.log'))
self.copyfile(qemu, os.path.join(dirs["abs_blob_upload_dir"], "qemu.log"))
# dump logcat output if there were failures
if self.config.get('emulator'):
if marionette_parser.failed != "0" or 'T-FAIL' in marionette_parser.tsummary:
logcat = os.path.join(dirs['abs_work_dir'], 'emulator-5554.log')
if self.config.get("emulator"):
if (
marionette_parser.failed != "0"
or "T-FAIL" in marionette_parser.tsummary
):
logcat = os.path.join(dirs["abs_work_dir"], "emulator-5554.log")
if os.access(logcat, os.F_OK):
self.info('dumping logcat')
self.run_command(['cat', logcat], error_list=LogcatErrorList)
self.info("dumping logcat")
self.run_command(["cat", logcat], error_list=LogcatErrorList)
else:
self.info('no logcat file found')
self.info("no logcat file found")
else:
# .. or gecko.log if it exists
gecko_log = os.path.join(self.config['base_work_dir'], 'gecko.log')
gecko_log = os.path.join(self.config["base_work_dir"], "gecko.log")
if os.access(gecko_log, os.F_OK):
self.info('dumping gecko.log')
self.run_command(['cat', gecko_log])
self.info("dumping gecko.log")
self.run_command(["cat", gecko_log])
self.rmtree(gecko_log)
else:
self.info('gecko.log not found')
self.info("gecko.log not found")
marionette_parser.print_summary('marionette')
marionette_parser.print_summary("marionette")
self.log("Marionette exited with return code %s: %s" % (return_code, tbpl_status),
level=level)
self.log(
"Marionette exited with return code %s: %s" % (return_code, tbpl_status),
level=level,
)
self.record_status(tbpl_status)
if __name__ == '__main__':
if __name__ == "__main__":
marionetteTest = MarionetteTest()
marionetteTest.run_and_exit()
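
Editorial note: the config_options entries reformatted above all follow the same shape, a two-element list pairing the command-line flags with a dict of optparse-style keyword arguments. The sketch below is a minimal, hypothetical stand-in for how such a table can drive a parser; it is not the mozharness machinery, and the build_parser helper is invented for illustration. The two example entries are copied from the table above, limited to argparse-compatible keywords.

import argparse

EXAMPLE_OPTIONS = [
    [
        ["--headless"],
        {
            "action": "store_true",
            "dest": "headless",
            "default": False,
            "help": "Run tests in headless mode.",
        },
    ],
    [
        ["--setpref"],
        {
            "action": "append",
            "metavar": "PREF=VALUE",
            "dest": "extra_prefs",
            "default": [],
            "help": "Extra user prefs.",
        },
    ],
]

def build_parser(option_table):
    # Each entry unpacks directly into add_argument(*flags, **kwargs).
    parser = argparse.ArgumentParser()
    for flags, kwargs in option_table:
        parser.add_argument(*flags, **kwargs)
    return parser

if __name__ == "__main__":
    args = build_parser(EXAMPLE_OPTIONS).parse_args(["--headless", "--setpref", "a=1"])
    print(args.headless, args.extra_prefs)  # True ['a=1']
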

View File

@ -23,43 +23,61 @@ BOUNCER_URL_PATTERN = "{bouncer_prefix}?product={product}&os={os}&lang={lang}"
class BouncerCheck(BaseScript):
config_options = [
[["--version"], {
"dest": "version",
"help": "Version of release, eg: 39.0b5",
}],
[["--product-field"], {
"dest": "product_field",
"help": "Version field of release from product details, eg: LATEST_FIREFOX_VERSION",
}],
[["--products-url"], {
"dest": "products_url",
"help": "The URL of the current Firefox product versions",
"type": str,
"default": "https://product-details.mozilla.org/1.0/firefox_versions.json",
}],
[["--previous-version"], {
"dest": "prev_versions",
"action": "extend",
"help": "Previous version(s)",
}],
[["--locale"], {
"dest": "locales",
# Intentionally limited for several reasons:
# 1) faster to check
# 2) do not need to deal with situation when a new locale
# introduced and we do not have partials for it yet
# 3) it mimics the old Sentry behaviour that worked for ages
# 4) no need to handle ja-JP-mac
"default": ["en-US", "de", "it", "zh-TW"],
"action": "append",
"help": "List of locales to check.",
}],
[["-j", "--parallelization"], {
"dest": "parallelization",
"default": 20,
"type": int,
"help": "Number of HTTP sessions running in parallel",
}],
[
["--version"],
{
"dest": "version",
"help": "Version of release, eg: 39.0b5",
},
],
[
["--product-field"],
{
"dest": "product_field",
"help": "Version field of release from product details, eg: LATEST_FIREFOX_VERSION", # NOQA: E501
},
],
[
["--products-url"],
{
"dest": "products_url",
"help": "The URL of the current Firefox product versions",
"type": str,
"default": "https://product-details.mozilla.org/1.0/firefox_versions.json",
},
],
[
["--previous-version"],
{
"dest": "prev_versions",
"action": "extend",
"help": "Previous version(s)",
},
],
[
["--locale"],
{
"dest": "locales",
# Intentionally limited for several reasons:
# 1) faster to check
# 2) do not need to deal with situation when a new locale
# introduced and we do not have partials for it yet
# 3) it mimics the old Sentry behaviour that worked for ages
# 4) no need to handle ja-JP-mac
"default": ["en-US", "de", "it", "zh-TW"],
"action": "append",
"help": "List of locales to check.",
},
],
[
["-j", "--parallelization"],
{
"dest": "parallelization",
"default": 20,
"type": int,
"help": "Number of HTTP sessions running in parallel",
},
],
]
def __init__(self, require_config_file=True):
@ -68,10 +86,10 @@ class BouncerCheck(BaseScript):
require_config_file=require_config_file,
config={
"cdn_urls": [
'download-installer.cdn.mozilla.net',
'download.cdn.mozilla.net',
'download.mozilla.org',
'archive.mozilla.org',
"download-installer.cdn.mozilla.net",
"download.cdn.mozilla.net",
"download.mozilla.org",
"archive.mozilla.org",
],
},
all_actions=[
@ -90,14 +108,15 @@ class BouncerCheck(BaseScript):
firefox_versions = self.load_json_url(self.config["products_url"])
if self.config['product_field'] not in firefox_versions:
self.fatal('Unknown Firefox label: {}'.format(self.config['product_field']))
if self.config["product_field"] not in firefox_versions:
self.fatal("Unknown Firefox label: {}".format(self.config["product_field"]))
self.config["version"] = firefox_versions[self.config["product_field"]]
self.log("Set Firefox version {}".format(self.config["version"]))
def check_url(self, session, url):
from redo import retry
from requests.exceptions import HTTPError
try:
from urllib.parse import urlparse
except ImportError:
@ -114,12 +133,12 @@ class BouncerCheck(BaseScript):
raise
final_url = urlparse(r.url)
if final_url.scheme != 'https':
self.error('FAIL: URL scheme is not https: {}'.format(r.url))
if final_url.scheme != "https":
self.error("FAIL: URL scheme is not https: {}".format(r.url))
self.return_code = EXIT_STATUS_DICT[TBPL_FAILURE]
if final_url.netloc not in self.config['cdn_urls']:
self.error('FAIL: host not in allowed locations: {}'.format(r.url))
if final_url.netloc not in self.config["cdn_urls"]:
self.error("FAIL: host not in allowed locations: {}".format(r.url))
self.return_code = EXIT_STATUS_DICT[TBPL_FAILURE]
try:
@ -148,8 +167,10 @@ class BouncerCheck(BaseScript):
if not product["check_uptake"]:
continue
for prev_version in self.config.get("prev_versions", []):
product_name = product["product-name"] % {"version": self.config["version"],
"prev_version": prev_version}
product_name = product["product-name"] % {
"version": self.config["version"],
"prev_version": prev_version,
}
for bouncer_platform in product["platforms"]:
for locale in self.config["locales"]:
url = BOUNCER_URL_PATTERN.format(
@ -163,12 +184,14 @@ class BouncerCheck(BaseScript):
def check_bouncer(self):
import requests
import concurrent.futures as futures
session = requests.Session()
http_adapter = requests.adapters.HTTPAdapter(
pool_connections=self.config["parallelization"],
pool_maxsize=self.config["parallelization"])
session.mount('https://', http_adapter)
session.mount('http://', http_adapter)
pool_maxsize=self.config["parallelization"],
)
session.mount("https://", http_adapter)
session.mount("http://", http_adapter)
with futures.ThreadPoolExecutor(self.config["parallelization"]) as e:
fs = []
@ -178,5 +201,5 @@ class BouncerCheck(BaseScript):
f.result()
if __name__ == '__main__':
if __name__ == "__main__":
BouncerCheck().run_and_exit()
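
Editorial note: check_bouncer above mounts an HTTPAdapter whose pool_connections and pool_maxsize match the --parallelization value, so the worker threads share one requests.Session and reuse connections. The snippet below is a simplified, assumed sketch of that pattern, not the BouncerCheck implementation; check_url here is deliberately minimal, whereas the real script follows redirects and validates the final scheme and host.

import concurrent.futures as futures
import requests

PARALLELIZATION = 20

def check_url(session, url):
    # Simplified: issue one request and surface HTTP errors.
    r = session.get(url, allow_redirects=True, timeout=60)
    r.raise_for_status()
    return r.url

def check_many(urls):
    session = requests.Session()
    # Size the connection pool to match the number of worker threads.
    adapter = requests.adapters.HTTPAdapter(
        pool_connections=PARALLELIZATION,
        pool_maxsize=PARALLELIZATION,
    )
    session.mount("https://", adapter)
    session.mount("http://", adapter)
    with futures.ThreadPoolExecutor(PARALLELIZATION) as executor:
        return list(executor.map(lambda u: check_url(session, u), urls))
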

View File

@ -35,143 +35,223 @@ def is_triangualar(x):
>>> all(not is_triangualar(x) for x in [4, 5, 8, 9, 11, 17, 25, 29, 39, 44, 59, 61, 72, 98, 112])
True
"""
n = (math.sqrt(8*x + 1) - 1)/2
n = (math.sqrt(8 * x + 1) - 1) / 2
return n == int(n)
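
Editorial note: the formula in this helper inverts the triangular-number definition T_n = n * (n + 1) / 2; solving T_n = x for n gives n = (sqrt(8 * x + 1) - 1) / 2, so x is triangular exactly when that n comes out as a whole number. A small worked check (illustrative only, not part of the patch):

import math

def n_for(x):
    # Invert T_n = n * (n + 1) / 2.
    return (math.sqrt(8 * x + 1) - 1) / 2

print(n_for(10))  # 4.0   -> 10 = 1 + 2 + 3 + 4, so 10 is triangular
print(n_for(11))  # ~4.22 -> not a whole number, so 11 is not triangular
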
class UpdateVerifyConfigCreator(BaseScript):
config_options = [
[["--product"], {
"dest": "product",
"help": "Product being tested, as used in the update URL and filenames. Eg: firefox",
}],
[["--stage-product"], {
"dest": "stage_product",
"help": "Product being tested, as used in stage directories and ship it"
"If not passed this is assumed to be the same as product."
}],
[["--app-name"], {
"dest": "app_name",
"help": "App name being tested. Eg: browser",
}],
[["--branch-prefix"], {
"dest": "branch_prefix",
"help": "Prefix of release branch names. Eg: mozilla, comm",
}],
[["--channel"], {
"dest": "channel",
"help": "Channel to run update verify against",
}],
[["--aus-server"], {
"dest": "aus_server",
"default": "https://aus5.mozilla.org",
"help": "AUS server to run update verify against",
}],
[["--to-version"], {
"dest": "to_version",
"help": "The version of the release being updated to. Eg: 59.0b5",
}],
[["--to-app-version"], {
"dest": "to_app_version",
"help": "The in-app version of the release being updated to. Eg: 59.0",
}],
[["--to-display-version"], {
"dest": "to_display_version",
"help": "The human-readable version of the release being updated to. Eg: 59.0 Beta 9",
}],
[["--to-build-number"], {
"dest": "to_build_number",
"help": "The build number of the release being updated to",
}],
[["--to-buildid"], {
"dest": "to_buildid",
"help": "The buildid of the release being updated to",
}],
[["--to-revision"], {
"dest": "to_revision",
"help": "The revision that the release being updated to was built against",
}],
[["--partial-version"], {
"dest": "partial_versions",
"default": [],
"action": "append",
"help": "A previous release version that is expected to receive a partial update. "
"Eg: 59.0b4. May be specified multiple times."
}],
[["--last-watershed"], {
"dest": "last_watershed",
"help": "The earliest version to include in the update verify config. Eg: 57.0b10",
}],
[["--include-version"], {
"dest": "include_versions",
"default": [],
"action": "append",
"help": "Only include versions that match one of these regexes. "
"May be passed multiple times",
}],
[["--mar-channel-id-override"], {
"dest": "mar_channel_id_options",
"default": [],
"action": "append",
"help": "A version regex and channel id string to override those versions with."
"Eg: ^\\d+\\.\\d+(\\.\\d+)?$,firefox-mozilla-beta,firefox-mozilla-release "
"will set accepted mar channel ids to 'firefox-mozilla-beta' and "
"'firefox-mozilla-release for x.y and x.y.z versions. "
"May be passed multiple times"
}],
[["--override-certs"], {
"dest": "override_certs",
"default": None,
"help": "Certs to override the updater with prior to running update verify."
"If passed, should be one of: dep, nightly, release"
"If not passed, no certificate overriding will be configured"
}],
[["--platform"], {
"dest": "platform",
"help": "The platform to generate the update verify config for, in FTP-style",
}],
[["--updater-platform"], {
"dest": "updater_platform",
"help": "The platform to run the updater on, in FTP-style."
"If not specified, this is assumed to be the same as platform",
}],
[["--archive-prefix"], {
"dest": "archive_prefix",
"help": "The server/path to pull the current release from. "
"Eg: https://archive.mozilla.org/pub",
}],
[["--previous-archive-prefix"], {
"dest": "previous_archive_prefix",
"help": "The server/path to pull the previous releases from"
"If not specified, this is assumed to be the same as --archive-prefix"
}],
[["--repo-path"], {
"dest": "repo_path",
"help": "The repository (relative to the hg server root) that the current release was "
"built from Eg: releases/mozilla-beta"
}],
[["--output-file"], {
"dest": "output_file",
"help": "Where to write the update verify config to",
}],
[["--product-details-server"], {
"dest": "product_details_server",
"default": "https://product-details.mozilla.org",
"help": "Product Details server to pull previous release info from. "
"Using anything other than the production server is likely to "
"cause issues with update verify."
}],
[["--hg-server"], {
"dest": "hg_server",
"default": "https://hg.mozilla.org",
"help": "Mercurial server to pull various previous and current version info from",
}],
[["--full-check-locale"], {
"dest": "full_check_locales",
"default": ["de", "en-US", "ru"],
"action": "append",
"help": "A list of locales to generate full update verify checks for",
}],
[
["--product"],
{
"dest": "product",
"help": "Product being tested, as used in the update URL and filenames. Eg: firefox", # NOQA: E501
},
],
[
["--stage-product"],
{
"dest": "stage_product",
"help": "Product being tested, as used in stage directories and ship it"
"If not passed this is assumed to be the same as product.",
},
],
[
["--app-name"],
{
"dest": "app_name",
"help": "App name being tested. Eg: browser",
},
],
[
["--branch-prefix"],
{
"dest": "branch_prefix",
"help": "Prefix of release branch names. Eg: mozilla, comm",
},
],
[
["--channel"],
{
"dest": "channel",
"help": "Channel to run update verify against",
},
],
[
["--aus-server"],
{
"dest": "aus_server",
"default": "https://aus5.mozilla.org",
"help": "AUS server to run update verify against",
},
],
[
["--to-version"],
{
"dest": "to_version",
"help": "The version of the release being updated to. Eg: 59.0b5",
},
],
[
["--to-app-version"],
{
"dest": "to_app_version",
"help": "The in-app version of the release being updated to. Eg: 59.0",
},
],
[
["--to-display-version"],
{
"dest": "to_display_version",
"help": "The human-readable version of the release being updated to. Eg: 59.0 Beta 9", # NOQA: E501
},
],
[
["--to-build-number"],
{
"dest": "to_build_number",
"help": "The build number of the release being updated to",
},
],
[
["--to-buildid"],
{
"dest": "to_buildid",
"help": "The buildid of the release being updated to",
},
],
[
["--to-revision"],
{
"dest": "to_revision",
"help": "The revision that the release being updated to was built against",
},
],
[
["--partial-version"],
{
"dest": "partial_versions",
"default": [],
"action": "append",
"help": "A previous release version that is expected to receive a partial update. "
"Eg: 59.0b4. May be specified multiple times.",
},
],
[
["--last-watershed"],
{
"dest": "last_watershed",
"help": "The earliest version to include in the update verify config. Eg: 57.0b10",
},
],
[
["--include-version"],
{
"dest": "include_versions",
"default": [],
"action": "append",
"help": "Only include versions that match one of these regexes. "
"May be passed multiple times",
},
],
[
["--mar-channel-id-override"],
{
"dest": "mar_channel_id_options",
"default": [],
"action": "append",
"help": "A version regex and channel id string to override those versions with."
"Eg: ^\\d+\\.\\d+(\\.\\d+)?$,firefox-mozilla-beta,firefox-mozilla-release "
"will set accepted mar channel ids to 'firefox-mozilla-beta' and "
"'firefox-mozilla-release for x.y and x.y.z versions. "
"May be passed multiple times",
},
],
[
["--override-certs"],
{
"dest": "override_certs",
"default": None,
"help": "Certs to override the updater with prior to running update verify."
"If passed, should be one of: dep, nightly, release"
"If not passed, no certificate overriding will be configured",
},
],
[
["--platform"],
{
"dest": "platform",
"help": "The platform to generate the update verify config for, in FTP-style",
},
],
[
["--updater-platform"],
{
"dest": "updater_platform",
"help": "The platform to run the updater on, in FTP-style."
"If not specified, this is assumed to be the same as platform",
},
],
[
["--archive-prefix"],
{
"dest": "archive_prefix",
"help": "The server/path to pull the current release from. "
"Eg: https://archive.mozilla.org/pub",
},
],
[
["--previous-archive-prefix"],
{
"dest": "previous_archive_prefix",
"help": "The server/path to pull the previous releases from"
"If not specified, this is assumed to be the same as --archive-prefix",
},
],
[
["--repo-path"],
{
"dest": "repo_path",
"help": (
"The repository (relative to the hg server root) that the current "
"release was built from Eg: releases/mozilla-beta"
),
},
],
[
["--output-file"],
{
"dest": "output_file",
"help": "Where to write the update verify config to",
},
],
[
["--product-details-server"],
{
"dest": "product_details_server",
"default": "https://product-details.mozilla.org",
"help": "Product Details server to pull previous release info from. "
"Using anything other than the production server is likely to "
"cause issues with update verify.",
},
],
[
["--hg-server"],
{
"dest": "hg_server",
"default": "https://hg.mozilla.org",
"help": "Mercurial server to pull various previous and current version info from",
},
],
[
["--full-check-locale"],
{
"dest": "full_check_locales",
"default": ["de", "en-US", "ru"],
"action": "append",
"help": "A list of locales to generate full update verify checks for",
},
],
]
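
Editorial note: the --mar-channel-id-override help text above describes a "regex,channel-ids" value, and a later hunk in this file matches each stored pattern against release versions with re.match. The sketch below shows one plausible way such a value could be split and applied; the helper name and exact parsing are assumptions for illustration, not the script's code.

import re

def split_override(value):
    # Hypothetical: split "regex,id1,id2" into a pattern and its channel ids.
    pattern, mar_channel_ids = value.split(",", 1)
    return pattern, mar_channel_ids

overrides = dict(
    [split_override("^\\d+\\.\\d+(\\.\\d+)?$,firefox-mozilla-beta,firefox-mozilla-release")]
)
for pattern, ids in overrides.items():
    if re.match(pattern, "78.0.2"):
        print(ids)  # firefox-mozilla-beta,firefox-mozilla-release
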
def __init__(self):
@ -242,12 +322,12 @@ class UpdateVerifyConfigCreator(BaseScript):
"WARNING",
)
releases = json.load(ret)["releases"]
for release_name, release_info in \
reversed(sorted(releases.items(),
key=lambda x: MozillaVersion(x[1]['version']))):
for release_name, release_info in reversed(
sorted(releases.items(), key=lambda x: MozillaVersion(x[1]["version"]))
):
# we need to use releases_name instead of release_info since esr
# string is included in the name. later we rely on this.
product, version = release_name.split('-', 1)
product, version = release_name.split("-", 1)
tag = "{}_{}_RELEASE".format(product.upper(), version.replace(".", "_"))
# Exclude any releases that don't match one of our include version
@ -257,28 +337,41 @@ class UpdateVerifyConfigCreator(BaseScript):
if re.match(v, version):
break
else:
self.log("Skipping release whose version doesn't match any "
"include_version pattern: %s" % release_name,
level=INFO)
self.log(
"Skipping release whose version doesn't match any "
"include_version pattern: %s" % release_name,
level=INFO,
)
continue
# We also have to trim out previous releases that aren't in the same
# product line, too old, etc.
if self.config["stage_product"] != product:
self.log("Skipping release that doesn't match product name: %s" % release_name,
level=INFO)
self.log(
"Skipping release that doesn't match product name: %s"
% release_name,
level=INFO,
)
continue
if MozillaVersion(version) < MozillaVersion(self.config["last_watershed"]):
self.log("Skipping release that's behind the last watershed: %s" % release_name,
level=INFO)
self.log(
"Skipping release that's behind the last watershed: %s"
% release_name,
level=INFO,
)
continue
if version == self.config["to_version"]:
self.log("Skipping release that is the same as to version: %s" % release_name,
level=INFO)
self.log(
"Skipping release that is the same as to version: %s"
% release_name,
level=INFO,
)
continue
if MozillaVersion(version) > MozillaVersion(self.config["to_version"]):
self.log("Skipping release that's newer than to version: %s" % release_name,
level=INFO)
self.log(
"Skipping release that's newer than to version: %s" % release_name,
level=INFO,
)
continue
if version in self.update_paths:
@ -293,9 +386,11 @@ class UpdateVerifyConfigCreator(BaseScript):
version,
release_info["build_number"],
),
ftp2infoFile(self.config["platform"])
ftp2infoFile(self.config["platform"]),
)
self.log(
"Retrieving buildid from info file: %s" % info_file_url, level=DEBUG
)
self.log("Retrieving buildid from info file: %s" % info_file_url, level=DEBUG)
ret = self._retry_download(info_file_url, "WARNING")
buildID = ret.read().split(b"=")[1].strip().decode("utf-8")
@ -320,8 +415,12 @@ class UpdateVerifyConfigCreator(BaseScript):
self.config["app_name"],
),
)
app_version = self._retry_download(app_version_url, "WARNING").read() \
.strip().decode("utf-8")
app_version = (
self._retry_download(app_version_url, "WARNING")
.read()
.strip()
.decode("utf-8")
)
self.log("Adding {} to update paths".format(version), level=INFO)
self.update_paths[version] = {
@ -329,7 +428,9 @@ class UpdateVerifyConfigCreator(BaseScript):
"locales": getPlatformLocales(shipped_locales, self.config["platform"]),
"buildID": buildID,
}
for pattern, mar_channel_ids in self.config["mar_channel_id_overrides"].items():
for pattern, mar_channel_ids in self.config[
"mar_channel_id_overrides"
].items():
if re.match(pattern, version):
self.update_paths[version]["marChannelIds"] = mar_channel_ids
@ -340,15 +441,14 @@ class UpdateVerifyConfigCreator(BaseScript):
if self.update_paths:
self.log("Found update paths:", level=DEBUG)
self.log(pprint.pformat(self.update_paths), level=DEBUG)
elif (
GeckoVersion.parse(self.config["to_version"])
<= GeckoVersion.parse(self.config["last_watershed"])
elif GeckoVersion.parse(self.config["to_version"]) <= GeckoVersion.parse(
self.config["last_watershed"]
):
self.log(
"Didn't find any update paths, but to_version {} is before the last_"
"watershed {}, generating empty config".format(
self.config['to_version'],
self.config['last_watershed'],
self.config["to_version"],
self.config["last_watershed"],
),
level=WARNING,
)
@ -359,17 +459,24 @@ class UpdateVerifyConfigCreator(BaseScript):
from mozrelease.l10n import getPlatformLocales
from mozrelease.platforms import ftp2updatePlatforms
from mozrelease.update_verify import UpdateVerifyConfig
from mozrelease.paths import getCandidatesDir, getReleasesDir, getReleaseInstallerPath
from mozrelease.paths import (
getCandidatesDir,
getReleasesDir,
getReleaseInstallerPath,
)
from mozrelease.versions import getPrettyVersion
candidates_dir = getCandidatesDir(
self.config["stage_product"], self.config["to_version"],
self.config["stage_product"],
self.config["to_version"],
self.config["to_build_number"],
)
to_ = getReleaseInstallerPath(
self.config["product"], self.config["product"].title(),
self.config["to_version"], self.config["platform"],
locale="%locale%"
self.config["product"],
self.config["product"].title(),
self.config["to_version"],
self.config["platform"],
locale="%locale%",
)
to_path = "{}/{}".format(candidates_dir, to_)
@ -378,8 +485,10 @@ class UpdateVerifyConfigCreator(BaseScript):
to_display_version = getPrettyVersion(self.config["to_version"])
self.update_verify_config = UpdateVerifyConfig(
product=self.config["product"].title(), channel=self.config["channel"],
aus_server=self.config["aus_server"], to=to_path,
product=self.config["product"].title(),
channel=self.config["channel"],
aus_server=self.config["aus_server"],
to=to_path,
to_build_id=self.config["to_buildid"],
to_app_version=self.config["to_app_version"],
to_display_version=to_display_version,
@ -394,9 +503,15 @@ class UpdateVerifyConfigCreator(BaseScript):
self.config["app_name"],
),
)
to_shipped_locales = self._retry_download(to_shipped_locales_url, "WARNING") \
.read().strip().decode("utf-8")
to_locales = set(getPlatformLocales(to_shipped_locales, self.config["platform"]))
to_shipped_locales = (
self._retry_download(to_shipped_locales_url, "WARNING")
.read()
.strip()
.decode("utf-8")
)
to_locales = set(
getPlatformLocales(to_shipped_locales, self.config["platform"])
)
completes_only_index = 0
for fromVersion in reversed(sorted(self.update_paths, key=LooseVersion)):
@ -404,61 +519,76 @@ class UpdateVerifyConfigCreator(BaseScript):
locales = sorted(list(set(from_["locales"]).intersection(to_locales)))
appVersion = from_["appVersion"]
build_id = from_["buildID"]
mar_channel_IDs = from_.get('marChannelIds')
mar_channel_IDs = from_.get("marChannelIds")
# Use new build targets for Windows, but only on compatible
# versions (42+). See bug 1185456 for additional context.
if self.config["platform"] not in ("win32", "win64") or \
LooseVersion(fromVersion) < LooseVersion("42.0"):
if self.config["platform"] not in ("win32", "win64") or LooseVersion(
fromVersion
) < LooseVersion("42.0"):
update_platform = ftp2updatePlatforms(self.config["platform"])[0]
else:
update_platform = ftp2updatePlatforms(self.config["platform"])[1]
release_dir = getReleasesDir(
self.config["stage_product"], fromVersion
)
release_dir = getReleasesDir(self.config["stage_product"], fromVersion)
path_ = getReleaseInstallerPath(
self.config["product"], self.config["product"].title(),
fromVersion, self.config["platform"], locale="%locale%",
self.config["product"],
self.config["product"].title(),
fromVersion,
self.config["platform"],
locale="%locale%",
)
from_path = "{}/{}".format(release_dir, path_)
updater_package = "{}/{}".format(
release_dir,
getReleaseInstallerPath(
self.config["product"], self.config["product"].title(),
fromVersion, self.config["updater_platform"],
self.config["product"],
self.config["product"].title(),
fromVersion,
self.config["updater_platform"],
locale="%locale%",
)
),
)
# Exclude locales being full checked
quick_check_locales = [l for l in locales
if l not in self.config["full_check_locales"]]
quick_check_locales = [
l for l in locales if l not in self.config["full_check_locales"]
]
# Get the intersection of from and to full_check_locales
this_full_check_locales = [l for l in self.config["full_check_locales"]
if l in locales]
this_full_check_locales = [
l for l in self.config["full_check_locales"] if l in locales
]
if fromVersion in self.config["partial_versions"]:
self.info("Generating configs for partial update checks for %s" % fromVersion)
self.info(
"Generating configs for partial update checks for %s" % fromVersion
)
self.update_verify_config.addRelease(
release=appVersion, build_id=build_id, locales=locales,
patch_types=["complete", "partial"], from_path=from_path,
release=appVersion,
build_id=build_id,
locales=locales,
patch_types=["complete", "partial"],
from_path=from_path,
ftp_server_from=self.config["previous_archive_prefix"],
ftp_server_to=self.config["archive_prefix"],
mar_channel_IDs=mar_channel_IDs, platform=update_platform,
updater_package=updater_package
mar_channel_IDs=mar_channel_IDs,
platform=update_platform,
updater_package=updater_package,
)
else:
if this_full_check_locales and is_triangualar(completes_only_index):
self.info("Generating full check configs for %s" % fromVersion)
self.update_verify_config.addRelease(
release=appVersion, build_id=build_id, locales=this_full_check_locales,
release=appVersion,
build_id=build_id,
locales=this_full_check_locales,
from_path=from_path,
ftp_server_from=self.config["previous_archive_prefix"],
ftp_server_to=self.config["archive_prefix"],
mar_channel_IDs=mar_channel_IDs, platform=update_platform,
updater_package=updater_package
mar_channel_IDs=mar_channel_IDs,
platform=update_platform,
updater_package=updater_package,
)
# Quick test for other locales, no download
if len(quick_check_locales) > 0:
@ -470,8 +600,10 @@ class UpdateVerifyConfigCreator(BaseScript):
# Excluding full check locales from the quick check
_locales = quick_check_locales
self.update_verify_config.addRelease(
release=appVersion, build_id=build_id,
locales=_locales, platform=update_platform
release=appVersion,
build_id=build_id,
locales=_locales,
platform=update_platform,
)
completes_only_index += 1

File diff suppressed because it is too large

View File

@ -10,47 +10,25 @@ from talos.xtalos.etlparser import NAME_SUBSTITUTIONS
def test_NAME_SUBSTITUTIONS():
filepaths_map = {
# tp5n files
r'{talos}\talos\tests\tp5n\alibaba.com\i03.i.aliimg.com\images\eng\style\css_images':
r'{talos}\talos\tests\{tp5n_files}',
r'{talos}\talos\tests\tp5n\cnet.com\i.i.com.com\cnwk.1d\i\tron\fd':
r'{talos}\talos\tests\{tp5n_files}',
r'{talos}\talos\tests\tp5n\tp5n.manifest':
r'{talos}\talos\tests\{tp5n_files}',
r'{talos}\talos\tests\tp5n\tp5n.manifest.develop':
r'{talos}\talos\tests\{tp5n_files}',
r'{talos}\talos\tests\tp5n\yelp.com\media1.ct.yelpcdn.com\photo':
r'{talos}\talos\tests\{tp5n_files}',
r"{talos}\talos\tests\tp5n\alibaba.com\i03.i.aliimg.com\images\eng\style\css_images": r"{talos}\talos\tests\{tp5n_files}", # NOQA: E501
r"{talos}\talos\tests\tp5n\cnet.com\i.i.com.com\cnwk.1d\i\tron\fd": r"{talos}\talos\tests\{tp5n_files}", # NOQA: E501
r"{talos}\talos\tests\tp5n\tp5n.manifest": r"{talos}\talos\tests\{tp5n_files}",
r"{talos}\talos\tests\tp5n\tp5n.manifest.develop": r"{talos}\talos\tests\{tp5n_files}",
r"{talos}\talos\tests\tp5n\yelp.com\media1.ct.yelpcdn.com\photo": r"{talos}\talos\tests\{tp5n_files}", # NOQA: E501
# cltbld for Windows 7 32bit
r'c:\users\cltbld.t-w732-ix-015.000\appdata\locallow\mozilla':
r'c:\users\{cltbld}\appdata\locallow\mozilla',
r'c:\users\cltbld.t-w732-ix-035.000\appdata\locallow\mozilla':
r'c:\users\{cltbld}\appdata\locallow\mozilla',
r'c:\users\cltbld.t-w732-ix-058.000\appdata\locallow\mozilla':
r'c:\users\{cltbld}\appdata\locallow\mozilla',
r'c:\users\cltbld.t-w732-ix-112.001\appdata\local\temp':
r'c:\users\{cltbld}\appdata\local\temp',
r"c:\users\cltbld.t-w732-ix-015.000\appdata\locallow\mozilla": r"c:\users\{cltbld}\appdata\locallow\mozilla", # NOQA: E501
r"c:\users\cltbld.t-w732-ix-035.000\appdata\locallow\mozilla": r"c:\users\{cltbld}\appdata\locallow\mozilla", # NOQA: E501
r"c:\users\cltbld.t-w732-ix-058.000\appdata\locallow\mozilla": r"c:\users\{cltbld}\appdata\locallow\mozilla", # NOQA: E501
r"c:\users\cltbld.t-w732-ix-112.001\appdata\local\temp": r"c:\users\{cltbld}\appdata\local\temp", # NOQA: E501
# nvidia's 3D Vision
r'c:\program files\nvidia corporation\3d vision\npnv3dv.dll':
r'c:\program files\{nvidia_3d_vision}',
r'c:\program files\nvidia corporation\3d vision\npnv3dvstreaming.dll':
r'c:\program files\{nvidia_3d_vision}',
r'c:\program files\nvidia corporation\3d vision\nvstereoapii.dll':
r'c:\program files\{nvidia_3d_vision}',
r'{firefox}\browser\extensions\{45b6d270-f6ec-4930-a6ad-14bac5ea2204}.xpi':
r'{firefox}\browser\extensions\{uuid}.xpi',
r'c:\slave\test\build\venv\lib\site-packages\pip\_vendor\html5lib\treebuilders':
r'c:\slave\test\build\venv\lib\site-packages\{pip_vendor}',
r'c:\slave\test\build\venv\lib\site-packages\pip\_vendor\colorama':
r'c:\slave\test\build\venv\lib\site-packages\{pip_vendor}',
r'c:\slave\test\build\venv\lib\site-packages\pip\_vendor\cachecontrol\caches':
r'c:\slave\test\build\venv\lib\site-packages\{pip_vendor}',
r'c:\slave\test\build\venv\lib\site-packages\pip\_vendor\requests\packages\urllib3'
r'\packages\ssl_match_hostname':
r'c:\slave\test\build\venv\lib\site-packages\{pip_vendor}',
r"c:\program files\nvidia corporation\3d vision\npnv3dv.dll": r"c:\program files\{nvidia_3d_vision}", # NOQA: E501
r"c:\program files\nvidia corporation\3d vision\npnv3dvstreaming.dll": r"c:\program files\{nvidia_3d_vision}", # NOQA: E501
r"c:\program files\nvidia corporation\3d vision\nvstereoapii.dll": r"c:\program files\{nvidia_3d_vision}", # NOQA: E501
r"{firefox}\browser\extensions\{45b6d270-f6ec-4930-a6ad-14bac5ea2204}.xpi": r"{firefox}\browser\extensions\{uuid}.xpi", # NOQA: E501
r"c:\slave\test\build\venv\lib\site-packages\pip\_vendor\html5lib\treebuilders": r"c:\slave\test\build\venv\lib\site-packages\{pip_vendor}", # NOQA: E501
r"c:\slave\test\build\venv\lib\site-packages\pip\_vendor\colorama": r"c:\slave\test\build\venv\lib\site-packages\{pip_vendor}", # NOQA: E501
r"c:\slave\test\build\venv\lib\site-packages\pip\_vendor\cachecontrol\caches": r"c:\slave\test\build\venv\lib\site-packages\{pip_vendor}", # NOQA: E501
r"c:\slave\test\build\venv\lib\site-packages\pip\_vendor\requests\packages\urllib3\packages\ssl_match_hostname": r"c:\slave\test\build\venv\lib\site-packages\{pip_vendor}", # NOQA: E501
}
for given_raw_path, exp_normal_path in filepaths_map.items():
@ -60,5 +38,5 @@ def test_NAME_SUBSTITUTIONS():
assert exp_normal_path == normal_path
if __name__ == '__main__':
if __name__ == "__main__":
mozunit.main()
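
Editorial note: the test above feeds raw file paths through NAME_SUBSTITUTIONS and expects normalized placeholder paths such as c:\users\{cltbld}\... back. The snippet below is a hypothetical sketch of how a substitution table of that shape can be applied; the table contents and the normalize helper are invented for illustration and are not the etlparser code.

import re

# Hypothetical shape of a substitution table: (pattern, replacement) pairs,
# analogous to what the test above exercises via NAME_SUBSTITUTIONS.
SUBSTITUTIONS = [
    (re.compile(r"c:\\users\\cltbld[^\\]*", re.I), r"c:\\users\\{cltbld}"),
]

def normalize(path):
    for pattern, replacement in SUBSTITUTIONS:
        path = pattern.sub(replacement, path)
    return path

print(normalize(r"c:\users\cltbld.t-w732-ix-015.000\appdata\locallow\mozilla"))
# c:\users\{cltbld}\appdata\locallow\mozilla
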

View File

@ -89,18 +89,14 @@ def data_cls_getter(output_node, visited_node):
def compile(stream, data_cls_getter=None, **kwargs):
return base.compile(Compiler,
stream,
data_cls_getter=data_cls_getter,
**kwargs)
return base.compile(Compiler, stream, data_cls_getter=data_cls_getter, **kwargs)
def get_manifest(manifest_path):
"""Get the ExpectedManifest for a particular manifest path"""
try:
with open(manifest_path) as f:
return compile(f,
data_cls_getter=data_cls_getter)
return compile(f, data_cls_getter=data_cls_getter)
except IOError:
return None
@ -125,20 +121,25 @@ class Differences(object):
modified = []
for item in self.modified:
if isinstance(item, TestModified):
modified.append(" %s\n %s\n%s" % (item[0], item[1], indent(str(item[2]), 4)))
modified.append(
" %s\n %s\n%s" % (item[0], item[1], indent(str(item[2]), 4))
)
else:
assert isinstance(item, ExpectedModified)
modified.append(" %s\n %s %s" % item)
return "Added:\n%s\nDeleted:\n%s\nModified:\n%s\n" % (
"\n".join(" %s:\n %s" % item for item in self.added),
"\n".join(" %s" % item for item in self.deleted),
"\n".join(modified))
"\n".join(modified),
)
TestModified = namedtuple("TestModified", ["test", "test_manifest", "differences"])
ExpectedModified = namedtuple("ExpectedModified", ["test", "ancestor_manifest", "new_manifest"])
ExpectedModified = namedtuple(
"ExpectedModified", ["test", "ancestor_manifest", "new_manifest"]
)
def compare_test(test, ancestor_manifest, new_manifest):
@ -147,8 +148,12 @@ def compare_test(test, ancestor_manifest, new_manifest):
compare_expected(changes, None, ancestor_manifest, new_manifest)
for subtest, ancestor_subtest_manifest in iteritems(ancestor_manifest.child_map):
compare_expected(changes, subtest, ancestor_subtest_manifest,
new_manifest.child_map.get(subtest))
compare_expected(
changes,
subtest,
ancestor_subtest_manifest,
new_manifest.child_map.get(subtest),
)
for subtest, subtest_manifest in iteritems(new_manifest.child_map):
if subtest not in ancestor_manifest.child_map:
@ -158,18 +163,32 @@ def compare_test(test, ancestor_manifest, new_manifest):
def compare_expected(changes, subtest, ancestor_manifest, new_manifest):
if (not (ancestor_manifest and ancestor_manifest.has_key("expected")) and # noqa W601
(new_manifest and new_manifest.has_key("expected"))): # noqa W601
changes.modified.append(ExpectedModified(subtest, ancestor_manifest, new_manifest))
elif (ancestor_manifest and ancestor_manifest.has_key("expected") and # noqa W601
not (new_manifest and new_manifest.has_key("expected"))): # noqa W601
if not (
ancestor_manifest and ancestor_manifest.has_key("expected") # noqa W601
) and (
new_manifest and new_manifest.has_key("expected") # noqa W601
):
changes.modified.append(
ExpectedModified(subtest, ancestor_manifest, new_manifest)
)
elif (
ancestor_manifest
and ancestor_manifest.has_key("expected") # noqa W601
and not (new_manifest and new_manifest.has_key("expected")) # noqa W601
):
changes.deleted.append(subtest)
elif (ancestor_manifest and ancestor_manifest.has_key("expected") and # noqa W601
new_manifest and new_manifest.has_key("expected")): # noqa W601
elif (
ancestor_manifest
and ancestor_manifest.has_key("expected") # noqa W601
and new_manifest
and new_manifest.has_key("expected") # noqa W601
):
old_expected = ancestor_manifest.get("expected")
new_expected = new_manifest.get("expected")
if expected_values_changed(old_expected, new_expected):
changes.modified.append(ExpectedModified(subtest, ancestor_manifest, new_manifest))
changes.modified.append(
ExpectedModified(subtest, ancestor_manifest, new_manifest)
)
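
Editorial note: the reformatted compare_expected above distinguishes three cases: an "expected" key newly introduced, an "expected" key removed, and an "expected" value changed between the ancestor and new manifests. The sketch below restates that branching over plain dicts, purely for illustration; the real code works on wptmanifest nodes via has_key and expected_values_changed, and the classify name here is invented.

def classify(ancestor, new):
    had = bool(ancestor) and "expected" in ancestor
    has = bool(new) and "expected" in new
    if not had and has:
        return "modified"   # expectation newly introduced
    if had and not has:
        return "deleted"    # expectation removed
    if had and has and ancestor["expected"] != new["expected"]:
        return "modified"   # expectation value changed
    return "unchanged"

print(classify({}, {"expected": "FAIL"}))                    # modified
print(classify({"expected": "FAIL"}, {}))                    # deleted
print(classify({"expected": "FAIL"}, {"expected": "PASS"}))  # modified
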
def expected_values_changed(old_expected, new_expected):
@ -198,11 +217,11 @@ def record_changes(ancestor_manifest, new_manifest):
changes.added.append((test, test_manifest))
else:
ancestor_test_manifest = ancestor_manifest.child_map[test]
test_differences = compare_test(test,
ancestor_test_manifest,
test_manifest)
test_differences = compare_test(test, ancestor_test_manifest, test_manifest)
if test_differences:
changes.modified.append(TestModified(test, test_manifest, test_differences))
changes.modified.append(
TestModified(test, test_manifest, test_differences)
)
for test, test_manifest in iteritems(ancestor_manifest.child_map):
if test not in new_manifest.child_map:
@ -266,7 +285,9 @@ def run(ancestor, current, new, dest):
current_manifest = get_manifest(current)
new_manifest = get_manifest(new)
updated_current_str = make_changes(ancestor_manifest, current_manifest, new_manifest)
updated_current_str = make_changes(
ancestor_manifest, current_manifest, new_manifest
)
if dest != "-":
with open(dest, "wb") as f:

View File

@ -17,6 +17,7 @@ from . import shared_telemetry_utils as utils
from ctypes import c_int
from .shared_telemetry_utils import ParserError
from collections import OrderedDict
atexit.register(ParserError.exit_func)
# Constants.
@ -25,32 +26,33 @@ MAX_LABEL_COUNT = 100
MAX_KEY_COUNT = 30
MAX_KEY_LENGTH = 20
MIN_CATEGORICAL_BUCKET_COUNT = 50
CPP_IDENTIFIER_PATTERN = '^[a-z][a-z0-9_]+[a-z0-9]$'
CPP_IDENTIFIER_PATTERN = "^[a-z][a-z0-9_]+[a-z0-9]$"
ALWAYS_ALLOWED_KEYS = [
'kind',
'description',
'operating_systems',
'expires_in_version',
'alert_emails',
'keyed',
'releaseChannelCollection',
'bug_numbers',
'keys',
'record_in_processes',
'record_into_store',
'products',
"kind",
"description",
"operating_systems",
"expires_in_version",
"alert_emails",
"keyed",
"releaseChannelCollection",
"bug_numbers",
"keys",
"record_in_processes",
"record_into_store",
"products",
]
BASE_DOC_URL = ("https://firefox-source-docs.mozilla.org/toolkit/components/"
"telemetry/telemetry/")
HISTOGRAMS_DOC_URL = (BASE_DOC_URL + "collection/histograms.html")
SCALARS_DOC_URL = (BASE_DOC_URL + "collection/scalars.html")
BASE_DOC_URL = (
"https://firefox-source-docs.mozilla.org/toolkit/components/" "telemetry/telemetry/"
)
HISTOGRAMS_DOC_URL = BASE_DOC_URL + "collection/histograms.html"
SCALARS_DOC_URL = BASE_DOC_URL + "collection/scalars.html"
GECKOVIEW_STREAMING_SUPPORTED_KINDS = [
'linear',
'exponential',
'categorical',
"linear",
"exponential",
"categorical",
]
# parse_histograms.py is used by scripts from a mozilla-central build tree
@ -62,7 +64,7 @@ try:
import buildconfig
# Need to update sys.path to be able to find usecounters.
sys.path.append(os.path.join(buildconfig.topsrcdir, 'dom/base/'))
sys.path.append(os.path.join(buildconfig.topsrcdir, "dom/base/"))
except ImportError:
# Must be in an out-of-tree usage scenario. Trust that whoever is
# running this script knows we need the usecounters module and has
@ -110,18 +112,22 @@ def load_allowlist():
# the histogram-allowlists file lives in the root of the module. Account
# for that when looking for the allowlist.
# NOTE: if the parsers are moved, this logic will need to be updated.
telemetry_module_path = os.path.abspath(os.path.join(parsers_path, os.pardir, os.pardir))
allowlist_path = os.path.join(telemetry_module_path, 'histogram-allowlists.json')
with open(allowlist_path, 'r') as f:
telemetry_module_path = os.path.abspath(
os.path.join(parsers_path, os.pardir, os.pardir)
)
allowlist_path = os.path.join(
telemetry_module_path, "histogram-allowlists.json"
)
with open(allowlist_path, "r") as f:
try:
allowlists = json.load(f)
for name, allowlist in allowlists.items():
allowlists[name] = set(allowlist)
except ValueError:
ParserError('Error parsing allowlist: %s' % allowlist_path).handle_now()
ParserError("Error parsing allowlist: %s" % allowlist_path).handle_now()
except IOError:
allowlists = None
ParserError('Unable to parse allowlist: %s.' % allowlist_path).handle_now()
ParserError("Unable to parse allowlist: %s." % allowlist_path).handle_now()
class Histogram:
@ -129,32 +135,33 @@ class Histogram:
def __init__(self, name, definition, strict_type_checks=False):
"""Initialize a histogram named name with the given definition.
definition is a dict-like object that must contain at least the keys:
definition is a dict-like object that must contain at least the keys:
- 'kind': The kind of histogram. Must be one of 'boolean', 'flag',
'count', 'enumerated', 'linear', or 'exponential'.
- 'description': A textual description of the histogram.
- 'strict_type_checks': A boolean indicating whether to use the new, stricter type checks.
The server-side still has to deal with old, oddly typed submissions,
so we have to skip them there by default."""
- 'kind': The kind of histogram. Must be one of 'boolean', 'flag',
'count', 'enumerated', 'linear', or 'exponential'.
- 'description': A textual description of the histogram.
- 'strict_type_checks': A boolean indicating whether to use the new, stricter type checks.
The server-side still has to deal with old, oddly typed
submissions, so we have to skip them there by default.
"""
self._strict_type_checks = strict_type_checks
self._is_use_counter = name.startswith("USE_COUNTER2_")
if self._is_use_counter:
definition.setdefault('record_in_processes', ['main', 'content'])
definition.setdefault('releaseChannelCollection', 'opt-out')
definition.setdefault('products', ['firefox', 'fennec'])
definition.setdefault("record_in_processes", ["main", "content"])
definition.setdefault("releaseChannelCollection", "opt-out")
definition.setdefault("products", ["firefox", "fennec"])
self.verify_attributes(name, definition)
self._name = name
self._description = definition['description']
self._kind = definition['kind']
self._keys = definition.get('keys', [])
self._keyed = definition.get('keyed', False)
self._expiration = definition.get('expires_in_version')
self._labels = definition.get('labels', [])
self._record_in_processes = definition.get('record_in_processes')
self._record_into_store = definition.get('record_into_store', ['main'])
self._products = definition.get('products')
self._operating_systems = definition.get('operating_systems', ["all"])
self._description = definition["description"]
self._kind = definition["kind"]
self._keys = definition.get("keys", [])
self._keyed = definition.get("keyed", False)
self._expiration = definition.get("expires_in_version")
self._labels = definition.get("labels", [])
self._record_in_processes = definition.get("record_in_processes")
self._record_into_store = definition.get("record_into_store", ["main"])
self._products = definition.get("products")
self._operating_systems = definition.get("operating_systems", ["all"])
self.compute_bucket_parameters(definition)
self.set_nsITelemetry_kind()
@ -170,8 +177,8 @@ definition is a dict-like object that must contain at least the keys:
def kind(self):
"""Return the kind of the histogram.
Will be one of 'boolean', 'flag', 'count', 'enumerated', 'categorical', 'linear',
or 'exponential'."""
Will be one of 'boolean', 'flag', 'count', 'enumerated', 'categorical', 'linear',
or 'exponential'."""
return self._kind
def expiration(self):
@ -180,7 +187,7 @@ or 'exponential'."""
def nsITelemetry_kind(self):
"""Return the nsITelemetry constant corresponding to the kind of
the histogram."""
the histogram."""
return self._nsITelemetry_kind
def low(self):
@ -251,61 +258,65 @@ the histogram."""
def ranges(self):
"""Return an array of lower bounds for each bucket in the histogram."""
bucket_fns = {
'boolean': linear_buckets,
'flag': linear_buckets,
'count': linear_buckets,
'enumerated': linear_buckets,
'categorical': linear_buckets,
'linear': linear_buckets,
'exponential': exponential_buckets,
"boolean": linear_buckets,
"flag": linear_buckets,
"count": linear_buckets,
"enumerated": linear_buckets,
"categorical": linear_buckets,
"linear": linear_buckets,
"exponential": exponential_buckets,
}
if self._kind not in bucket_fns:
ParserError('Unknown kind "%s" for histogram "%s".' %
(self._kind, self._name)).handle_later()
ParserError(
'Unknown kind "%s" for histogram "%s".' % (self._kind, self._name)
).handle_later()
fn = bucket_fns[self._kind]
return fn(self.low(), self.high(), self.n_buckets())
def compute_bucket_parameters(self, definition):
bucket_fns = {
'boolean': Histogram.boolean_flag_bucket_parameters,
'flag': Histogram.boolean_flag_bucket_parameters,
'count': Histogram.boolean_flag_bucket_parameters,
'enumerated': Histogram.enumerated_bucket_parameters,
'categorical': Histogram.categorical_bucket_parameters,
'linear': Histogram.linear_bucket_parameters,
'exponential': Histogram.exponential_bucket_parameters,
"boolean": Histogram.boolean_flag_bucket_parameters,
"flag": Histogram.boolean_flag_bucket_parameters,
"count": Histogram.boolean_flag_bucket_parameters,
"enumerated": Histogram.enumerated_bucket_parameters,
"categorical": Histogram.categorical_bucket_parameters,
"linear": Histogram.linear_bucket_parameters,
"exponential": Histogram.exponential_bucket_parameters,
}
if self._kind not in bucket_fns:
ParserError('Unknown kind "%s" for histogram "%s".' %
(self._kind, self._name)).handle_later()
ParserError(
'Unknown kind "%s" for histogram "%s".' % (self._kind, self._name)
).handle_later()
fn = bucket_fns[self._kind]
self.set_bucket_parameters(*fn(definition))
def verify_attributes(self, name, definition):
global ALWAYS_ALLOWED_KEYS
general_keys = ALWAYS_ALLOWED_KEYS + ['low', 'high', 'n_buckets']
general_keys = ALWAYS_ALLOWED_KEYS + ["low", "high", "n_buckets"]
table = {
'boolean': ALWAYS_ALLOWED_KEYS,
'flag': ALWAYS_ALLOWED_KEYS,
'count': ALWAYS_ALLOWED_KEYS,
'enumerated': ALWAYS_ALLOWED_KEYS + ['n_values'],
'categorical': ALWAYS_ALLOWED_KEYS + ['labels', 'n_values'],
'linear': general_keys,
'exponential': general_keys,
"boolean": ALWAYS_ALLOWED_KEYS,
"flag": ALWAYS_ALLOWED_KEYS,
"count": ALWAYS_ALLOWED_KEYS,
"enumerated": ALWAYS_ALLOWED_KEYS + ["n_values"],
"categorical": ALWAYS_ALLOWED_KEYS + ["labels", "n_values"],
"linear": general_keys,
"exponential": general_keys,
}
# We removed extended_statistics_ok on the client, but the server-side,
# where _strict_type_checks==False, has to deal with historical data.
if not self._strict_type_checks:
table['exponential'].append('extended_statistics_ok')
table["exponential"].append("extended_statistics_ok")
kind = definition['kind']
kind = definition["kind"]
if kind not in table:
ParserError('Unknown kind "%s" for histogram "%s".' % (kind, name)).handle_later()
ParserError(
'Unknown kind "%s" for histogram "%s".' % (kind, name)
).handle_later()
allowed_keys = table[kind]
self.check_name(name)
@ -322,25 +333,29 @@ the histogram."""
self.check_record_into_store(name, definition)
def check_name(self, name):
if '#' in name:
ParserError('Error for histogram name "%s": "#" is not allowed.' %
(name)).handle_later()
if "#" in name:
ParserError(
'Error for histogram name "%s": "#" is not allowed.' % (name)
).handle_later()
# Avoid C++ identifier conflicts between histogram enums and label enum names.
if name.startswith("LABELS_"):
ParserError('Error for histogram name "%s": can not start with "LABELS_".' %
(name)).handle_later()
ParserError(
'Error for histogram name "%s": can not start with "LABELS_".' % (name)
).handle_later()
# To make it easier to generate C++ identifiers from this etc., we restrict
# the histogram names to a strict pattern.
# We skip this on the server to avoid failures with old Histogram.json revisions.
if self._strict_type_checks:
if not re.match(CPP_IDENTIFIER_PATTERN, name, re.IGNORECASE):
ParserError('Error for histogram name "%s": name does not conform to "%s"' %
(name, CPP_IDENTIFIER_PATTERN)).handle_later()
ParserError(
'Error for histogram name "%s": name does not conform to "%s"'
% (name, CPP_IDENTIFIER_PATTERN)
).handle_later()
def check_expiration(self, name, definition):
field = 'expires_in_version'
field = "expires_in_version"
expiration = definition.get(field)
if not expiration:
@ -348,97 +363,125 @@ the histogram."""
# We forbid new probes from using "expires_in_version" : "default" field/value pair.
# Old ones that use this are added to the allowlist.
if expiration == "default" and \
allowlists is not None and \
name not in allowlists['expiry_default']:
ParserError('New histogram "%s" cannot have "default" %s value.' %
(name, field)).handle_later()
if (
expiration == "default"
and allowlists is not None
and name not in allowlists["expiry_default"]
):
ParserError(
'New histogram "%s" cannot have "default" %s value.' % (name, field)
).handle_later()
# Historical editions of Histograms.json can have the deprecated
# expiration format 'N.Na1'. Fortunately, those scripts set
# self._strict_type_checks to false.
if expiration != "default" and \
not utils.validate_expiration_version(expiration) and \
self._strict_type_checks:
ParserError(('Error for histogram {} - invalid {}: {}.'
'\nSee: {}#expires-in-version')
.format(name, field, expiration, HISTOGRAMS_DOC_URL)).handle_later()
if (
expiration != "default"
and not utils.validate_expiration_version(expiration)
and self._strict_type_checks
):
ParserError(
(
"Error for histogram {} - invalid {}: {}."
"\nSee: {}#expires-in-version"
).format(name, field, expiration, HISTOGRAMS_DOC_URL)
).handle_later()
expiration = utils.add_expiration_postfix(expiration)
definition[field] = expiration
def check_label_values(self, name, definition):
labels = definition.get('labels')
labels = definition.get("labels")
if not labels:
return
invalid = filter(lambda l: len(l) > MAX_LABEL_LENGTH, labels)
if len(list(invalid)) > 0:
ParserError('Label values for "%s" exceed length limit of %d: %s' %
(name, MAX_LABEL_LENGTH, ', '.join(invalid))).handle_later()
ParserError(
'Label values for "%s" exceed length limit of %d: %s'
% (name, MAX_LABEL_LENGTH, ", ".join(invalid))
).handle_later()
if len(labels) > MAX_LABEL_COUNT:
ParserError('Label count for "%s" exceeds limit of %d' %
(name, MAX_LABEL_COUNT)).handle_now()
ParserError(
'Label count for "%s" exceeds limit of %d' % (name, MAX_LABEL_COUNT)
).handle_now()
# To make it easier to generate C++ identifiers from this etc., we restrict
# the label values to a strict pattern.
invalid = filter(lambda l: not re.match(CPP_IDENTIFIER_PATTERN, l, re.IGNORECASE), labels)
invalid = filter(
lambda l: not re.match(CPP_IDENTIFIER_PATTERN, l, re.IGNORECASE), labels
)
if len(list(invalid)) > 0:
ParserError('Label values for %s are not matching pattern "%s": %s' %
(name, CPP_IDENTIFIER_PATTERN, ', '.join(invalid))).handle_later()
ParserError(
'Label values for %s are not matching pattern "%s": %s'
% (name, CPP_IDENTIFIER_PATTERN, ", ".join(invalid))
).handle_later()
def check_record_in_processes(self, name, definition):
if not self._strict_type_checks:
return
field = 'record_in_processes'
field = "record_in_processes"
rip = definition.get(field)
DOC_URL = HISTOGRAMS_DOC_URL + "#record-in-processes"
if not rip:
ParserError('Histogram "%s" must have a "%s" field:\n%s'
% (name, field, DOC_URL)).handle_later()
ParserError(
'Histogram "%s" must have a "%s" field:\n%s' % (name, field, DOC_URL)
).handle_later()
for process in rip:
if not utils.is_valid_process_name(process):
ParserError('Histogram "%s" has unknown process "%s" in %s.\n%s' %
(name, process, field, DOC_URL)).handle_later()
ParserError(
'Histogram "%s" has unknown process "%s" in %s.\n%s'
% (name, process, field, DOC_URL)
).handle_later()
def check_products(self, name, definition):
if not self._strict_type_checks:
return
field = 'products'
field = "products"
products = definition.get(field)
DOC_URL = HISTOGRAMS_DOC_URL + "#products"
if not products:
ParserError('Histogram "%s" must have a "%s" field:\n%s'
% (name, field, DOC_URL)).handle_now()
ParserError(
'Histogram "%s" must have a "%s" field:\n%s' % (name, field, DOC_URL)
).handle_now()
for product in products:
if not utils.is_valid_product(product):
ParserError('Histogram "%s" has unknown product "%s" in %s.\n%s' %
(name, product, field, DOC_URL)).handle_later()
ParserError(
'Histogram "%s" has unknown product "%s" in %s.\n%s'
% (name, product, field, DOC_URL)
).handle_later()
if utils.is_geckoview_streaming_product(product):
kind = definition.get('kind')
kind = definition.get("kind")
if kind not in GECKOVIEW_STREAMING_SUPPORTED_KINDS:
ParserError(('Histogram "%s" is of kind "%s" which is unsupported for '
'product "%s".') % (name, kind, product)).handle_later()
keyed = definition.get('keyed')
ParserError(
(
'Histogram "%s" is of kind "%s" which is unsupported for '
'product "%s".'
)
% (name, kind, product)
).handle_later()
keyed = definition.get("keyed")
if keyed:
ParserError('Keyed histograms like "%s" are unsupported for product "%s"' %
(name, product)).handle_later()
ParserError(
'Keyed histograms like "%s" are unsupported for product "%s"'
% (name, product)
).handle_later()
def check_operating_systems(self, name, definition):
if not self._strict_type_checks:
return
field = 'operating_systems'
field = "operating_systems"
operating_systems = definition.get(field)
DOC_URL = HISTOGRAMS_DOC_URL + "#operating-systems"
@ -449,14 +492,16 @@ the histogram."""
for operating_system in operating_systems:
if not utils.is_valid_os(operating_system):
ParserError('Histogram "%s" has unknown operating system "%s" in %s.\n%s' %
(name, operating_system, field, DOC_URL)).handle_later()
ParserError(
'Histogram "%s" has unknown operating system "%s" in %s.\n%s'
% (name, operating_system, field, DOC_URL)
).handle_later()
def check_record_into_store(self, name, definition):
if not self._strict_type_checks:
return
field = 'record_into_store'
field = "record_into_store"
DOC_URL = HISTOGRAMS_DOC_URL + "#record-into-store"
if field not in definition:
@ -466,28 +511,36 @@ the histogram."""
record_into_store = definition.get(field)
# record_into_store should not be empty
if not record_into_store:
ParserError('Histogram "%s" has empty list of stores, which is not allowed.\n%s' %
(name, DOC_URL)).handle_later()
ParserError(
'Histogram "%s" has empty list of stores, which is not allowed.\n%s'
% (name, DOC_URL)
).handle_later()
def check_keys_field(self, name, definition):
keys = definition.get('keys')
keys = definition.get("keys")
if not self._strict_type_checks or keys is None:
return
if not definition.get('keyed', False):
raise ValueError("'keys' field is not valid for %s; only allowed for keyed histograms."
% (name))
if not definition.get("keyed", False):
raise ValueError(
"'keys' field is not valid for %s; only allowed for keyed histograms."
% (name)
)
if len(keys) == 0:
raise ValueError('The key list for %s cannot be empty' % (name))
raise ValueError("The key list for %s cannot be empty" % (name))
if len(keys) > MAX_KEY_COUNT:
raise ValueError('Label count for %s exceeds limit of %d' % (name, MAX_KEY_COUNT))
raise ValueError(
"Label count for %s exceeds limit of %d" % (name, MAX_KEY_COUNT)
)
invalid = filter(lambda k: len(k) > MAX_KEY_LENGTH, keys)
if len(list(invalid)) > 0:
raise ValueError('"keys" values for %s are exceeding length "%d": %s' %
(name, MAX_KEY_LENGTH, ', '.join(invalid)))
raise ValueError(
'"keys" values for %s are exceeding length "%d": %s'
% (name, MAX_KEY_LENGTH, ", ".join(invalid))
)
def check_allowlisted_kind(self, name, definition):
# We don't need to run any of these checks on the server.
@ -500,16 +553,22 @@ the histogram."""
hist_kind = definition.get("kind")
android_target = "android" in definition.get("operating_systems", [])
if not android_target and \
hist_kind in ["flag", "count"] and \
name not in allowlists["kind"]:
ParserError(('Unsupported kind "%s" for histogram "%s":\n'
'New "%s" histograms are not supported on Desktop, you should'
' use scalars instead:\n'
'%s\n'
'Are you trying to add a histogram on Android?'
' Add "operating_systems": ["android"] to your histogram definition.')
% (hist_kind, name, hist_kind, SCALARS_DOC_URL)).handle_now()
if (
not android_target
and hist_kind in ["flag", "count"]
and name not in allowlists["kind"]
):
ParserError(
(
'Unsupported kind "%s" for histogram "%s":\n'
'New "%s" histograms are not supported on Desktop, you should'
" use scalars instead:\n"
"%s\n"
"Are you trying to add a histogram on Android?"
' Add "operating_systems": ["android"] to your histogram definition.'
)
% (hist_kind, name, hist_kind, SCALARS_DOC_URL)
).handle_now()
# Check for the presence of fields that old histograms are allowlisted for.
def check_allowlistable_fields(self, name, definition):
@ -523,13 +582,16 @@ the histogram."""
if allowlists is None:
return
for field in ['alert_emails', 'bug_numbers']:
for field in ["alert_emails", "bug_numbers"]:
if field not in definition and name not in allowlists[field]:
ParserError('New histogram "%s" must have a "%s" field.' %
(name, field)).handle_later()
ParserError(
'New histogram "%s" must have a "%s" field.' % (name, field)
).handle_later()
if field in definition and name in allowlists[field]:
msg = 'Histogram "%s" should be removed from the allowlist for "%s" in ' \
'histogram-allowlists.json.'
msg = (
'Histogram "%s" should be removed from the allowlist for "%s" in '
"histogram-allowlists.json."
)
ParserError(msg % (name, field)).handle_later()
def check_field_types(self, name, definition):
@ -576,6 +638,7 @@ the histogram."""
return eval(v, {})
except Exception:
return v
for key in [k for k in coerce_fields if k in definition]:
definition[key] = try_to_coerce_to_number(definition[key])
# This handles old "keyed":"true" definitions (bug 1271986).
@ -591,46 +654,56 @@ the histogram."""
if key not in definition:
continue
if not isinstance(definition[key], key_type):
ParserError('Value for key "{0}" in histogram "{1}" should be {2}.'
.format(key, name, nice_type_name(key_type))).handle_later()
ParserError(
'Value for key "{0}" in histogram "{1}" should be {2}.'.format(
key, name, nice_type_name(key_type)
)
).handle_later()
# Make sure the max range is lower than or equal to INT_MAX
if "high" in definition and not c_int(definition["high"]).value > 0:
ParserError('Value for high in histogram "{0}" should be lower or equal to INT_MAX.'
.format(nice_type_name(c_int))).handle_later()
ParserError(
'Value for high in histogram "{0}" should be lower or equal to INT_MAX.'.format(
nice_type_name(c_int)
)
).handle_later()
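# Illustrative note on the check above (values are hypothetical): c_int is
# 32-bit, so a definition with "high": 2**31 wraps to
# c_int(2**31).value == -2147483648, the `> 0` test fails and the ParserError
# fires, while "high": 2**31 - 1 (INT_MAX itself) is accepted.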
for key, key_type in type_checked_list_fields.items():
if key not in definition:
continue
if not all(isinstance(x, key_type) for x in definition[key]):
ParserError('All values for list "{0}" in histogram "{1}" should be of type'
' {2}.'.format(key, name, nice_type_name(key_type))).handle_later()
ParserError(
'All values for list "{0}" in histogram "{1}" should be of type'
" {2}.".format(key, name, nice_type_name(key_type))
).handle_later()
def check_keys(self, name, definition, allowed_keys):
if not self._strict_type_checks:
return
for key in iter(definition.keys()):
if key not in allowed_keys:
ParserError('Key "%s" is not allowed for histogram "%s".' %
(key, name)).handle_later()
ParserError(
'Key "%s" is not allowed for histogram "%s".' % (key, name)
).handle_later()
def set_bucket_parameters(self, low, high, n_buckets):
self._low = low
self._high = high
self._n_buckets = n_buckets
max_n_buckets = 101 if self._kind in ['enumerated', 'categorical'] else 100
if (allowlists is not None
max_n_buckets = 101 if self._kind in ["enumerated", "categorical"] else 100
if (
allowlists is not None
and self._n_buckets > max_n_buckets
and type(self._n_buckets) is int):
if self._name not in allowlists['n_buckets']:
and type(self._n_buckets) is int
):
if self._name not in allowlists["n_buckets"]:
ParserError(
'New histogram "%s" is not permitted to have more than 100 buckets.\n'
'Histograms with large numbers of buckets use disproportionately high'
' amounts of resources. Contact a Telemetry peer (e.g. in #telemetry)'
' if you think an exception ought to be made:\n'
'https://wiki.mozilla.org/Modules/Toolkit#Telemetry'
% self._name
).handle_later()
"Histograms with large numbers of buckets use disproportionately high"
" amounts of resources. Contact a Telemetry peer (e.g. in #telemetry)"
" if you think an exception ought to be made:\n"
"https://wiki.mozilla.org/Modules/Toolkit#Telemetry" % self._name
).handle_later()
@staticmethod
def boolean_flag_bucket_parameters(definition):
@ -638,13 +711,11 @@ the histogram."""
@staticmethod
def linear_bucket_parameters(definition):
return (definition.get('low', 1),
definition['high'],
definition['n_buckets'])
return (definition.get("low", 1), definition["high"], definition["n_buckets"])
@staticmethod
def enumerated_bucket_parameters(definition):
n_values = definition['n_values']
n_values = definition["n_values"]
return (1, n_values, n_values + 1)
@staticmethod
@ -653,45 +724,48 @@ the histogram."""
# Otherwise when adding labels later we run into problems with the pipeline not
# supporting bucket changes.
# This can be overridden using the n_values field.
n_values = max(len(definition['labels']),
definition.get('n_values', 0),
MIN_CATEGORICAL_BUCKET_COUNT)
n_values = max(
len(definition["labels"]),
definition.get("n_values", 0),
MIN_CATEGORICAL_BUCKET_COUNT,
)
return (1, n_values, n_values + 1)
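# Illustrative sketch of the computation above (hypothetical values): if
# MIN_CATEGORICAL_BUCKET_COUNT were 50, a categorical histogram with three
# labels and no explicit "n_values" would get n_values = max(3, 0, 50) == 50
# and bucket parameters (low=1, high=50, n_buckets=51), leaving headroom to
# add labels later without changing the bucket layout.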
@staticmethod
def exponential_bucket_parameters(definition):
return (definition.get('low', 1),
definition['high'],
definition['n_buckets'])
return (definition.get("low", 1), definition["high"], definition["n_buckets"])
def set_nsITelemetry_kind(self):
# Pick a Telemetry implementation type.
types = {
'boolean': 'BOOLEAN',
'flag': 'FLAG',
'count': 'COUNT',
'enumerated': 'LINEAR',
'categorical': 'CATEGORICAL',
'linear': 'LINEAR',
'exponential': 'EXPONENTIAL',
"boolean": "BOOLEAN",
"flag": "FLAG",
"count": "COUNT",
"enumerated": "LINEAR",
"categorical": "CATEGORICAL",
"linear": "LINEAR",
"exponential": "EXPONENTIAL",
}
if self._kind not in types:
ParserError('Unknown kind "%s" for histogram "%s".' %
(self._kind, self._name)).handle_later()
ParserError(
'Unknown kind "%s" for histogram "%s".' % (self._kind, self._name)
).handle_later()
self._nsITelemetry_kind = "nsITelemetry::HISTOGRAM_%s" % types[self._kind]
def set_dataset(self, definition):
datasets = {
'opt-in': 'DATASET_PRERELEASE_CHANNELS',
'opt-out': 'DATASET_ALL_CHANNELS'
"opt-in": "DATASET_PRERELEASE_CHANNELS",
"opt-out": "DATASET_ALL_CHANNELS",
}
value = definition.get('releaseChannelCollection', 'opt-in')
value = definition.get("releaseChannelCollection", "opt-in")
if value not in datasets:
ParserError('Unknown value for releaseChannelCollection'
' policy for histogram "%s".' % self._name).handle_later()
ParserError(
"Unknown value for releaseChannelCollection"
' policy for histogram "%s".' % self._name
).handle_later()
self._dataset = "nsITelemetry::" + datasets[value]
@ -702,7 +776,9 @@ def load_histograms_into_dict(ordered_pairs, strict_type_checks):
d = collections.OrderedDict()
for key, value in ordered_pairs:
if strict_type_checks and key in d:
ParserError("Found duplicate key in Histograms file: %s" % key).handle_later()
ParserError(
"Found duplicate key in Histograms file: %s" % key
).handle_later()
d[key] = value
return d
@ -712,13 +788,17 @@ def load_histograms_into_dict(ordered_pairs, strict_type_checks):
# routine to parse that file, and return a dictionary mapping histogram
# names to histogram parameters.
def from_json(filename, strict_type_checks):
with open(filename, 'r') as f:
with open(filename, "r") as f:
try:
def hook(ps):
return load_histograms_into_dict(ps, strict_type_checks)
histograms = json.load(f, object_pairs_hook=hook)
except ValueError as e:
ParserError("error parsing histograms in %s: %s" % (filename, e.message)).handle_now()
ParserError(
"error parsing histograms in %s: %s" % (filename, e.message)
).handle_now()
return histograms
@ -731,10 +811,10 @@ def from_UseCountersWorker_conf(filename, strict_type_checks):
def from_nsDeprecatedOperationList(filename, strict_type_checks):
operation_regex = re.compile('^DEPRECATED_OPERATION\\(([^)]+)\\)')
operation_regex = re.compile("^DEPRECATED_OPERATION\\(([^)]+)\\)")
histograms = collections.OrderedDict()
with open(filename, 'r') as f:
with open(filename, "r") as f:
for line in f:
match = operation_regex.search(line)
if not match:
@ -743,35 +823,42 @@ def from_nsDeprecatedOperationList(filename, strict_type_checks):
op = match.group(1)
def add_counter(context):
name = 'USE_COUNTER2_DEPRECATED_%s_%s' % (op, context.upper())
name = "USE_COUNTER2_DEPRECATED_%s_%s" % (op, context.upper())
histograms[name] = {
'expires_in_version': 'never',
'kind': 'boolean',
'description': 'Whether a %s used %s' % (context, op)
"expires_in_version": "never",
"kind": "boolean",
"description": "Whether a %s used %s" % (context, op),
}
add_counter('document')
add_counter('page')
add_counter("document")
add_counter("page")
return histograms
def to_camel_case(property_name):
return re.sub("(^|_|-)([a-z0-9])",
lambda m: m.group(2).upper(),
property_name.strip("_").strip("-"))
return re.sub(
"(^|_|-)([a-z0-9])",
lambda m: m.group(2).upper(),
property_name.strip("_").strip("-"),
)
def add_css_property_counters(histograms, property_name):
def add_counter(context):
name = 'USE_COUNTER2_CSS_PROPERTY_%s_%s' % (to_camel_case(property_name), context.upper())
name = "USE_COUNTER2_CSS_PROPERTY_%s_%s" % (
to_camel_case(property_name),
context.upper(),
)
histograms[name] = {
'expires_in_version': 'never',
'kind': 'boolean',
'description': 'Whether a %s used the CSS property %s' % (context, property_name)
"expires_in_version": "never",
"kind": "boolean",
"description": "Whether a %s used the CSS property %s"
% (context, property_name),
}
add_counter('document')
add_counter('page')
add_counter("document")
add_counter("page")
def from_ServoCSSPropList(filename, strict_type_checks):
@ -799,7 +886,7 @@ def from_counted_unknown_properties(filename, strict_type_checks):
# This is only used for probe-scraper.
def from_properties_db(filename, strict_type_checks):
histograms = collections.OrderedDict()
with open(filename, 'r') as f:
with open(filename, "r") as f:
in_css_properties = False
for line in f:
@ -811,20 +898,28 @@ def from_properties_db(filename, strict_type_checks):
if line.startswith("};"):
break
if not line.startswith(" \""):
if not line.startswith(' "'):
continue
name = line.split("\"")[1]
name = line.split('"')[1]
add_css_property_counters(histograms, name)
return histograms
FILENAME_PARSERS = [
(lambda x: from_json if x.endswith('.json') else None),
(lambda x: from_nsDeprecatedOperationList if x == 'nsDeprecatedOperationList.h' else None),
(lambda x: from_ServoCSSPropList if x == 'ServoCSSPropList.py' else None),
(lambda x: from_counted_unknown_properties if x == 'counted_unknown_properties.py' else None),
(lambda x: from_properties_db if x == 'properties-db.js' else None),
(lambda x: from_json if x.endswith(".json") else None),
(
lambda x: from_nsDeprecatedOperationList
if x == "nsDeprecatedOperationList.h"
else None
),
(lambda x: from_ServoCSSPropList if x == "ServoCSSPropList.py" else None),
(
lambda x: from_counted_unknown_properties
if x == "counted_unknown_properties.py"
else None
),
(lambda x: from_properties_db if x == "properties-db.js" else None),
]
# Similarly to the dance above with buildconfig, usecounters may not be
@ -832,16 +927,19 @@ FILENAME_PARSERS = [
try:
import usecounters
FILENAME_PARSERS.append(lambda x: from_UseCounters_conf if x == 'UseCounters.conf' else None)
FILENAME_PARSERS.append(
lambda x: from_UseCountersWorker_conf if x == 'UseCountersWorker.conf' else None)
lambda x: from_UseCounters_conf if x == "UseCounters.conf" else None
)
FILENAME_PARSERS.append(
lambda x: from_UseCountersWorker_conf if x == "UseCountersWorker.conf" else None
)
except ImportError:
pass
def from_files(filenames, strict_type_checks=True):
"""Return an iterator that provides a sequence of Histograms for
the histograms defined in filenames.
the histograms defined in filenames.
"""
if strict_type_checks:
load_allowlist()
@ -878,19 +976,24 @@ the histograms defined in filenames.
upper_bound = indices[-1][0]
n_counters = upper_bound - lower_bound + 1
if n_counters != len(indices):
ParserError("Histograms %s must be defined in a contiguous block." %
name).handle_later()
ParserError(
"Histograms %s must be defined in a contiguous block." % name
).handle_later()
# We require that all USE_COUNTER2_*_WORKER histograms be defined in a contiguous
# block.
check_continuity(all_histograms,
lambda x: x[1].startswith("USE_COUNTER2_") and x[1].endswith("_WORKER"),
"use counter worker")
check_continuity(
all_histograms,
lambda x: x[1].startswith("USE_COUNTER2_") and x[1].endswith("_WORKER"),
"use counter worker",
)
# And all other USE_COUNTER2_* histograms be defined in a contiguous
# block.
check_continuity(all_histograms,
lambda x: x[1].startswith("USE_COUNTER2_") and not x[1].endswith("_WORKER"),
"use counter")
check_continuity(
all_histograms,
lambda x: x[1].startswith("USE_COUNTER2_") and not x[1].endswith("_WORKER"),
"use counter",
)
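# Illustrative sketch of what check_continuity() guards against (hypothetical
# indices): if the matching histograms sat at enum indices 10, 11 and 14,
# upper_bound - lower_bound + 1 would be 5 while only 3 entries exist, so the
# "must be defined in a contiguous block" ParserError would be raised.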
# Check that histograms that were removed from Histograms.json etc.
# are also removed from the allowlists.
@ -898,9 +1001,11 @@ the histograms defined in filenames.
all_allowlist_entries = itertools.chain.from_iterable(iter(allowlists.values()))
orphaned = set(all_allowlist_entries) - set(all_histograms.keys())
if len(orphaned) > 0:
msg = 'The following entries are orphaned and should be removed from ' \
'histogram-allowlists.json:\n%s'
ParserError(msg % (', '.join(sorted(orphaned)))).handle_later()
msg = (
"The following entries are orphaned and should be removed from "
"histogram-allowlists.json:\n%s"
)
ParserError(msg % (", ".join(sorted(orphaned)))).handle_later()
for (name, definition) in all_histograms.items():
yield Histogram(name, definition, strict_type_checks=strict_type_checks)


@ -2,7 +2,7 @@
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
r'''Make it easy to install and run [browsertime](https://github.com/sitespeedio/browsertime).
r"""Make it easy to install and run [browsertime](https://github.com/sitespeedio/browsertime).
Browsertime is a harness for running performance tests, similar to
Mozilla's Raptor testing framework. Browsertime is written in Node.js
@ -26,7 +26,7 @@ To invoke browsertime, run
./mach browsertime [ARGS]
```
All arguments are passed through to browsertime.
'''
"""
from __future__ import absolute_import, print_function, unicode_literals
@ -65,93 +65,88 @@ def silence():
def node_path():
from mozbuild.nodeutil import find_node_executable
node, _ = find_node_executable()
return os.path.abspath(node)
def package_path():
'''The path to the `browsertime` directory.
"""The path to the `browsertime` directory.
Override the default with the `BROWSERTIME` environment variable.'''
override = os.environ.get('BROWSERTIME', None)
Override the default with the `BROWSERTIME` environment variable."""
override = os.environ.get("BROWSERTIME", None)
if override:
return override
return mozpath.join(BROWSERTIME_ROOT, 'node_modules', 'browsertime')
return mozpath.join(BROWSERTIME_ROOT, "node_modules", "browsertime")
def browsertime_path():
'''The path to the `browsertime.js` script.'''
"""The path to the `browsertime.js` script."""
# On Windows, invoking `node_modules/.bin/browsertime{.cmd}`
# doesn't work when invoked as an argument to our specific
# binary. Since we want our version of node, invoke the
# actual script directly.
return mozpath.join(
package_path(),
'bin',
'browsertime.js')
return mozpath.join(package_path(), "bin", "browsertime.js")
def visualmetrics_path():
'''The path to the `visualmetrics.py` script.'''
return mozpath.join(
package_path(),
'browsertime',
'visualmetrics.py')
"""The path to the `visualmetrics.py` script."""
return mozpath.join(package_path(), "browsertime", "visualmetrics.py")
def host_platform():
is_64bits = sys.maxsize > 2**32
is_64bits = sys.maxsize > 2 ** 32
if sys.platform.startswith('win'):
if sys.platform.startswith("win"):
if is_64bits:
return 'win64'
elif sys.platform.startswith('linux'):
return "win64"
elif sys.platform.startswith("linux"):
if is_64bits:
return 'linux64'
elif sys.platform.startswith('darwin'):
return 'darwin'
return "linux64"
elif sys.platform.startswith("darwin"):
return "darwin"
raise ValueError('sys.platform is not yet supported: {}'.format(sys.platform))
raise ValueError("sys.platform is not yet supported: {}".format(sys.platform))
# Map from `host_platform()` to a `fetch`-like syntax.
host_fetches = {
'darwin': {
'ffmpeg': {
'type': 'static-url',
'url': 'https://github.com/ncalexan/geckodriver/releases/download/v0.24.0-android/ffmpeg-4.1.1-macos64-static.zip', # noqa
"darwin": {
"ffmpeg": {
"type": "static-url",
"url": "https://github.com/ncalexan/geckodriver/releases/download/v0.24.0-android/ffmpeg-4.1.1-macos64-static.zip", # noqa
# An extension to `fetch` syntax.
'path': 'ffmpeg-4.1.1-macos64-static',
"path": "ffmpeg-4.1.1-macos64-static",
},
},
'linux64': {
'ffmpeg': {
'type': 'static-url',
'url': 'https://github.com/ncalexan/geckodriver/releases/download/v0.24.0-android/ffmpeg-4.1.4-i686-static.tar.xz', # noqa
"linux64": {
"ffmpeg": {
"type": "static-url",
"url": "https://github.com/ncalexan/geckodriver/releases/download/v0.24.0-android/ffmpeg-4.1.4-i686-static.tar.xz", # noqa
# An extension to `fetch` syntax.
'path': 'ffmpeg-4.1.4-i686-static',
"path": "ffmpeg-4.1.4-i686-static",
},
# TODO: install a static ImageMagick. All easily available binaries are
# not statically linked, so they will (mostly) fail at runtime due to
# missing dependencies. For now we require folks to install ImageMagick
# globally with their package manager of choice.
},
'win64': {
'ffmpeg': {
'type': 'static-url',
'url': 'https://github.com/ncalexan/geckodriver/releases/download/v0.24.0-android/ffmpeg-4.1.1-win64-static.zip', # noqa
"win64": {
"ffmpeg": {
"type": "static-url",
"url": "https://github.com/ncalexan/geckodriver/releases/download/v0.24.0-android/ffmpeg-4.1.1-win64-static.zip", # noqa
# An extension to `fetch` syntax.
'path': 'ffmpeg-4.1.1-win64-static',
"path": "ffmpeg-4.1.1-win64-static",
},
'ImageMagick': {
'type': 'static-url',
"ImageMagick": {
"type": "static-url",
# 'url': 'https://imagemagick.org/download/binaries/ImageMagick-7.0.8-39-portable-Q16-x64.zip', # noqa
# imagemagick.org doesn't keep old versions; the mirror below does.
'url': 'https://ftp.icm.edu.pl/packages/ImageMagick/binaries/ImageMagick-7.0.8-39-portable-Q16-x64.zip', # noqa
"url": "https://ftp.icm.edu.pl/packages/ImageMagick/binaries/ImageMagick-7.0.8-39-portable-Q16-x64.zip", # noqa
# An extension to `fetch` syntax.
'path': 'ImageMagick-7.0.8',
"path": "ImageMagick-7.0.8",
},
},
}
@ -161,23 +156,23 @@ host_fetches = {
class MachBrowsertime(MachCommandBase):
@property
def artifact_cache_path(self):
r'''Downloaded artifacts will be kept here.'''
r"""Downloaded artifacts will be kept here."""
# The convention is $MOZBUILD_STATE_PATH/cache/$FEATURE.
return mozpath.join(self._mach_context.state_dir, 'cache', 'browsertime')
return mozpath.join(self._mach_context.state_dir, "cache", "browsertime")
@property
def state_path(self):
r'''Unpacked artifacts will be kept here.'''
r"""Unpacked artifacts will be kept here."""
# The convention is $MOZBUILD_STATE_PATH/$FEATURE.
return mozpath.join(self._mach_context.state_dir, 'browsertime')
return mozpath.join(self._mach_context.state_dir, "browsertime")
def setup_prerequisites(self):
r'''Install browsertime and visualmetrics.py prerequisites.'''
r"""Install browsertime and visualmetrics.py prerequisites."""
from mozbuild.action.tooltool import unpack_file
from mozbuild.artifact_cache import ArtifactCache
if not AUTOMATION and host_platform().startswith('linux'):
if not AUTOMATION and host_platform().startswith("linux"):
# On Linux ImageMagick needs to be installed manually, and `mach bootstrap` doesn't
# do that (yet). Provide some guidance.
try:
@ -185,49 +180,53 @@ class MachBrowsertime(MachCommandBase):
except ImportError:
from shutil_which import which
im_programs = ('compare', 'convert', 'mogrify')
im_programs = ("compare", "convert", "mogrify")
for im_program in im_programs:
prog = which(im_program)
if not prog:
print('Error: On Linux, ImageMagick must be on the PATH. '
'Install ImageMagick manually and try again (or update PATH). '
'On Ubuntu and Debian, try `sudo apt-get install imagemagick`. '
'On Fedora, try `sudo dnf install imagemagick`. '
'On CentOS, try `sudo yum install imagemagick`.')
print(
"Error: On Linux, ImageMagick must be on the PATH. "
"Install ImageMagick manually and try again (or update PATH). "
"On Ubuntu and Debian, try `sudo apt-get install imagemagick`. "
"On Fedora, try `sudo dnf install imagemagick`. "
"On CentOS, try `sudo yum install imagemagick`."
)
return 1
# Download the visualmetrics.py requirements.
artifact_cache = ArtifactCache(self.artifact_cache_path,
log=self.log, skip_cache=False)
artifact_cache = ArtifactCache(
self.artifact_cache_path, log=self.log, skip_cache=False
)
fetches = host_fetches[host_platform()]
for tool, fetch in sorted(fetches.items()):
archive = artifact_cache.fetch(fetch['url'])
archive = artifact_cache.fetch(fetch["url"])
# TODO: assert type, verify sha256 (and size?).
if fetch.get('unpack', True):
if fetch.get("unpack", True):
cwd = os.getcwd()
try:
mkdir(self.state_path)
os.chdir(self.state_path)
self.log(
logging.INFO,
'browsertime',
{'path': archive},
'Unpacking temporary location {path}')
"browsertime",
{"path": archive},
"Unpacking temporary location {path}",
)
if 'win64' in host_platform() and 'imagemagick' in tool.lower():
if "win64" in host_platform() and "imagemagick" in tool.lower():
# Windows archive does not contain a subfolder
# so we make one for it here
mkdir(fetch.get('path'))
os.chdir(os.path.join(self.state_path, fetch.get('path')))
mkdir(fetch.get("path"))
os.chdir(os.path.join(self.state_path, fetch.get("path")))
unpack_file(archive)
os.chdir(self.state_path)
else:
unpack_file(archive)
# Make sure the expected path exists after extraction
path = os.path.join(self.state_path, fetch.get('path'))
path = os.path.join(self.state_path, fetch.get("path"))
if not os.path.exists(path):
raise Exception("Cannot find an extracted directory: %s" % path)
@ -245,42 +244,51 @@ class MachBrowsertime(MachCommandBase):
os.chmod(loc_to_change, st.st_mode | stat.S_IEXEC)
except Exception as e:
raise Exception(
"Could not set executable bit in %s, error: %s" % (path, str(e))
"Could not set executable bit in %s, error: %s"
% (path, str(e))
)
finally:
os.chdir(cwd)
def setup(self, should_clobber=False, new_upstream_url=''):
r'''Install browsertime and visualmetrics.py prerequisites and the Node.js package.'''
def setup(self, should_clobber=False, new_upstream_url=""):
r"""Install browsertime and visualmetrics.py prerequisites and the Node.js package."""
sys.path.append(mozpath.join(self.topsrcdir, 'tools', 'lint', 'eslint'))
sys.path.append(mozpath.join(self.topsrcdir, "tools", "lint", "eslint"))
import setup_helper
if not new_upstream_url:
self.setup_prerequisites()
if new_upstream_url:
package_json_path = os.path.join(BROWSERTIME_ROOT, 'package.json')
package_json_path = os.path.join(BROWSERTIME_ROOT, "package.json")
self.log(
logging.INFO,
'browsertime',
{'new_upstream_url': new_upstream_url, 'package_json_path': package_json_path},
'Updating browsertime node module version in {package_json_path} '
'to {new_upstream_url}')
"browsertime",
{
"new_upstream_url": new_upstream_url,
"package_json_path": package_json_path,
},
"Updating browsertime node module version in {package_json_path} "
"to {new_upstream_url}",
)
if not re.search('/tarball/[a-f0-9]{40}$', new_upstream_url):
raise ValueError("New upstream URL does not end with /tarball/[a-f0-9]{40}: '%s'"
% new_upstream_url)
if not re.search("/tarball/[a-f0-9]{40}$", new_upstream_url):
raise ValueError(
"New upstream URL does not end with /tarball/[a-f0-9]{40}: '%s'"
% new_upstream_url
)
with open(package_json_path) as f:
existing_body = json.loads(f.read(), object_pairs_hook=collections.OrderedDict)
existing_body = json.loads(
f.read(), object_pairs_hook=collections.OrderedDict
)
existing_body['devDependencies']['browsertime'] = new_upstream_url
existing_body["devDependencies"]["browsertime"] = new_upstream_url
updated_body = json.dumps(existing_body)
with open(package_json_path, 'w') as f:
with open(package_json_path, "w") as f:
f.write(updated_body)
# Install the browsertime Node.js requirements.
@ -297,15 +305,17 @@ class MachBrowsertime(MachCommandBase):
self.log(
logging.INFO,
'browsertime',
{'package_json': mozpath.join(BROWSERTIME_ROOT, 'package.json')},
'Installing browsertime node module from {package_json}')
"browsertime",
{"package_json": mozpath.join(BROWSERTIME_ROOT, "package.json")},
"Installing browsertime node module from {package_json}",
)
status = setup_helper.package_setup(
BROWSERTIME_ROOT,
'browsertime',
should_update=new_upstream_url != '',
"browsertime",
should_update=new_upstream_url != "",
should_clobber=should_clobber,
no_optional=new_upstream_url or AUTOMATION)
no_optional=new_upstream_url or AUTOMATION,
)
if status:
return status
@ -316,13 +326,14 @@ class MachBrowsertime(MachCommandBase):
return self.check()
def node(self, args):
r'''Invoke node (interactively) with the given arguments.'''
r"""Invoke node (interactively) with the given arguments."""
return self.run_process(
[node_path()] + args,
append_env=self.append_env(),
pass_thru=True, # Allow user to run Node interactively.
ensure_exit_code=False, # Don't throw on non-zero exit code.
cwd=mozpath.join(self.topsrcdir))
cwd=mozpath.join(self.topsrcdir),
)
def append_env(self, append_path=True):
fetches = host_fetches[host_platform()]
@ -331,22 +342,30 @@ class MachBrowsertime(MachCommandBase):
# {`convert`,`compare`,`mogrify`} are found. The `visualmetrics.py`
# script doesn't take these as configuration, so we do this (for now).
# We should update the script itself to accept this configuration.
path = os.environ.get('PATH', '').split(os.pathsep) if append_path else []
path_to_ffmpeg = mozpath.join(
self.state_path,
fetches['ffmpeg']['path'])
path = os.environ.get("PATH", "").split(os.pathsep) if append_path else []
path_to_ffmpeg = mozpath.join(self.state_path, fetches["ffmpeg"]["path"])
path_to_imagemagick = None
if 'ImageMagick' in fetches:
if "ImageMagick" in fetches:
path_to_imagemagick = mozpath.join(
self.state_path,
fetches['ImageMagick']['path'])
self.state_path, fetches["ImageMagick"]["path"]
)
if path_to_imagemagick:
# ImageMagick ships ffmpeg (on Windows, at least) so we
# want to ensure that our ffmpeg goes first, just in case.
path.insert(0, self.state_path if host_platform().startswith('win') else mozpath.join(path_to_imagemagick, 'bin')) # noqa
path.insert(0, path_to_ffmpeg if host_platform().startswith('linux') else mozpath.join(path_to_ffmpeg, 'bin')) # noqa
path.insert(
0,
self.state_path
if host_platform().startswith("win")
else mozpath.join(path_to_imagemagick, "bin"),
) # noqa
path.insert(
0,
path_to_ffmpeg
if host_platform().startswith("linux")
else mozpath.join(path_to_ffmpeg, "bin"),
) # noqa
# Ensure that bare `node` and `npm` in scripts, including post-install
# scripts, find the binary we're invoking with. Without this, it's
@ -358,7 +377,7 @@ class MachBrowsertime(MachCommandBase):
# On windows, we need to add the ImageMagick directory to the path
# otherwise compare won't be found, and the built-in OS convert
# method will be used instead of the ImageMagick one.
if 'win64' in host_platform() and path_to_imagemagick:
if "win64" in host_platform() and path_to_imagemagick:
# Bug 1596237 - In the windows ImageMagick distribution, the ffmpeg
# binary is directly located in the root directory, so here we
# insert in the 3rd position to avoid taking precedence over ffmpeg
@ -379,8 +398,7 @@ class MachBrowsertime(MachCommandBase):
path.append(p)
append_env = {
'PATH': os.pathsep.join(path),
"PATH": os.pathsep.join(path),
# Bug 1560193: The JS library browsertime uses to execute commands
# (execa) will muck up the PATH variable and put the directory that
# node is in first in path. If this is globally-installed node,
@ -389,36 +407,41 @@ class MachBrowsertime(MachCommandBase):
#
# Our fork of browsertime supports a `PYTHON` environment variable
# that points to the exact python executable to use.
'PYTHON': self.virtualenv_manager.python_path,
"PYTHON": self.virtualenv_manager.python_path,
}
if path_to_imagemagick:
append_env.update({
# See https://imagemagick.org/script/download.php. Harmless on other platforms.
'LD_LIBRARY_PATH': mozpath.join(path_to_imagemagick, 'lib'),
'DYLD_LIBRARY_PATH': mozpath.join(path_to_imagemagick, 'lib'),
'MAGICK_HOME': path_to_imagemagick,
})
append_env.update(
{
# See https://imagemagick.org/script/download.php. Harmless on other
# platforms.
"LD_LIBRARY_PATH": mozpath.join(path_to_imagemagick, "lib"),
"DYLD_LIBRARY_PATH": mozpath.join(path_to_imagemagick, "lib"),
"MAGICK_HOME": path_to_imagemagick,
}
)
return append_env
def _need_install(self, package):
from pip._internal.req.constructors import install_req_from_line
req = install_req_from_line(package)
req.check_if_exists(use_user_site=False)
if req.satisfied_by is None:
return True
venv_site_lib = os.path.abspath(os.path.join(self.virtualenv_manager.bin_path, "..",
"lib"))
venv_site_lib = os.path.abspath(
os.path.join(self.virtualenv_manager.bin_path, "..", "lib")
)
site_packages = os.path.abspath(req.satisfied_by.location)
return not site_packages.startswith(venv_site_lib)
def activate_virtualenv(self, *args, **kwargs):
r'''Activates virtualenv.
r"""Activates virtualenv.
This function will also install Pillow and pyssim if needed.
It will raise an error in case the install failed.
'''
"""
MachCommandBase.activate_virtualenv(self, *args, **kwargs)
# installing Python deps on the fly
@ -427,19 +450,20 @@ class MachBrowsertime(MachCommandBase):
self.virtualenv_manager._run_pip(["install", dep])
def check(self):
r'''Run `visualmetrics.py --check`.'''
r"""Run `visualmetrics.py --check`."""
self.activate_virtualenv()
args = ['--check']
args = ["--check"]
status = self.run_process(
[self.virtualenv_manager.python_path, visualmetrics_path()] + args,
# For --check, don't allow user's path to interfere with
# path testing except on Linux, where ImageMagick needs to
# be installed manually.
append_env=self.append_env(append_path=host_platform().startswith('linux')),
append_env=self.append_env(append_path=host_platform().startswith("linux")),
pass_thru=True,
ensure_exit_code=False, # Don't throw on non-zero exit code.
cwd=mozpath.join(self.topsrcdir))
cwd=mozpath.join(self.topsrcdir),
)
sys.stdout.flush()
sys.stderr.flush()
@ -449,12 +473,12 @@ class MachBrowsertime(MachCommandBase):
# Avoid logging the command (and, on Windows, the environment).
self.log_manager.terminal_handler.setLevel(logging.CRITICAL)
print('browsertime version:', end=' ')
print("browsertime version:", end=" ")
sys.stdout.flush()
sys.stderr.flush()
return self.node([browsertime_path()] + ['--version'])
return self.node([browsertime_path()] + ["--version"])
def extra_default_args(self, args=[]):
# Add Mozilla-specific default arguments. This is tricky because browsertime is quite
@ -462,72 +486,83 @@ class MachBrowsertime(MachCommandBase):
# difficult to interpret type errors.
def extract_browser_name(args):
'Extracts the browser name if any'
"Extracts the browser name if any"
# These are BT arguments and it's BT's job to check them;
# here we just want to extract the browser name
res = re.findall("(--browser|-b)[= ]([\w]+)", ' '.join(args))
res = re.findall("(--browser|-b)[= ]([\w]+)", " ".join(args))
if res == []:
return None
return res[0][-1]
def matches(args, *flags):
'Return True if any argument matches any of the given flags (maybe with an argument).'
"Return True if any argument matches any of the given flags (maybe with an argument)."
for flag in flags:
if flag in args or any(arg.startswith(flag + '=') for arg in args):
if flag in args or any(arg.startswith(flag + "=") for arg in args):
return True
return False
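# Illustrative calls (hypothetical argument lists):
#   matches(["--browser=chrome"], "-b", "--browser")  -> True  (flag with "=")
#   matches(["-b", "chrome"], "-b", "--browser")       -> True  (bare flag)
#   matches(["--skipHar"], "-b", "--browser")          -> False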
extra_args = []
# Default to Firefox. Override with `-b ...` or `--browser=...`.
specifies_browser = matches(args, '-b', '--browser')
specifies_browser = matches(args, "-b", "--browser")
if not specifies_browser:
extra_args.extend(('-b', 'firefox'))
extra_args.extend(("-b", "firefox"))
# Default to not collect HAR. Override with `--skipHar=false`.
specifies_har = matches(args, '--har', '--skipHar', '--gzipHar')
specifies_har = matches(args, "--har", "--skipHar", "--gzipHar")
if not specifies_har:
extra_args.append('--skipHar')
extra_args.append("--skipHar")
if not matches(args, "--android"):
# If --firefox.binaryPath is not specified, default to the objdir binary
# Note: --firefox.release is not a real browsertime option; browsertime will
# silently ignore it and default to a release installation.
specifies_binaryPath = matches(args, '--firefox.binaryPath',
'--firefox.release', '--firefox.nightly',
'--firefox.beta', '--firefox.developer')
specifies_binaryPath = matches(
args,
"--firefox.binaryPath",
"--firefox.release",
"--firefox.nightly",
"--firefox.beta",
"--firefox.developer",
)
if not specifies_binaryPath:
specifies_binaryPath = extract_browser_name(args) == 'chrome'
specifies_binaryPath = extract_browser_name(args) == "chrome"
if not specifies_binaryPath:
try:
extra_args.extend(('--firefox.binaryPath', self.get_binary_path()))
extra_args.extend(("--firefox.binaryPath", self.get_binary_path()))
except BinaryNotFoundException as e:
self.log(logging.ERROR,
'browsertime',
{'error': str(e)},
'ERROR: {error}')
self.log(logging.INFO,
'browsertime',
{},
'Please run |./mach build| '
'or specify a Firefox binary with --firefox.binaryPath.')
self.log(
logging.ERROR,
"browsertime",
{"error": str(e)},
"ERROR: {error}",
)
self.log(
logging.INFO,
"browsertime",
{},
"Please run |./mach build| "
"or specify a Firefox binary with --firefox.binaryPath.",
)
return 1
if extra_args:
self.log(
logging.DEBUG,
'browsertime',
{'extra_args': extra_args},
'Running browsertime with extra default arguments: {extra_args}')
"browsertime",
{"extra_args": extra_args},
"Running browsertime with extra default arguments: {extra_args}",
)
return extra_args
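# Illustrative sketch (hypothetical invocation): for a plain
# `./mach browsertime https://example.com` on desktop, none of the browser,
# HAR or binaryPath flags are present, so extra_args would come out roughly as
#   ["-b", "firefox", "--skipHar", "--firefox.binaryPath", <objdir binary>]
# assuming self.get_binary_path() finds a local build.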
def _verify_node_install(self):
# check if Node is installed
sys.path.append(mozpath.join(self.topsrcdir, 'tools', 'lint', 'eslint'))
sys.path.append(mozpath.join(self.topsrcdir, "tools", "lint", "eslint"))
import setup_helper
with silence():
node_valid = setup_helper.check_node_executables_valid()
if not node_valid:
@ -543,23 +578,45 @@ class MachBrowsertime(MachCommandBase):
return True
@Command('browsertime', category='testing',
description='Run [browsertime](https://github.com/sitespeedio/browsertime) '
'performance tests.')
@CommandArgument('--verbose', action='store_true',
help='Verbose output for what commands the build is running.')
@CommandArgument('--update-upstream-url', default='')
@CommandArgument('--setup', default=False, action='store_true')
@CommandArgument('--clobber', default=False, action='store_true')
@CommandArgument('--skip-cache', default=False, action='store_true',
help='Skip all local caches to force re-fetching remote artifacts.')
@CommandArgument('--check', default=False, action='store_true')
@CommandArgument('--browsertime-help', default=False, action='store_true',
help='Show the browsertime help message.')
@CommandArgument('args', nargs=argparse.REMAINDER)
def browsertime(self, args, verbose=False,
update_upstream_url='', setup=False, clobber=False,
skip_cache=False, check=False, browsertime_help=False):
@Command(
"browsertime",
category="testing",
description="Run [browsertime](https://github.com/sitespeedio/browsertime) "
"performance tests.",
)
@CommandArgument(
"--verbose",
action="store_true",
help="Verbose output for what commands the build is running.",
)
@CommandArgument("--update-upstream-url", default="")
@CommandArgument("--setup", default=False, action="store_true")
@CommandArgument("--clobber", default=False, action="store_true")
@CommandArgument(
"--skip-cache",
default=False,
action="store_true",
help="Skip all local caches to force re-fetching remote artifacts.",
)
@CommandArgument("--check", default=False, action="store_true")
@CommandArgument(
"--browsertime-help",
default=False,
action="store_true",
help="Show the browsertime help message.",
)
@CommandArgument("args", nargs=argparse.REMAINDER)
def browsertime(
self,
args,
verbose=False,
update_upstream_url="",
setup=False,
clobber=False,
skip_cache=False,
check=False,
browsertime_help=False,
):
self._set_log_level(verbose)
if update_upstream_url:
@ -574,7 +631,7 @@ class MachBrowsertime(MachCommandBase):
return self.check()
if browsertime_help:
args.append('--help')
args.append("--help")
self.activate_virtualenv()
default_args = self.extra_default_args(args)
@ -582,10 +639,9 @@ class MachBrowsertime(MachCommandBase):
return 1
return self.node([browsertime_path()] + default_args + args)
@Command('visualmetrics', category='testing',
description='Run visualmetrics.py')
@CommandArgument('video')
@CommandArgument('args', nargs=argparse.REMAINDER)
@Command("visualmetrics", category="testing", description="Run visualmetrics.py")
@CommandArgument("video")
@CommandArgument("args", nargs=argparse.REMAINDER)
def visualmetrics(self, video, args):
self._set_log_level(True)
self.activate_virtualenv()
@ -595,24 +651,27 @@ class MachBrowsertime(MachCommandBase):
index, _ = os.path.splitext(base)
# TODO: write a '--logfile' as well.
args = ['--dir', # Images are written to `/path/to/video/images` (following browsertime).
mozpath.join(d, 'images', index),
'--video',
video,
'--orange',
'--perceptual',
'--contentful',
'--force',
'--renderignore',
'5',
'--json',
'--viewport',
'-q',
'75',
'-vvvv']
args = [
"--dir", # Images are written to `/path/to/video/images` (following browsertime).
mozpath.join(d, "images", index),
"--video",
video,
"--orange",
"--perceptual",
"--contentful",
"--force",
"--renderignore",
"5",
"--json",
"--viewport",
"-q",
"75",
"-vvvv",
]
return self.run_process(
[visualmetrics_path()] + args,
append_env=self.append_env(),
pass_thru=True,
ensure_exit_code=False, # Don't throw on non-zero exit code.
cwd=mozpath.join(self.topsrcdir))
cwd=mozpath.join(self.topsrcdir),
)


@ -2,45 +2,11 @@
black:
description: Reformat python
exclude:
- browser/components/migration/tests/marionette/test_refresh_firefox.py
- build/pgo/genpgocert.py
- config/check_macroassembler_style.py
- gfx/harfbuzz/src/meson.build
- js/src/devtools/rootAnalysis/t/testlib.py
- js/src/util/make_unicode.py
- layout/style/ServoCSSPropList.mako.py
- mobile/android/mach_commands.py
- python/mozbuild/mozbuild/mach_commands.py
- python/mozbuild/mozbuild/telemetry.py
- python/mozbuild/mozbuild/test/backend/test_build.py
- python/mozbuild/mozbuild/test/backend/test_recursivemake.py
- python/mozbuild/mozbuild/test/codecoverage/test_lcov_rewrite.py
- python/mozbuild/mozbuild/test/frontend/data/reader-error-syntax/moz.build
- taskcluster/taskgraph/transforms/bouncer_aliases.py
- taskcluster/taskgraph/transforms/mar_signing.py
- taskcluster/taskgraph/transforms/repackage_signing_partner.py
- testing/addtest.py
- testing/mochitest/mochitest_options.py
- testing/mozbase/mozpower/tests/test_macintelpower.py
- testing/mozharness/mozharness/mozilla/building/buildbase.py
- testing/mozharness/mozharness/mozilla/testing/errors.py
- testing/mozharness/mozharness/mozilla/testing/raptor.py
- testing/mozharness/configs/test/test_malformed.py
- testing/mozharness/mozharness/mozilla/testing/testbase.py
- testing/mozharness/scripts/desktop_unittest.py
- testing/mozharness/scripts/marionette.py
- testing/mozharness/scripts/release/bouncer_check.py
- testing/mozharness/scripts/release/update-verify-config-creator.py
- testing/talos/talos/test.py
- testing/talos/talos/unittests/test_xtalos.py
- testing/web-platform/metamerge.py
- testing/web-platform/tests
- toolkit/components/telemetry/build_scripts/mozparsers/parse_histograms.py
- tools/browsertime/mach_commands.py
- tools/power/mach_commands.py
- tools/tryselect/selectors/coverage.py
- tools/update-packaging/test_make_incremental_updates.py
- xpcom/components/gen_static_components.py
extensions:
- build
- configure


@ -16,28 +16,38 @@ from mozbuild.base import MachCommandBase
def is_osx_10_10_or_greater(cls):
import platform
release = platform.mac_ver()[0]
return release and StrictVersion(release) >= StrictVersion('10.10')
return release and StrictVersion(release) >= StrictVersion("10.10")
@CommandProvider
class MachCommands(MachCommandBase):
'''
"""
Get system power consumption and related measurements.
'''
@Command('power', category='misc',
conditions=[is_osx_10_10_or_greater],
description='Get system power consumption and related measurements for '
'all running browsers. Available only on Mac OS X 10.10 and above. '
'Requires root access.')
@CommandArgument('-i', '--interval', type=int, default=30000,
help='The sample period, measured in milliseconds. Defaults to 30000.')
"""
@Command(
"power",
category="misc",
conditions=[is_osx_10_10_or_greater],
description="Get system power consumption and related measurements for "
"all running browsers. Available only on Mac OS X 10.10 and above. "
"Requires root access.",
)
@CommandArgument(
"-i",
"--interval",
type=int,
default=30000,
help="The sample period, measured in milliseconds. Defaults to 30000.",
)
def power(self, interval):
import os
import re
import subprocess
rapl = os.path.join(self.topobjdir, 'dist', 'bin', 'rapl')
rapl = os.path.join(self.topobjdir, "dist", "bin", "rapl")
interval = str(interval)
@ -46,23 +56,31 @@ class MachCommands(MachCommandBase):
# doesn't start measuring while |powermetrics| is waiting for the root
# password to be entered.
try:
subprocess.check_call(['sudo', 'true'])
subprocess.check_call(["sudo", "true"])
except Exception:
print('\nsudo failed; aborting')
print("\nsudo failed; aborting")
return 1
# This runs rapl in the background because nothing in this script
# depends on the output. This is good because we want |rapl| and
# |powermetrics| to run at the same time.
subprocess.Popen([rapl, '-n', '1', '-i', interval])
subprocess.Popen([rapl, "-n", "1", "-i", interval])
lines = subprocess.check_output(['sudo', 'powermetrics',
'--samplers', 'tasks',
'--show-process-coalition',
'--show-process-gpu',
'-n', '1',
'-i', interval],
universal_newlines=True)
lines = subprocess.check_output(
[
"sudo",
"powermetrics",
"--samplers",
"tasks",
"--show-process-coalition",
"--show-process-gpu",
"-n",
"1",
"-i",
interval,
],
universal_newlines=True,
)
# When run with --show-process-coalition, |powermetrics| groups outputs
# into process coalitions, each of which has a leader.
@ -131,7 +149,10 @@ class MachCommands(MachCommandBase):
#
# - 'kernel' is for the kernel.
#
if re.search(r'(^Name|firefox|plugin-container|Safari\b|WebKit|Chrome|Terminal|WindowServer|kernel)', line): # NOQA: E501
if re.search(
r"(^Name|firefox|plugin-container|Safari\b|WebKit|Chrome|Terminal|WindowServer|kernel)", # NOQA: E501
line,
):
print(line)
return 0


@ -39,34 +39,36 @@ def setup_globals():
build = MozbuildObject.from_environment(cwd=here)
vcs = get_repository_object(build.topsrcdir)
root_hash = hashlib.sha256(six.ensure_binary(os.path.abspath(build.topsrcdir))).hexdigest()
cache_dir = os.path.join(get_state_dir(), 'cache', root_hash, 'chunk_mapping')
root_hash = hashlib.sha256(
six.ensure_binary(os.path.abspath(build.topsrcdir))
).hexdigest()
cache_dir = os.path.join(get_state_dir(), "cache", root_hash, "chunk_mapping")
if not os.path.isdir(cache_dir):
os.makedirs(cache_dir)
CHUNK_MAPPING_FILE = os.path.join(cache_dir, 'chunk_mapping.sqlite')
CHUNK_MAPPING_TAG_FILE = os.path.join(cache_dir, 'chunk_mapping_tag.json')
CHUNK_MAPPING_FILE = os.path.join(cache_dir, "chunk_mapping.sqlite")
CHUNK_MAPPING_TAG_FILE = os.path.join(cache_dir, "chunk_mapping_tag.json")
# Maps from platform names in the chunk_mapping sqlite database to respective
# substrings in task names.
PLATFORM_MAP = {
'linux': 'test-linux64/opt',
'windows': 'test-windows10-64/opt',
"linux": "test-linux64/opt",
"windows": "test-windows10-64/opt",
}
# List of platform/build type combinations that are included in pushes by |mach try coverage|.
OPT_TASK_PATTERNS = [
'macosx64/opt',
'windows10-64/opt',
'windows7-32/opt',
'linux64/opt',
"macosx64/opt",
"windows10-64/opt",
"windows7-32/opt",
"linux64/opt",
]
class CoverageParser(BaseTryParser):
name = 'coverage'
name = "coverage"
arguments = []
common_groups = ['push', 'task']
common_groups = ["push", "task"]
task_configs = [
"artifact",
"env",
@ -78,41 +80,42 @@ class CoverageParser(BaseTryParser):
def read_test_manifests():
'''Uses TestResolver to read all test manifests in the tree.
"""Uses TestResolver to read all test manifests in the tree.
Returns a (tests, support_files_map) tuple that describes the tests in the tree:
tests - a set of test file paths
support_files_map - a dict that maps from each support file to a list with
test files that require them it
'''
"""
test_resolver = TestResolver.from_environment(cwd=here)
file_finder = FileFinder(build.topsrcdir)
support_files_map = collections.defaultdict(list)
tests = set()
for test in test_resolver.resolve_tests(build.topsrcdir):
tests.add(test['srcdir_relpath'])
if 'support-files' not in test:
tests.add(test["srcdir_relpath"])
if "support-files" not in test:
continue
for support_file_pattern in test['support-files'].split():
for support_file_pattern in test["support-files"].split():
# Get the pattern relative to topsrcdir.
if support_file_pattern.startswith('!/'):
if support_file_pattern.startswith("!/"):
support_file_pattern = support_file_pattern[2:]
elif support_file_pattern.startswith('/'):
elif support_file_pattern.startswith("/"):
support_file_pattern = support_file_pattern[1:]
else:
support_file_pattern = os.path.normpath(os.path.join(test['dir_relpath'],
support_file_pattern))
support_file_pattern = os.path.normpath(
os.path.join(test["dir_relpath"], support_file_pattern)
)
# If it doesn't have a glob, then it's a single file.
if '*' not in support_file_pattern:
if "*" not in support_file_pattern:
# Simple case: single support file, just add it here.
support_files_map[support_file_pattern].append(test['srcdir_relpath'])
support_files_map[support_file_pattern].append(test["srcdir_relpath"])
continue
for support_file, _ in file_finder.find(support_file_pattern):
support_files_map[support_file].append(test['srcdir_relpath'])
support_files_map[support_file].append(test["srcdir_relpath"])
return tests, support_files_map
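# Illustrative sketch (hypothetical manifest entry): a test
# "dom/tests/test_foo.html" declaring `support-files = head.js` would add
# "dom/tests/test_foo.html" to `tests`, and the pattern, having no "*" and no
# leading "/" or "!/", resolves against dir_relpath to "dom/tests/head.js",
# which maps to ["dom/tests/test_foo.html"] in `support_files_map`.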
@ -123,78 +126,93 @@ all_tests, all_support_files = read_test_manifests()
def download_coverage_mapping(base_revision):
try:
with open(CHUNK_MAPPING_TAG_FILE, 'r') as f:
with open(CHUNK_MAPPING_TAG_FILE, "r") as f:
tags = json.load(f)
if tags['target_revision'] == base_revision:
if tags["target_revision"] == base_revision:
return
else:
print('Base revision changed.')
print("Base revision changed.")
except (IOError, ValueError):
print('Chunk mapping file not found.')
print("Chunk mapping file not found.")
CHUNK_MAPPING_URL_TEMPLATE = 'https://firefox-ci-tc.services.mozilla.com/api/index/v1/task/project.relman.code-coverage.production.cron.{}/artifacts/public/chunk_mapping.tar.xz' # noqa
JSON_PUSHES_URL_TEMPLATE = 'https://hg.mozilla.org/mozilla-central/json-pushes?version=2&tipsonly=1&startdate={}' # noqa
CHUNK_MAPPING_URL_TEMPLATE = "https://firefox-ci-tc.services.mozilla.com/api/index/v1/task/project.relman.code-coverage.production.cron.{}/artifacts/public/chunk_mapping.tar.xz" # noqa
JSON_PUSHES_URL_TEMPLATE = "https://hg.mozilla.org/mozilla-central/json-pushes?version=2&tipsonly=1&startdate={}" # noqa
# Get pushes from at most one month ago.
PUSH_HISTORY_DAYS = 30
delta = datetime.timedelta(days=PUSH_HISTORY_DAYS)
start_time = (datetime.datetime.now() - delta).strftime('%Y-%m-%d')
start_time = (datetime.datetime.now() - delta).strftime("%Y-%m-%d")
pushes_url = JSON_PUSHES_URL_TEMPLATE.format(start_time)
pushes_data = requests.get(pushes_url + '&tochange={}'.format(base_revision)).json()
if 'error' in pushes_data:
if 'unknown revision' in pushes_data['error']:
print('unknown revision {}, trying with latest mozilla-central'.format(base_revision))
pushes_data = requests.get(pushes_url + "&tochange={}".format(base_revision)).json()
if "error" in pushes_data:
if "unknown revision" in pushes_data["error"]:
print(
"unknown revision {}, trying with latest mozilla-central".format(
base_revision
)
)
pushes_data = requests.get(pushes_url).json()
if 'error' in pushes_data:
raise Exception(pushes_data['error'])
if "error" in pushes_data:
raise Exception(pushes_data["error"])
pushes = pushes_data['pushes']
pushes = pushes_data["pushes"]
print('Looking for coverage data. This might take a minute or two.')
print('Base revision:', base_revision)
print("Looking for coverage data. This might take a minute or two.")
print("Base revision:", base_revision)
for push_id in sorted(pushes.keys())[::-1]:
rev = pushes[push_id]['changesets'][0]
rev = pushes[push_id]["changesets"][0]
url = CHUNK_MAPPING_URL_TEMPLATE.format(rev)
print('push id: {},\trevision: {}'.format(push_id, rev))
print("push id: {},\trevision: {}".format(push_id, rev))
r = requests.head(url)
if not r.ok:
continue
print('Chunk mapping found, downloading...')
print("Chunk mapping found, downloading...")
r = requests.get(url, stream=True)
CHUNK_MAPPING_ARCHIVE = os.path.join(build.topsrcdir, 'chunk_mapping.tar.xz')
with open(CHUNK_MAPPING_ARCHIVE, 'wb') as f:
CHUNK_MAPPING_ARCHIVE = os.path.join(build.topsrcdir, "chunk_mapping.tar.xz")
with open(CHUNK_MAPPING_ARCHIVE, "wb") as f:
r.raw.decode_content = True
shutil.copyfileobj(r.raw, f)
subprocess.check_call(['tar', '-xJf', CHUNK_MAPPING_ARCHIVE,
'-C', os.path.dirname(CHUNK_MAPPING_FILE)])
subprocess.check_call(
[
"tar",
"-xJf",
CHUNK_MAPPING_ARCHIVE,
"-C",
os.path.dirname(CHUNK_MAPPING_FILE),
]
)
os.remove(CHUNK_MAPPING_ARCHIVE)
assert os.path.isfile(CHUNK_MAPPING_FILE)
with open(CHUNK_MAPPING_TAG_FILE, 'w') as f:
json.dump({'target_revision': base_revision,
'chunk_mapping_revision': rev,
'download_date': start_time},
f)
with open(CHUNK_MAPPING_TAG_FILE, "w") as f:
json.dump(
{
"target_revision": base_revision,
"chunk_mapping_revision": rev,
"download_date": start_time,
},
f,
)
return
raise Exception('Could not find suitable coverage data.')
raise Exception("Could not find suitable coverage data.")
def is_a_test(cursor, path):
'''Checks the all_tests global and the chunk mapping database to see if a
"""Checks the all_tests global and the chunk mapping database to see if a
given file is a test file.
'''
"""
if path in all_tests:
return True
cursor.execute('SELECT COUNT(*) from chunk_to_test WHERE path=?', (path,))
cursor.execute("SELECT COUNT(*) from chunk_to_test WHERE path=?", (path,))
if cursor.fetchone()[0]:
return True
cursor.execute('SELECT COUNT(*) from file_to_test WHERE test=?', (path,))
cursor.execute("SELECT COUNT(*) from file_to_test WHERE test=?", (path,))
if cursor.fetchone()[0]:
return True
@ -202,37 +220,34 @@ def is_a_test(cursor, path):
def tests_covering_file(cursor, path):
'''Returns a set of tests that cover a given source file.
'''
cursor.execute('SELECT test FROM file_to_test WHERE source=?', (path,))
"""Returns a set of tests that cover a given source file."""
cursor.execute("SELECT test FROM file_to_test WHERE source=?", (path,))
return set(e[0] for e in cursor.fetchall())
def tests_in_chunk(cursor, platform, chunk):
'''Returns a set of tests that are contained in a given chunk.
'''
cursor.execute('SELECT path FROM chunk_to_test WHERE platform=? AND chunk=?',
(platform, chunk))
"""Returns a set of tests that are contained in a given chunk."""
cursor.execute(
"SELECT path FROM chunk_to_test WHERE platform=? AND chunk=?", (platform, chunk)
)
# Because of bug 1480103, some entries in this table contain both a file name and a test name,
# separated by a space. With the split, only the file name is kept.
return set(e[0].split(' ')[0] for e in cursor.fetchall())
return set(e[0].split(" ")[0] for e in cursor.fetchall())
def chunks_covering_file(cursor, path):
'''Returns a set of (platform, chunk) tuples with the chunks that cover a given source file.
'''
cursor.execute('SELECT platform, chunk FROM file_to_chunk WHERE path=?', (path,))
"""Returns a set of (platform, chunk) tuples with the chunks that cover a given source file."""
cursor.execute("SELECT platform, chunk FROM file_to_chunk WHERE path=?", (path,))
return set(cursor.fetchall())
def tests_supported_by_file(path):
'''Returns a set of tests that are using the given file as a support-file.
'''
"""Returns a set of tests that are using the given file as a support-file."""
return set(all_support_files[path])
def find_tests(changed_files):
'''Finds both individual tests and test chunks that should be run to test code changes.
"""Finds both individual tests and test chunks that should be run to test code changes.
Argument: a list of file paths relative to the source checkout.
Returns: a (test_files, test_chunks) tuple with two sets.
@ -240,7 +255,7 @@ def find_tests(changed_files):
test_chunks - contains (platform, chunk) tuples with chunks that should be
run. These chunks do not support running a subset of the tests (like
cppunit or gtest), so the whole chunk must be run.
'''
"""
test_files = set()
test_chunks = set()
files_no_coverage = set()
@ -273,7 +288,7 @@ def find_tests(changed_files):
files_no_coverage.add(path)
files_covered = set(changed_files) - files_no_coverage
test_files = set(s.replace('\\', '/') for s in test_files)
test_files = set(s.replace("\\", "/") for s in test_files)
_print_found_tests(files_covered, files_no_coverage, test_files, test_chunks)
@ -284,7 +299,7 @@ def find_tests(changed_files):
tests = tests_in_chunk(c, platform, chunk)
if tests:
for test in tests:
test_files.add(test.replace('\\', '/'))
test_files.add(test.replace("\\", "/"))
else:
remaining_test_chunks.add((platform, chunk))
@ -292,44 +307,50 @@ def find_tests(changed_files):
def _print_found_tests(files_covered, files_no_coverage, test_files, test_chunks):
'''Print a summary of what will be run to the user's terminal.
'''
"""Print a summary of what will be run to the user's terminal."""
files_covered = sorted(files_covered)
files_no_coverage = sorted(files_no_coverage)
test_files = sorted(test_files)
test_chunks = sorted(test_chunks)
if files_covered:
print('Found {} modified source files with test coverage:'.format(len(files_covered)))
print(
"Found {} modified source files with test coverage:".format(
len(files_covered)
)
)
for covered in files_covered:
print('\t', covered)
print("\t", covered)
if files_no_coverage:
print('Found {} modified source files with no coverage:'.format(len(files_no_coverage)))
print(
"Found {} modified source files with no coverage:".format(
len(files_no_coverage)
)
)
for f in files_no_coverage:
print('\t', f)
print("\t", f)
if not files_covered:
print('No modified source files are covered by tests.')
print("No modified source files are covered by tests.")
elif not files_no_coverage:
print('All modified source files are covered by tests.')
print("All modified source files are covered by tests.")
if test_files:
print('Running {} individual test files.'.format(len(test_files)))
print("Running {} individual test files.".format(len(test_files)))
else:
print('Could not find any individual tests to run.')
print("Could not find any individual tests to run.")
if test_chunks:
print('Running {} test chunks.'.format(len(test_chunks)))
print("Running {} test chunks.".format(len(test_chunks)))
for platform, chunk in test_chunks:
print('\t', platform, chunk)
print("\t", platform, chunk)
else:
print('Could not find any test chunks to run.')
print("Could not find any test chunks to run.")
def filter_tasks_by_chunks(tasks, chunks):
'''Find all tasks that will run the given chunks.
'''
"""Find all tasks that will run the given chunks."""
selected_tasks = set()
for platform, chunk in chunks:
platform = PLATFORM_MAP[platform]
@ -339,14 +360,20 @@ def filter_tasks_by_chunks(tasks, chunks):
if not task.startswith(platform):
continue
if not any(task[len(platform) + 1:].endswith(c) for c in [chunk, chunk + '-e10s']):
if not any(
task[len(platform) + 1 :].endswith(c) for c in [chunk, chunk + "-e10s"]
):
continue
assert selected_task is None, 'Only one task should be selected for a given platform-chunk couple ({} - {}), {} and {} were selected'.format(platform, chunk, selected_task, task) # noqa
assert (
selected_task is None
), "Only one task should be selected for a given platform-chunk couple ({} - {}), {} and {} were selected".format( # noqa
platform, chunk, selected_task, task
)
selected_task = task
if selected_task is None:
print('Warning: no task found for chunk', platform, chunk)
print("Warning: no task found for chunk", platform, chunk)
else:
selected_tasks.add(selected_task)
@ -354,20 +381,27 @@ def filter_tasks_by_chunks(tasks, chunks):
def is_opt_task(task):
'''True if the task runs on a supported platform and build type combination.
"""True if the task runs on a supported platform and build type combination.
This is used to remove -ccov/asan/pgo tasks, along with all /debug tasks.
'''
"""
return any(platform in task for platform in OPT_TASK_PATTERNS)
def run(try_config={}, full=False, parameters=None, push=True, message='{msg}', closed_tree=False):
def run(
try_config={},
full=False,
parameters=None,
push=True,
message="{msg}",
closed_tree=False,
):
setup_globals()
download_coverage_mapping(vcs.base_ref)
changed_sources = vcs.get_outgoing_files()
test_files, test_chunks = find_tests(changed_sources)
if not test_files and not test_chunks:
print('ERROR Could not find any tests or chunks to run.')
print("ERROR Could not find any tests or chunks to run.")
return 1
tg = generate_tasks(parameters, full)
@ -379,25 +413,35 @@ def run(try_config={}, full=False, parameters=None, push=True, message='{msg}',
tasks = list(tasks)
if not tasks:
print('ERROR Did not find any matching tasks after filtering.')
print("ERROR Did not find any matching tasks after filtering.")
return 1
test_count_message = ('{test_count} test file{test_plural} that ' +
'cover{test_singular} these changes ' +
'({task_count} task{task_plural} to be scheduled)').format(
test_count_message = (
"{test_count} test file{test_plural} that "
+ "cover{test_singular} these changes "
+ "({task_count} task{task_plural} to be scheduled)"
).format(
test_count=len(test_files),
test_plural='' if len(test_files) == 1 else 's',
test_singular='s' if len(test_files) == 1 else '',
test_plural="" if len(test_files) == 1 else "s",
test_singular="s" if len(test_files) == 1 else "",
task_count=len(tasks),
task_plural='' if len(tasks) == 1 else 's')
print('Found ' + test_count_message)
task_plural="" if len(tasks) == 1 else "s",
)
print("Found " + test_count_message)
# Set the test paths to be run by setting MOZHARNESS_TEST_PATHS.
path_env = {'MOZHARNESS_TEST_PATHS': six.ensure_text(
json.dumps(resolve_tests_by_suite(test_files)))}
try_config.setdefault('env', {}).update(path_env)
path_env = {
"MOZHARNESS_TEST_PATHS": six.ensure_text(
json.dumps(resolve_tests_by_suite(test_files))
)
}
try_config.setdefault("env", {}).update(path_env)
# Build commit message.
msg = 'try coverage - ' + test_count_message
return push_to_try('coverage', message.format(msg=msg),
try_task_config=generate_try_task_config('coverage', tasks, try_config),
push=push, closed_tree=closed_tree)
msg = "try coverage - " + test_count_message
return push_to_try(
"coverage",
message.format(msg=msg),
try_task_config=generate_try_task_config("coverage", tasks, try_config),
push=push,
closed_tree=closed_tree,
)


@ -11,11 +11,16 @@ from make_incremental_updates import PatchInfo, MarFileEntry
class TestPatchInfo(unittest.TestCase):
def setUp(self):
self.work_dir = 'work_dir'
self.file_exclusion_list = ['update.manifest', 'updatev2.manifest', 'updatev3.manifest']
self.path_exclusion_list = ['/readme.txt']
self.work_dir = "work_dir"
self.file_exclusion_list = [
"update.manifest",
"updatev2.manifest",
"updatev3.manifest",
]
self.path_exclusion_list = ["/readme.txt"]
self.patch_info = PatchInfo(
self.work_dir, self.file_exclusion_list, self.path_exclusion_list)
self.work_dir, self.file_exclusion_list, self.path_exclusion_list
)
def testPatchInfo(self):
self.assertEquals(self.work_dir, self.patch_info.work_dir)
@ -26,51 +31,68 @@ class TestPatchInfo(unittest.TestCase):
self.assertEquals(self.path_exclusion_list, self.patch_info.path_exclusion_list)
def test_append_add_instruction(self):
self.patch_info.append_add_instruction('file.test')
self.patch_info.append_add_instruction("file.test")
self.assertEquals(['add "file.test"'], self.patch_info.manifestv2)
self.assertEquals(['add "file.test"'], self.patch_info.manifestv3)
def test_append_add_if_instruction(self):
self.patch_info.append_add_instruction('distribution/extensions/extension/file.test')
self.patch_info.append_add_instruction(
"distribution/extensions/extension/file.test"
)
self.assertEquals(
['add-if "distribution/extensions/extension" "distribution/extensions/extension/file.test"'], # NOQA: E501
self.patch_info.manifestv2)
[
'add-if "distribution/extensions/extension" "distribution/extensions/extension/file.test"' # NOQA: E501
],
self.patch_info.manifestv2,
)
self.assertEquals(
['add-if "distribution/extensions/extension" "distribution/extensions/extension/file.test"'], # NOQA: E501
self.patch_info.manifestv3)
[
'add-if "distribution/extensions/extension" "distribution/extensions/extension/file.test"' # NOQA: E501
],
self.patch_info.manifestv3,
)
def test_append_add_if_not_instruction(self):
self.patch_info.append_add_if_not_instruction('file.test')
self.patch_info.append_add_if_not_instruction("file.test")
self.assertEquals([], self.patch_info.manifestv2)
self.assertEquals(['add-if-not "file.test" "file.test"'], self.patch_info.manifestv3)
self.assertEquals(
['add-if-not "file.test" "file.test"'], self.patch_info.manifestv3
)
def test_append_patch_instruction(self):
self.patch_info.append_patch_instruction('file.test', 'patchname')
self.patch_info.append_patch_instruction("file.test", "patchname")
self.assertEquals(['patch "patchname" "file.test"'], self.patch_info.manifestv2)
self.assertEquals(['patch "patchname" "file.test"'], self.patch_info.manifestv3)
def test_append_patch_if_instruction(self):
self.patch_info.append_patch_instruction(
'distribution/extensions/extension/file.test', 'patchname')
"distribution/extensions/extension/file.test", "patchname"
)
self.assertEquals(
['patch-if "distribution/extensions/extension" "patchname" "distribution/extensions/extension/file.test"'], # NOQA: E501
self.patch_info.manifestv2)
[
'patch-if "distribution/extensions/extension" "patchname" "distribution/extensions/extension/file.test"' # NOQA: E501
],
self.patch_info.manifestv2,
)
self.assertEquals(
['patch-if "distribution/extensions/extension" "patchname" "distribution/extensions/extension/file.test"'], # NOQA: E501
self.patch_info.manifestv3)
[
'patch-if "distribution/extensions/extension" "patchname" "distribution/extensions/extension/file.test"' # NOQA: E501
],
self.patch_info.manifestv3,
)
def test_append_remove_instruction(self):
self.patch_info.append_remove_instruction('file.test')
self.patch_info.append_remove_instruction("file.test")
self.assertEquals(['remove "file.test"'], self.patch_info.manifestv2)
self.assertEquals(['remove "file.test"'], self.patch_info.manifestv3)
def test_append_rmdir_instruction(self):
self.patch_info.append_remove_instruction('dirtest/')
self.patch_info.append_remove_instruction("dirtest/")
self.assertEquals(['rmdir "dirtest/"'], self.patch_info.manifestv2)
self.assertEquals(['rmdir "dirtest/"'], self.patch_info.manifestv3)
def test_append_rmrfdir_instruction(self):
self.patch_info.append_remove_instruction('dirtest/*')
self.patch_info.append_remove_instruction("dirtest/*")
self.assertEquals(['rmrfdir "dirtest/"'], self.patch_info.manifestv2)
self.assertEquals(['rmrfdir "dirtest/"'], self.patch_info.manifestv3)
@ -80,8 +102,10 @@ class TestPatchInfo(unittest.TestCase):
"""
def test_build_marfile_entry_hash(self):
self.assertEquals(({}, set([]), set([])),
self.patch_info.build_marfile_entry_hash('root_path'))
self.assertEquals(
({}, set([]), set([])),
self.patch_info.build_marfile_entry_hash("root_path"),
)
""" FIXME touches the filesystem, need refactoring
@ -112,13 +136,14 @@ class TestMarFileEntry(unittest.TestCase):
class TestMakeIncrementalUpdates(unittest.TestCase):
def setUp(self):
work_dir = '.'
work_dir = "."
self.patch_info = PatchInfo(
work_dir,
['update.manifest', 'updatev2.manifest', 'updatev3.manifest'],
['/readme.txt'])
root_path = '/'
filename = 'test.file'
["update.manifest", "updatev2.manifest", "updatev3.manifest"],
["/readme.txt"],
)
root_path = "/"
filename = "test.file"
self.mar_file_entry = MarFileEntry(root_path, filename)
""" FIXME makes direct shell calls, need refactoring
@ -162,14 +187,25 @@ class TestMakeIncrementalUpdates(unittest.TestCase):
"""
def test_decode_filename(self):
expected = {'locale': 'lang', 'platform': 'platform',
'product': 'product', 'version': '1.0', 'type': 'complete'}
self.assertEquals(expected, mkup.decode_filename('product-1.0.lang.platform.complete.mar'))
self.assertEquals(expected, mkup.decode_filename('platform/lang/product-1.0.complete.mar'))
expected = {
"locale": "lang",
"platform": "platform",
"product": "product",
"version": "1.0",
"type": "complete",
}
self.assertEquals(
expected, mkup.decode_filename("product-1.0.lang.platform.complete.mar")
)
self.assertEquals(
expected, mkup.decode_filename("platform/lang/product-1.0.complete.mar")
)
with self.assertRaises(Exception) as cm:
mkup.decode_filename('fail')
self.assertTrue(cm.exception.args[0].startswith('could not parse filepath fail:'))
mkup.decode_filename("fail")
self.assertTrue(
cm.exception.args[0].startswith("could not parse filepath fail:")
)
if __name__ == '__main__':
if __name__ == "__main__":
unittest.main()


@ -11,17 +11,16 @@ from perfecthash import PerfectHash
import buildconfig
NO_CONTRACT_ID = 0xffffffff
NO_CONTRACT_ID = 0xFFFFFFFF
PHF_SIZE = 512
ENDIAN = '<' if buildconfig.substs['TARGET_ENDIANNESS'] == 'little' else '>'
ENDIAN = "<" if buildconfig.substs["TARGET_ENDIANNESS"] == "little" else ">"
# Represents a UUID in the format used internally by Gecko, and supports
# serializing it in that format to both C++ source and raw byte arrays.
class UUIDRepr(object):
def __init__(self, uuid):
self.uuid = uuid
@ -33,7 +32,7 @@ class UUIDRepr(object):
d = list(fields[3:5])
for i in range(0, 6):
d.append(fields[5] >> (8 * (5 - i)) & 0xff)
d.append(fields[5] >> (8 * (5 - i)) & 0xFF)
self.d = tuple(d)
@ -42,14 +41,12 @@ class UUIDRepr(object):
@property
def bytes(self):
return struct.pack(ENDIAN + 'IHHBBBBBBBB',
self.a, self.b, self.c, *self.d)
return struct.pack(ENDIAN + "IHHBBBBBBBB", self.a, self.b, self.c, *self.d)
def to_cxx(self):
rest = ', '.join('0x%02x' % b for b in self.d)
rest = ", ".join("0x%02x" % b for b in self.d)
return '{ 0x%x, 0x%x, 0x%x, { %s } }' % (self.a, self.b, self.c,
rest)
return "{ 0x%x, 0x%x, 0x%x, { %s } }" % (self.a, self.b, self.c, rest)
# Corresponds to the Module::ProcessSelector enum in Module.h. The actual
@ -63,63 +60,58 @@ class ProcessSelector:
ALLOW_IN_VR_PROCESS = 0x8
ALLOW_IN_SOCKET_PROCESS = 0x10
ALLOW_IN_RDD_PROCESS = 0x20
ALLOW_IN_GPU_AND_MAIN_PROCESS = (ALLOW_IN_GPU_PROCESS |
MAIN_PROCESS_ONLY)
ALLOW_IN_GPU_AND_SOCKET_PROCESS = (ALLOW_IN_GPU_PROCESS |
ALLOW_IN_SOCKET_PROCESS)
ALLOW_IN_GPU_AND_MAIN_PROCESS = ALLOW_IN_GPU_PROCESS | MAIN_PROCESS_ONLY
ALLOW_IN_GPU_AND_SOCKET_PROCESS = ALLOW_IN_GPU_PROCESS | ALLOW_IN_SOCKET_PROCESS
ALLOW_IN_GPU_AND_VR_PROCESS = ALLOW_IN_GPU_PROCESS | ALLOW_IN_VR_PROCESS
ALLOW_IN_GPU_VR_AND_SOCKET_PROCESS = (ALLOW_IN_GPU_PROCESS |
ALLOW_IN_VR_PROCESS |
ALLOW_IN_SOCKET_PROCESS)
ALLOW_IN_RDD_AND_SOCKET_PROCESS = (ALLOW_IN_RDD_PROCESS |
ALLOW_IN_SOCKET_PROCESS)
ALLOW_IN_GPU_RDD_AND_SOCKET_PROCESS = (ALLOW_IN_GPU_PROCESS |
ALLOW_IN_RDD_PROCESS |
ALLOW_IN_SOCKET_PROCESS)
ALLOW_IN_GPU_RDD_VR_AND_SOCKET_PROCESS = (ALLOW_IN_GPU_PROCESS |
ALLOW_IN_RDD_PROCESS |
ALLOW_IN_VR_PROCESS |
ALLOW_IN_SOCKET_PROCESS)
ALLOW_IN_GPU_VR_AND_SOCKET_PROCESS = (
ALLOW_IN_GPU_PROCESS | ALLOW_IN_VR_PROCESS | ALLOW_IN_SOCKET_PROCESS
)
ALLOW_IN_RDD_AND_SOCKET_PROCESS = ALLOW_IN_RDD_PROCESS | ALLOW_IN_SOCKET_PROCESS
ALLOW_IN_GPU_RDD_AND_SOCKET_PROCESS = (
ALLOW_IN_GPU_PROCESS | ALLOW_IN_RDD_PROCESS | ALLOW_IN_SOCKET_PROCESS
)
ALLOW_IN_GPU_RDD_VR_AND_SOCKET_PROCESS = (
ALLOW_IN_GPU_PROCESS
| ALLOW_IN_RDD_PROCESS
| ALLOW_IN_VR_PROCESS
| ALLOW_IN_SOCKET_PROCESS
)
# Maps ProcessSelector constants to the name of the corresponding
# Module::ProcessSelector enum value.
PROCESSES = {
ProcessSelector.ANY_PROCESS: 'ANY_PROCESS',
ProcessSelector.MAIN_PROCESS_ONLY: 'MAIN_PROCESS_ONLY',
ProcessSelector.CONTENT_PROCESS_ONLY: 'CONTENT_PROCESS_ONLY',
ProcessSelector.ALLOW_IN_GPU_PROCESS: 'ALLOW_IN_GPU_PROCESS',
ProcessSelector.ALLOW_IN_VR_PROCESS: 'ALLOW_IN_VR_PROCESS',
ProcessSelector.ALLOW_IN_SOCKET_PROCESS: 'ALLOW_IN_SOCKET_PROCESS',
ProcessSelector.ALLOW_IN_RDD_PROCESS: 'ALLOW_IN_RDD_PROCESS',
ProcessSelector.ALLOW_IN_GPU_AND_MAIN_PROCESS: 'ALLOW_IN_GPU_AND_MAIN_PROCESS',
ProcessSelector.ALLOW_IN_GPU_AND_SOCKET_PROCESS: 'ALLOW_IN_GPU_AND_SOCKET_PROCESS',
ProcessSelector.ALLOW_IN_GPU_AND_VR_PROCESS: 'ALLOW_IN_GPU_AND_VR_PROCESS',
ProcessSelector.ALLOW_IN_GPU_VR_AND_SOCKET_PROCESS: 'ALLOW_IN_GPU_VR_AND_SOCKET_PROCESS',
ProcessSelector.ALLOW_IN_RDD_AND_SOCKET_PROCESS:
'ALLOW_IN_RDD_AND_SOCKET_PROCESS',
ProcessSelector.ALLOW_IN_GPU_RDD_AND_SOCKET_PROCESS:
'ALLOW_IN_GPU_RDD_AND_SOCKET_PROCESS',
ProcessSelector.ALLOW_IN_GPU_RDD_VR_AND_SOCKET_PROCESS:
'ALLOW_IN_GPU_RDD_VR_AND_SOCKET_PROCESS',
ProcessSelector.ANY_PROCESS: "ANY_PROCESS",
ProcessSelector.MAIN_PROCESS_ONLY: "MAIN_PROCESS_ONLY",
ProcessSelector.CONTENT_PROCESS_ONLY: "CONTENT_PROCESS_ONLY",
ProcessSelector.ALLOW_IN_GPU_PROCESS: "ALLOW_IN_GPU_PROCESS",
ProcessSelector.ALLOW_IN_VR_PROCESS: "ALLOW_IN_VR_PROCESS",
ProcessSelector.ALLOW_IN_SOCKET_PROCESS: "ALLOW_IN_SOCKET_PROCESS",
ProcessSelector.ALLOW_IN_RDD_PROCESS: "ALLOW_IN_RDD_PROCESS",
ProcessSelector.ALLOW_IN_GPU_AND_MAIN_PROCESS: "ALLOW_IN_GPU_AND_MAIN_PROCESS",
ProcessSelector.ALLOW_IN_GPU_AND_SOCKET_PROCESS: "ALLOW_IN_GPU_AND_SOCKET_PROCESS",
ProcessSelector.ALLOW_IN_GPU_AND_VR_PROCESS: "ALLOW_IN_GPU_AND_VR_PROCESS",
ProcessSelector.ALLOW_IN_GPU_VR_AND_SOCKET_PROCESS: "ALLOW_IN_GPU_VR_AND_SOCKET_PROCESS",
ProcessSelector.ALLOW_IN_RDD_AND_SOCKET_PROCESS: "ALLOW_IN_RDD_AND_SOCKET_PROCESS",
ProcessSelector.ALLOW_IN_GPU_RDD_AND_SOCKET_PROCESS: "ALLOW_IN_GPU_RDD_AND_SOCKET_PROCESS",
ProcessSelector.ALLOW_IN_GPU_RDD_VR_AND_SOCKET_PROCESS: "ALLOW_IN_GPU_RDD_VR_AND_SOCKET_PROCESS", # NOQA: E501
}
# Emits the C++ symbolic constant corresponding to a ProcessSelector constant.
def lower_processes(processes):
return 'Module::ProcessSelector::%s' % PROCESSES[processes]
return "Module::ProcessSelector::%s" % PROCESSES[processes]
# Emits the C++ symbolic constant for a ModuleEntry's ModuleID enum entry.
def lower_module_id(module):
return 'ModuleID::%s' % module.name
return "ModuleID::%s" % module.name
# Represents a static string table, indexed by offset. This allows us to
# reference strings from static data structures without requiring runtime
# relocations.
class StringTable(object):
def __init__(self):
self.entries = {}
self.entry_list = []
@ -136,7 +128,7 @@ class StringTable(object):
assert not self._serialized
assert len(string) == len(string.encode('utf-8'))
assert len(string) == len(string.encode("utf-8"))
idx = self.size
self.size += len(string) + 1
@ -155,23 +147,20 @@ class StringTable(object):
idx = 0
for entry in self.entry_list:
str_ = entry.replace('\\', '\\\\').replace('"', r'\"') \
.replace('\n', r'\n')
str_ = entry.replace("\\", "\\\\").replace('"', r"\"").replace("\n", r"\n")
lines.append(' /* 0x%x */ "%s\\0"\n' % (idx, str_))
idx += len(entry) + 1
return ''.join(lines)
return "".join(lines)
# Returns a `StringEntry` struct initializer for the string table entry
# corresponding to the given string. If no matching entry exists, it is
# first created.
def entry_to_cxx(self, string):
idx = self.get_idx(string)
return '{ 0x%x } /* %s */' % (
idx,
pretty_string(string))
return "{ 0x%x } /* %s */" % (idx, pretty_string(string))
strings = StringTable()
@ -183,7 +172,6 @@ interfaces = []
# sub-namespaces. This is used to generate pre-declarations for incomplete
# types referenced in XPCOM manifests.
class Namespace(object):
def __init__(self, name=None):
self.name = name
self.classes = set()
@ -202,16 +190,16 @@ class Namespace(object):
def to_cxx(self):
res = ""
if self.name:
res += 'namespace %s {\n' % self.name
res += "namespace %s {\n" % self.name
for clas in sorted(self.classes):
res += 'class %s;\n' % clas
res += "class %s;\n" % clas
for ns in sorted(self.namespaces.keys()):
res += self.namespaces[ns].to_cxx()
if self.name:
res += '} // namespace %s\n' % self.name
res += "} // namespace %s\n" % self.name
return res
@ -221,19 +209,20 @@ class ModuleEntry(object):
next_anon_id = 0
def __init__(self, data, init_idx):
self.cid = UUIDRepr(UUID(data['cid']))
self.contract_ids = data.get('contract_ids', [])
self.type = data.get('type', 'nsISupports')
self.categories = data.get('categories', {})
self.processes = data.get('processes', 0)
self.headers = data.get('headers', [])
self.cid = UUIDRepr(UUID(data["cid"]))
self.contract_ids = data.get("contract_ids", [])
self.type = data.get("type", "nsISupports")
self.categories = data.get("categories", {})
self.processes = data.get("processes", 0)
self.headers = data.get("headers", [])
self.js_name = data.get('js_name', None)
self.interfaces = data.get('interfaces', [])
self.js_name = data.get("js_name", None)
self.interfaces = data.get("interfaces", [])
if len(self.interfaces) > 255:
raise Exception('JS service %s may not have more than 255 '
'interfaces' % self.js_name)
raise Exception(
"JS service %s may not have more than 255 " "interfaces" % self.js_name
)
self.interfaces_offset = len(interfaces)
for iface in self.interfaces:
@ -247,56 +236,67 @@ class ModuleEntry(object):
# module's constructor.
self.init_idx = init_idx
self.constructor = data.get('constructor', None)
self.legacy_constructor = data.get('legacy_constructor', None)
self.init_method = data.get('init_method', [])
self.constructor = data.get("constructor", None)
self.legacy_constructor = data.get("legacy_constructor", None)
self.init_method = data.get("init_method", [])
self.jsm = data.get('jsm', None)
self.jsm = data.get("jsm", None)
self.external = data.get('external', not (self.headers or
self.legacy_constructor))
self.singleton = data.get('singleton', False)
self.overridable = data.get('overridable', False)
self.external = data.get(
"external", not (self.headers or self.legacy_constructor)
)
self.singleton = data.get("singleton", False)
self.overridable = data.get("overridable", False)
if 'name' in data:
if "name" in data:
self.anonymous = False
self.name = data['name']
self.name = data["name"]
else:
self.anonymous = True
self.name = 'Anonymous%03d' % ModuleEntry.next_anon_id
self.name = "Anonymous%03d" % ModuleEntry.next_anon_id
ModuleEntry.next_anon_id += 1
def error(str_):
raise Exception("Error defining component %s (%s): %s" % (
str(self.cid), ', '.join(map(repr, self.contract_ids)),
str_))
raise Exception(
"Error defining component %s (%s): %s"
% (str(self.cid), ", ".join(map(repr, self.contract_ids)), str_)
)
if self.jsm:
if not self.constructor:
error("JavaScript components must specify a constructor")
for prop in ('init_method', 'legacy_constructor', 'headers'):
for prop in ("init_method", "legacy_constructor", "headers"):
if getattr(self, prop):
error("JavaScript components may not specify a '%s' "
"property" % prop)
error(
"JavaScript components may not specify a '%s' "
"property" % prop
)
elif self.external:
if self.constructor or self.legacy_constructor:
error("Externally-constructed components may not specify "
"'constructor' or 'legacy_constructor' properties")
error(
"Externally-constructed components may not specify "
"'constructor' or 'legacy_constructor' properties"
)
if self.init_method:
error("Externally-constructed components may not specify "
"'init_method' properties")
if self.type == 'nsISupports':
error("Externally-constructed components must specify a type "
"other than nsISupports")
error(
"Externally-constructed components may not specify "
"'init_method' properties"
)
if self.type == "nsISupports":
error(
"Externally-constructed components must specify a type "
"other than nsISupports"
)
if self.constructor and self.legacy_constructor:
error("The 'constructor' and 'legacy_constructor' properties "
"are mutually exclusive")
error(
"The 'constructor' and 'legacy_constructor' properties "
"are mutually exclusive"
)
if self.overridable and not self.contract_ids:
error("Overridable components must specify at least one contract "
"ID")
error("Overridable components must specify at least one contract " "ID")
@property
def contract_id(self):
@ -305,9 +305,11 @@ class ModuleEntry(object):
# Generates the C++ code for a StaticModule struct initializer
# representing this component.
def to_cxx(self):
contract_id = (strings.entry_to_cxx(self.contract_id)
if self.overridable
else '{ 0x%x }' % NO_CONTRACT_ID)
contract_id = (
strings.entry_to_cxx(self.contract_id)
if self.overridable
else "{ 0x%x }" % NO_CONTRACT_ID
)
return """
/* {name} */ {{
@ -315,10 +317,13 @@ class ModuleEntry(object):
{cid},
{contract_id},
{processes},
}}""".format(name=self.name, cid=self.cid.to_cxx(),
cid_string=str(self.cid),
contract_id=contract_id,
processes=lower_processes(self.processes))
}}""".format(
name=self.name,
cid=self.cid.to_cxx(),
cid_string=str(self.cid),
contract_id=contract_id,
processes=lower_processes(self.processes),
)
# Generates the C++ code for a JSServiceEntry representing this module.
def lower_js_service(self):
@ -328,10 +333,12 @@ class ModuleEntry(object):
ModuleID::{name},
{{ {iface_offset} }},
{iface_count}
}}""".format(js_name=strings.entry_to_cxx(self.js_name),
name=self.name,
iface_offset=self.interfaces_offset,
iface_count=len(self.interfaces))
}}""".format(
js_name=strings.entry_to_cxx(self.js_name),
name=self.name,
iface_offset=self.interfaces_offset,
iface_count=len(self.interfaces),
)
# Generates the C++ code necessary to construct an instance of this
# component.
@ -345,40 +352,45 @@ class ModuleEntry(object):
#
# And which returns an `nsresult` indicating success or failure.
def lower_constructor(self):
res = ''
res = ""
if self.init_idx is not None:
res += ' MOZ_TRY(CallInitFunc(%d));\n' % self.init_idx
res += " MOZ_TRY(CallInitFunc(%d));\n" % self.init_idx
if self.legacy_constructor:
res += (' return /* legacy */ %s(nullptr, aIID, aResult);\n'
% self.legacy_constructor)
res += (
" return /* legacy */ %s(nullptr, aIID, aResult);\n"
% self.legacy_constructor
)
return res
if self.jsm:
res += (
' nsCOMPtr<nsISupports> inst;\n'
' MOZ_TRY(ConstructJSMComponent(nsLiteralCString(%s),\n'
' %s,\n'
' getter_AddRefs(inst)));'
'\n' % (json.dumps(self.jsm), json.dumps(self.constructor)))
" nsCOMPtr<nsISupports> inst;\n"
" MOZ_TRY(ConstructJSMComponent(nsLiteralCString(%s),\n"
" %s,\n"
" getter_AddRefs(inst)));"
"\n" % (json.dumps(self.jsm), json.dumps(self.constructor))
)
elif self.external:
res += (' nsCOMPtr<nsISupports> inst = '
'mozCreateComponent<%s>();\n' % self.type)
res += (
" nsCOMPtr<nsISupports> inst = "
"mozCreateComponent<%s>();\n" % self.type
)
# The custom constructor may return null, so check before calling
# any methods.
res += ' NS_ENSURE_TRUE(inst, NS_ERROR_FAILURE);\n'
res += " NS_ENSURE_TRUE(inst, NS_ERROR_FAILURE);\n"
else:
res += ' RefPtr<%s> inst = ' % self.type
res += " RefPtr<%s> inst = " % self.type
if not self.constructor:
res += 'new %s();\n' % self.type
res += "new %s();\n" % self.type
else:
res += '%s();\n' % self.constructor
res += "%s();\n" % self.constructor
# The `new` operator is infallible, so we don't need to worry
# about it returning null, but custom constructors may, so
# check before calling any methods.
res += ' NS_ENSURE_TRUE(inst, NS_ERROR_OUT_OF_MEMORY);\n'
res += " NS_ENSURE_TRUE(inst, NS_ERROR_OUT_OF_MEMORY);\n"
# Check that the constructor function returns an appropriate
# `already_AddRefed` value for our declared type.
@ -392,12 +404,15 @@ class ModuleEntry(object):
std::is_base_of<%(type)s, T>::value,
"Singleton constructor must return correct already_AddRefed");
""" % {'type': self.type, 'constructor': self.constructor}
""" % {
"type": self.type,
"constructor": self.constructor,
}
if self.init_method:
res += ' MOZ_TRY(inst->%s());\n' % self.init_method
res += " MOZ_TRY(inst->%s());\n" % self.init_method
res += ' return inst->QueryInterface(aIID, aResult);\n'
res += " return inst->QueryInterface(aIID, aResult);\n"
return res
@ -409,11 +424,12 @@ class ModuleEntry(object):
assert not self.anonymous
substs = {
'name': self.name,
'id': '::mozilla::xpcom::ModuleID::%s' % self.name,
"name": self.name,
"id": "::mozilla::xpcom::ModuleID::%s" % self.name,
}
res = """
res = (
"""
namespace %(name)s {
static inline const nsID& CID() {
return ::mozilla::xpcom::Components::GetCID(%(id)s);
@ -422,18 +438,26 @@ static inline const nsID& CID() {
static inline ::mozilla::xpcom::GetServiceHelper Service(nsresult* aRv = nullptr) {
return {%(id)s, aRv};
}
""" % substs
"""
% substs
)
if not self.singleton:
res += """
res += (
"""
static inline ::mozilla::xpcom::CreateInstanceHelper Create(nsresult* aRv = nullptr) {
return {%(id)s, aRv};
}
""" % substs
"""
% substs
)
res += """\
res += (
"""\
} // namespace %(name)s
""" % substs
"""
% substs
)
return res
@ -442,14 +466,12 @@ static inline ::mozilla::xpcom::CreateInstanceHelper Create(nsresult* aRv = null
# certain special characters replaced so that it can be used in a C++-style
# (/* ... */) comment.
def pretty_string(string):
return (json.dumps(string).replace('*/', r'*\/')
.replace('/*', r'/\*'))
return json.dumps(string).replace("*/", r"*\/").replace("/*", r"/\*")
# Represents a static contract ID entry, corresponding to a C++ ContractEntry
# struct, mapping a contract ID to a static module entry.
class ContractEntry(object):
def __init__(self, contract, module):
self.contract = contract
self.module = module
@ -459,8 +481,10 @@ class ContractEntry(object):
{{
{contract},
{module_id},
}}""".format(contract=strings.entry_to_cxx(self.contract),
module_id=lower_module_id(self.module))
}}""".format(
contract=strings.entry_to_cxx(self.contract),
module_id=lower_module_id(self.module),
)
# Generates the C++ code for the StaticCategoryEntry and StaticCategory
@ -473,26 +497,30 @@ def gen_categories(substs, categories):
for category, entries in sorted(categories.items()):
entries.sort()
cats.append(' { %s,\n'
' %d, %d },\n'
% (strings.entry_to_cxx(category),
count, len(entries)))
cats.append(
" { %s,\n"
" %d, %d },\n" % (strings.entry_to_cxx(category), count, len(entries))
)
count += len(entries)
ents.append(' /* %s */\n' % pretty_string(category))
ents.append(" /* %s */\n" % pretty_string(category))
for entry, value, processes in entries:
ents.append(' { %s,\n'
' %s,\n'
' %s },\n'
% (strings.entry_to_cxx(entry),
strings.entry_to_cxx(value),
lower_processes(processes)))
ents.append('\n')
ents.append(
" { %s,\n"
" %s,\n"
" %s },\n"
% (
strings.entry_to_cxx(entry),
strings.entry_to_cxx(value),
lower_processes(processes),
)
)
ents.append("\n")
ents.pop()
substs['category_count'] = len(cats)
substs['categories'] = ''.join(cats)
substs['category_entries'] = ''.join(ents)
substs["category_count"] = len(cats)
substs["categories"] = "".join(cats)
substs["category_entries"] = "".join(ents)
# Generates the C++ code for all Init and Unload functions declared in XPCOM
@ -509,26 +537,29 @@ def gen_module_funcs(substs, funcs):
"""
for i, (init, unload) in enumerate(funcs):
init_code = '%s();' % init if init else '/* empty */'
init_code = "%s();" % init if init else "/* empty */"
inits.append(template % (i, init_code))
if unload:
unloads.append("""\
unloads.append(
"""\
if (CalledInit(%d)) {
%s();
}
""" % (i, unload))
"""
% (i, unload)
)
substs['init_funcs'] = ''.join(inits)
substs['unload_funcs'] = ''.join(unloads)
substs['init_count'] = len(funcs)
substs["init_funcs"] = "".join(inits)
substs["unload_funcs"] = "".join(unloads)
substs["init_count"] = len(funcs)
def gen_interfaces(ifaces):
res = []
for iface in ifaces:
res.append(' nsXPTInterface::%s,\n' % iface)
return ''.join(res)
res.append(" nsXPTInterface::%s,\n" % iface)
return "".join(res)
# Generates class pre-declarations for any types referenced in `Classes` array
@ -538,7 +569,7 @@ def gen_decls(types):
root_ns = Namespace()
for type_ in sorted(types):
parts = type_.split('::')
parts = type_.split("::")
ns = root_ns
for part in parts[:-1]:
@ -554,14 +585,17 @@ def gen_decls(types):
def gen_constructors(entries):
constructors = []
for entry in entries:
constructors.append("""\
constructors.append(
"""\
case {id}: {{
{constructor}\
}}
""".format(id=lower_module_id(entry),
constructor=entry.lower_constructor()))
""".format(
id=lower_module_id(entry), constructor=entry.lower_constructor()
)
)
return ''.join(constructors)
return "".join(constructors)
# Generates the getter code for each named component entry in the
@ -570,9 +604,7 @@ def gen_getters(entries):
entries = list(entries)
entries.sort(key=lambda e: e.name)
return ''.join(entry.lower_getters()
for entry in entries
if not entry.anonymous)
return "".join(entry.lower_getters() for entry in entries if not entry.anonymous)
def gen_includes(substs, all_headers):
@ -580,23 +612,24 @@ def gen_includes(substs, all_headers):
absolute_headers = set()
for header in all_headers:
if header.startswith('/'):
if header.startswith("/"):
absolute_headers.add(header)
else:
headers.add(header)
includes = ['#include "%s"' % header for header in sorted(headers)]
substs['includes'] = '\n'.join(includes) + '\n'
substs["includes"] = "\n".join(includes) + "\n"
relative_includes = ['#include "../..%s"' % header
for header in sorted(absolute_headers)]
substs['relative_includes'] = '\n'.join(relative_includes) + '\n'
relative_includes = [
'#include "../..%s"' % header for header in sorted(absolute_headers)
]
substs["relative_includes"] = "\n".join(relative_includes) + "\n"
def to_list(val):
if isinstance(val, (list, tuple)):
return val
return val,
return (val,)
def gen_substs(manifests):
@ -608,19 +641,19 @@ def gen_substs(manifests):
categories = defaultdict(list)
for manifest in manifests:
headers |= set(manifest.get('Headers', []))
headers |= set(manifest.get("Headers", []))
init_idx = None
init = manifest.get('InitFunc')
unload = manifest.get('UnloadFunc')
init = manifest.get("InitFunc")
unload = manifest.get("UnloadFunc")
if init or unload:
init_idx = len(module_funcs)
module_funcs.append((init, unload))
for clas in manifest['Classes']:
for clas in manifest["Classes"]:
modules.append(ModuleEntry(clas, init_idx))
for category, entries in manifest.get('Categories', {}).items():
for category, entries in manifest.get("Categories", {}).items():
for key, entry in entries.items():
if isinstance(entry, tuple):
value, process = entry
@ -642,7 +675,7 @@ def gen_substs(manifests):
for contract_id in mod.contract_ids:
if contract_id in contract_map:
raise Exception('Duplicate contract ID: %s' % contract_id)
raise Exception("Duplicate contract ID: %s" % contract_id)
entry = ContractEntry(contract_id, mod)
contracts.append(entry)
@ -650,8 +683,7 @@ def gen_substs(manifests):
for category, entries in mod.categories.items():
for entry in to_list(entries):
categories[category].append((entry, mod.contract_id,
mod.processes))
categories[category].append((entry, mod.contract_id, mod.processes))
if mod.type and not mod.headers:
types.add(mod.type)
@ -661,90 +693,87 @@ def gen_substs(manifests):
if mod.js_name:
if mod.js_name in js_services:
raise Exception('Duplicate JS service name: %s' % mod.js_name)
raise Exception("Duplicate JS service name: %s" % mod.js_name)
js_services[mod.js_name] = mod
if str(mod.cid) in cids:
raise Exception('Duplicate cid: %s' % str(mod.cid))
raise Exception("Duplicate cid: %s" % str(mod.cid))
cids.add(str(mod.cid))
cid_phf = PerfectHash(modules, PHF_SIZE,
key=lambda module: module.cid.bytes)
cid_phf = PerfectHash(modules, PHF_SIZE, key=lambda module: module.cid.bytes)
contract_phf = PerfectHash(contracts, PHF_SIZE,
key=lambda entry: entry.contract)
contract_phf = PerfectHash(contracts, PHF_SIZE, key=lambda entry: entry.contract)
js_services_phf = PerfectHash(list(js_services.values()), PHF_SIZE,
key=lambda entry: entry.js_name)
js_services_phf = PerfectHash(
list(js_services.values()), PHF_SIZE, key=lambda entry: entry.js_name
)
substs = {}
gen_categories(substs, categories)
substs['module_ids'] = ''.join(' %s,\n' % entry.name
for entry in cid_phf.entries)
substs["module_ids"] = "".join(" %s,\n" % entry.name for entry in cid_phf.entries)
substs['module_count'] = len(modules)
substs['contract_count'] = len(contracts)
substs["module_count"] = len(modules)
substs["contract_count"] = len(contracts)
gen_module_funcs(substs, module_funcs)
gen_includes(substs, headers)
substs['component_jsms'] = '\n'.join(' %s,' % strings.entry_to_cxx(jsm)
for jsm in sorted(jsms)) + '\n'
substs["component_jsms"] = (
"\n".join(" %s," % strings.entry_to_cxx(jsm) for jsm in sorted(jsms)) + "\n"
)
substs['interfaces'] = gen_interfaces(interfaces)
substs["interfaces"] = gen_interfaces(interfaces)
substs['decls'] = gen_decls(types)
substs["decls"] = gen_decls(types)
substs['constructors'] = gen_constructors(cid_phf.entries)
substs["constructors"] = gen_constructors(cid_phf.entries)
substs['component_getters'] = gen_getters(cid_phf.entries)
substs["component_getters"] = gen_getters(cid_phf.entries)
substs['module_cid_table'] = cid_phf.cxx_codegen(
name='ModuleByCID',
entry_type='StaticModule',
entries_name='gStaticModules',
substs["module_cid_table"] = cid_phf.cxx_codegen(
name="ModuleByCID",
entry_type="StaticModule",
entries_name="gStaticModules",
lower_entry=lambda entry: entry.to_cxx(),
return_type="const StaticModule*",
return_entry=(
"return entry.CID().Equals(aKey) && entry.Active()" " ? &entry : nullptr;"
),
key_type="const nsID&",
key_bytes="reinterpret_cast<const char*>(&aKey)",
key_length="sizeof(nsID)",
)
return_type='const StaticModule*',
return_entry=('return entry.CID().Equals(aKey) && entry.Active()'
' ? &entry : nullptr;'),
key_type='const nsID&',
key_bytes='reinterpret_cast<const char*>(&aKey)',
key_length='sizeof(nsID)')
substs['module_contract_id_table'] = contract_phf.cxx_codegen(
name='LookupContractID',
entry_type='ContractEntry',
entries_name='gContractEntries',
substs["module_contract_id_table"] = contract_phf.cxx_codegen(
name="LookupContractID",
entry_type="ContractEntry",
entries_name="gContractEntries",
lower_entry=lambda entry: entry.to_cxx(),
return_type="const ContractEntry*",
return_entry="return entry.Matches(aKey) ? &entry : nullptr;",
key_type="const nsACString&",
key_bytes="aKey.BeginReading()",
key_length="aKey.Length()",
)
return_type='const ContractEntry*',
return_entry='return entry.Matches(aKey) ? &entry : nullptr;',
key_type='const nsACString&',
key_bytes='aKey.BeginReading()',
key_length='aKey.Length()')
substs['js_services_table'] = js_services_phf.cxx_codegen(
name='LookupJSService',
entry_type='JSServiceEntry',
entries_name='gJSServices',
substs["js_services_table"] = js_services_phf.cxx_codegen(
name="LookupJSService",
entry_type="JSServiceEntry",
entries_name="gJSServices",
lower_entry=lambda entry: entry.lower_js_service(),
return_type='const JSServiceEntry*',
return_entry='return entry.Name() == aKey ? &entry : nullptr;',
key_type='const nsACString&',
key_bytes='aKey.BeginReading()',
key_length='aKey.Length()')
return_type="const JSServiceEntry*",
return_entry="return entry.Name() == aKey ? &entry : nullptr;",
key_type="const nsACString&",
key_bytes="aKey.BeginReading()",
key_length="aKey.Length()",
)
# Do this only after everything else has been emitted so we're sure the
# string table is complete.
substs['strings'] = strings.to_cxx()
substs["strings"] = strings.to_cxx()
return substs
@ -754,9 +783,11 @@ def defined(subst):
def read_manifest(filename):
glbl = {'buildconfig': buildconfig,
'defined': defined,
'ProcessSelector': ProcessSelector}
glbl = {
"buildconfig": buildconfig,
"defined": defined,
"ProcessSelector": ProcessSelector,
}
exec(open(filename).read(), glbl)
return glbl
@ -765,33 +796,34 @@ def main(fd, conf_file, template_file):
def open_output(filename):
return FileAvoidWrite(os.path.join(os.path.dirname(fd.name), filename))
conf = json.load(open(conf_file, 'r'))
conf = json.load(open(conf_file, "r"))
deps = set()
manifests = []
for filename in conf['manifests']:
for filename in conf["manifests"]:
deps.add(filename)
manifest = read_manifest(filename)
manifests.append(manifest)
manifest.setdefault('Priority', 50)
manifest['__filename__'] = filename
manifest.setdefault("Priority", 50)
manifest["__filename__"] = filename
manifests.sort(key=lambda man: (man['Priority'], man['__filename__']))
manifests.sort(key=lambda man: (man["Priority"], man["__filename__"]))
substs = gen_substs(manifests)
def replacer(match):
return substs[match.group(1)]
with open_output('StaticComponents.cpp') as fh:
with open(template_file, 'r') as tfh:
with open_output("StaticComponents.cpp") as fh:
with open(template_file, "r") as tfh:
template = tfh.read()
fh.write(re.sub(r'//# @([a-zA-Z_]+)@\n', replacer, template))
fh.write(re.sub(r"//# @([a-zA-Z_]+)@\n", replacer, template))
with open_output('StaticComponentData.h') as fh:
fh.write("""\
with open_output("StaticComponentData.h") as fh:
fh.write(
"""\
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
@ -816,9 +848,12 @@ static constexpr size_t kModuleInitCount = %(init_count)d;
} // namespace mozilla
#endif
""" % substs)
"""
% substs
)
fd.write("""\
fd.write(
"""\
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
@ -893,6 +928,8 @@ namespace components {
} // namespace mozilla
#endif
""" % substs)
"""
% substs
)
return deps