From 210585edd26491e28ecbdc383f7232b95af651fc Mon Sep 17 00:00:00 2001 From: Ricky Stewart Date: Mon, 26 Oct 2020 18:21:44 +0000 Subject: [PATCH] Bug 1672023 - Remove excluded files from `black.yml` These files were omitted from the original patch because reformatting them required some manual intervention in order to avoid breaking unit tests. Generally the `noqa` lines were already there and just needed to be moved from one line to another (due to the reformatting by `black`), but sometimes `black` saw fit to move a bunch of stuff all onto one line, requiring me to introduce new `noqa` lines. Besides the autoformat by `black` and some manual fixups, this patch contains no other changes. # ignore-this-changeset Differential Revision: https://phabricator.services.mozilla.com/D94052 Depends on D94045 --- .../tests/marionette/test_refresh_firefox.py | 270 ++- build/pgo/genpgocert.py | 129 +- config/check_macroassembler_style.py | 225 +- js/src/devtools/rootAnalysis/t/testlib.py | 127 +- js/src/util/make_unicode.py | 955 +++++---- mobile/android/mach_commands.py | 533 +++-- python/mozbuild/mozbuild/mach_commands.py | 1864 +++++++++++------ python/mozbuild/mozbuild/telemetry.py | 331 +-- .../mozbuild/test/backend/test_build.py | 260 +-- .../test/backend/test_recursivemake.py | 1382 ++++++------ .../test/codecoverage/test_lcov_rewrite.py | 261 ++- .../taskgraph/transforms/bouncer_aliases.py | 80 +- .../taskgraph/transforms/mar_signing.py | 125 +- .../transforms/repackage_signing_partner.py | 168 +- testing/addtest.py | 97 +- testing/mochitest/mochitest_options.py | 1678 +++++++++------ .../mozpower/tests/test_macintelpower.py | 30 +- .../mozharness/mozilla/building/buildbase.py | 1082 +++++----- .../mozharness/mozilla/testing/errors.py | 158 +- .../mozharness/mozilla/testing/raptor.py | 1221 ++++++----- .../mozharness/mozilla/testing/testbase.py | 536 +++-- .../mozharness/scripts/desktop_unittest.py | 1000 +++++---- testing/mozharness/scripts/marionette.py | 512 +++-- .../scripts/release/bouncer_check.py | 129 +- .../release/update-verify-config-creator.py | 516 +++-- testing/talos/talos/test.py | 632 +++--- testing/talos/talos/unittests/test_xtalos.py | 58 +- testing/web-platform/metamerge.py | 69 +- .../mozparsers/parse_histograms.py | 573 ++--- tools/browsertime/mach_commands.py | 381 ++-- tools/lint/black.yml | 34 - tools/power/mach_commands.py | 65 +- tools/tryselect/selectors/coverage.py | 266 ++- .../test_make_incremental_updates.py | 106 +- xpcom/components/gen_static_components.py | 539 ++--- 35 files changed, 9604 insertions(+), 6788 deletions(-) diff --git a/browser/components/migration/tests/marionette/test_refresh_firefox.py b/browser/components/migration/tests/marionette/test_refresh_firefox.py index 033b8be104ea..4de3de5ab784 100644 --- a/browser/components/migration/tests/marionette/test_refresh_firefox.py +++ b/browser/components/migration/tests/marionette/test_refresh_firefox.py @@ -41,7 +41,8 @@ class TestFirefoxRefresh(MarionetteTestCase): _expectedURLs = ["about:robots", "about:mozilla"] def savePassword(self): - self.runCode(""" + self.runCode( + """ let myLogin = new global.LoginInfo( "test.marionette.mozilla.com", "http://test.marionette.mozilla.com/some/form/", @@ -52,10 +53,13 @@ class TestFirefoxRefresh(MarionetteTestCase): "password" ); Services.logins.addLogin(myLogin) - """, script_args=(self._username, self._password)) + """, + script_args=(self._username, self._password), + ) def createBookmarkInMenu(self): - error = self.runAsyncCode(""" + error = 
self.runAsyncCode( + """ // let url = arguments[0]; // let title = arguments[1]; // let resolve = arguments[arguments.length - 1]; @@ -63,12 +67,15 @@ class TestFirefoxRefresh(MarionetteTestCase): PlacesUtils.bookmarks.insert({ parentGuid: PlacesUtils.bookmarks.menuGuid, url, title }).then(() => resolve(false), resolve); - """, script_args=(self._bookmarkURL, self._bookmarkText)) + """, + script_args=(self._bookmarkURL, self._bookmarkText), + ) if error: print(error) def createBookmarksOnToolbar(self): - error = self.runAsyncCode(""" + error = self.runAsyncCode( + """ let resolve = arguments[arguments.length - 1]; let children = []; for (let i = 1; i <= 5; i++) { @@ -78,12 +85,14 @@ class TestFirefoxRefresh(MarionetteTestCase): guid: PlacesUtils.bookmarks.toolbarGuid, children }).then(() => resolve(false), resolve); - """) + """ + ) if error: print(error) def createHistory(self): - error = self.runAsyncCode(""" + error = self.runAsyncCode( + """ let resolve = arguments[arguments.length - 1]; PlacesUtils.history.insert({ url: arguments[0], @@ -94,12 +103,15 @@ class TestFirefoxRefresh(MarionetteTestCase): }] }).then(() => resolve(false), ex => resolve("Unexpected error in adding visit: " + ex)); - """, script_args=(self._historyURL, self._historyTitle)) + """, + script_args=(self._historyURL, self._historyTitle), + ) if error: print(error) def createFormHistory(self): - error = self.runAsyncCode(""" + error = self.runAsyncCode( + """ let updateDefinition = { op: "add", fieldname: arguments[0], @@ -119,14 +131,17 @@ class TestFirefoxRefresh(MarionetteTestCase): } } }); - """, script_args=(self._formHistoryFieldName, self._formHistoryValue)) + """, + script_args=(self._formHistoryFieldName, self._formHistoryValue), + ) if error: print(error) def createFormAutofill(self): if not self._formAutofillAvailable: return - self._formAutofillAddressGuid = self.runAsyncCode(""" + self._formAutofillAddressGuid = self.runAsyncCode( + """ let resolve = arguments[arguments.length - 1]; const TEST_ADDRESS_1 = { "given-name": "John", @@ -144,19 +159,29 @@ class TestFirefoxRefresh(MarionetteTestCase): return global.formAutofillStorage.initialize().then(() => { return global.formAutofillStorage.addresses.add(TEST_ADDRESS_1); }).then(resolve); - """) + """ + ) def createCookie(self): - self.runCode(""" + self.runCode( + """ // Expire in 15 minutes: let expireTime = Math.floor(Date.now() / 1000) + 15 * 60; Services.cookies.add(arguments[0], arguments[1], arguments[2], arguments[3], true, false, false, expireTime, {}, Ci.nsICookie.SAMESITE_NONE, Ci.nsICookie.SCHEME_UNSET); - """, script_args=(self._cookieHost, self._cookiePath, self._cookieName, self._cookieValue)) + """, + script_args=( + self._cookieHost, + self._cookiePath, + self._cookieName, + self._cookieValue, + ), + ) def createSession(self): - self.runAsyncCode(""" + self.runAsyncCode( + """ let resolve = arguments[arguments.length - 1]; const COMPLETE_STATE = Ci.nsIWebProgressListener.STATE_STOP + Ci.nsIWebProgressListener.STATE_IS_NETWORK; @@ -193,66 +218,82 @@ class TestFirefoxRefresh(MarionetteTestCase): gBrowser.removeTab(tab); } } - """, script_args=(self._expectedURLs,)) # NOQA: E501 + """, # NOQA: E501 + script_args=(self._expectedURLs,), + ) def createFxa(self): # This script will write an entry to the login manager and create # a signedInUser.json in the profile dir. 
- self.runAsyncCode(""" + self.runAsyncCode( + """ let resolve = arguments[arguments.length - 1]; Cu.import("resource://gre/modules/FxAccountsStorage.jsm"); let storage = new FxAccountsStorageManager(); let data = {email: "test@test.com", uid: "uid", keyFetchToken: "top-secret"}; storage.initialize(data); storage.finalize().then(resolve); - """) + """ + ) def createSync(self): # This script will write the canonical preference which indicates a user # is signed into sync. - self.marionette.execute_script(""" + self.marionette.execute_script( + """ Services.prefs.setStringPref("services.sync.username", "test@test.com"); - """) + """ + ) def checkPassword(self): - loginInfo = self.marionette.execute_script(""" + loginInfo = self.marionette.execute_script( + """ let ary = Services.logins.findLogins( "test.marionette.mozilla.com", "http://test.marionette.mozilla.com/some/form/", null, {}); return ary.length ? ary : {username: "null", password: "null"}; - """) + """ + ) self.assertEqual(len(loginInfo), 1) - self.assertEqual(loginInfo[0]['username'], self._username) - self.assertEqual(loginInfo[0]['password'], self._password) + self.assertEqual(loginInfo[0]["username"], self._username) + self.assertEqual(loginInfo[0]["password"], self._password) - loginCount = self.marionette.execute_script(""" + loginCount = self.marionette.execute_script( + """ return Services.logins.getAllLogins().length; - """) + """ + ) # Note that we expect 2 logins - one from us, one from sync. self.assertEqual(loginCount, 2, "No other logins are present") def checkBookmarkInMenu(self): - titleInBookmarks = self.runAsyncCode(""" + titleInBookmarks = self.runAsyncCode( + """ let [url, resolve] = arguments; PlacesUtils.bookmarks.fetch({url}).then( bookmark => resolve(bookmark ? bookmark.title : ""), ex => resolve(ex) ); - """, script_args=(self._bookmarkURL,)) + """, + script_args=(self._bookmarkURL,), + ) self.assertEqual(titleInBookmarks, self._bookmarkText) def checkBookmarkToolbarVisibility(self): - toolbarVisible = self.marionette.execute_script(""" + toolbarVisible = self.marionette.execute_script( + """ const BROWSER_DOCURL = AppConstants.BROWSER_CHROME_URL; return Services.xulStore.getValue(BROWSER_DOCURL, "PersonalToolbar", "collapsed"); - """) + """ + ) if toolbarVisible == "": toolbarVisible = "false" self.assertEqual(toolbarVisible, "false") def checkHistory(self): - historyResult = self.runAsyncCode(""" + historyResult = self.runAsyncCode( + """ let resolve = arguments[arguments.length - 1]; PlacesUtils.history.fetch(arguments[0]).then(pageInfo => { if (!pageInfo) { @@ -263,15 +304,18 @@ class TestFirefoxRefresh(MarionetteTestCase): }).catch(e => { resolve("Unexpected error in fetching page: " + e); }); - """, script_args=(self._historyURL,)) + """, + script_args=(self._historyURL,), + ) if type(historyResult) == str: self.fail(historyResult) return - self.assertEqual(historyResult['title'], self._historyTitle) + self.assertEqual(historyResult["title"], self._historyTitle) def checkFormHistory(self): - formFieldResults = self.runAsyncCode(""" + formFieldResults = self.runAsyncCode( + """ let resolve = arguments[arguments.length - 1]; let results = []; global.FormHistory.search(["value"], {fieldname: arguments[0]}, { @@ -285,20 +329,24 @@ class TestFirefoxRefresh(MarionetteTestCase): resolve(results); }, }); - """, script_args=(self._formHistoryFieldName,)) + """, + script_args=(self._formHistoryFieldName,), + ) if type(formFieldResults) == str: self.fail(formFieldResults) return formFieldResultCount = 
len(formFieldResults) - self.assertEqual(formFieldResultCount, 1, - "Should have exactly 1 entry for this field, got %d" % - formFieldResultCount) + self.assertEqual( + formFieldResultCount, + 1, + "Should have exactly 1 entry for this field, got %d" % formFieldResultCount, + ) if formFieldResultCount == 1: - self.assertEqual( - formFieldResults[0]['value'], self._formHistoryValue) + self.assertEqual(formFieldResults[0]["value"], self._formHistoryValue) - formHistoryCount = self.runAsyncCode(""" + formHistoryCount = self.runAsyncCode( + """ let [resolve] = arguments; let count; let callbacks = { @@ -308,33 +356,42 @@ class TestFirefoxRefresh(MarionetteTestCase): }, }; global.FormHistory.count({}, callbacks); - """) - self.assertEqual(formHistoryCount, 1, - "There should be only 1 entry in the form history") + """ + ) + self.assertEqual( + formHistoryCount, 1, "There should be only 1 entry in the form history" + ) def checkFormAutofill(self): if not self._formAutofillAvailable: return - formAutofillResults = self.runAsyncCode(""" + formAutofillResults = self.runAsyncCode( + """ let resolve = arguments[arguments.length - 1]; return global.formAutofillStorage.initialize().then(() => { return global.formAutofillStorage.addresses.getAll() }).then(resolve); - """,) + """, + ) if type(formAutofillResults) == str: self.fail(formAutofillResults) return formAutofillAddressCount = len(formAutofillResults) - self.assertEqual(formAutofillAddressCount, 1, - "Should have exactly 1 saved address, got %d" % formAutofillAddressCount) + self.assertEqual( + formAutofillAddressCount, + 1, + "Should have exactly 1 saved address, got %d" % formAutofillAddressCount, + ) if formAutofillAddressCount == 1: self.assertEqual( - formAutofillResults[0]['guid'], self._formAutofillAddressGuid) + formAutofillResults[0]["guid"], self._formAutofillAddressGuid + ) def checkCookie(self): - cookieInfo = self.runCode(""" + cookieInfo = self.runCode( + """ try { let cookies = Services.cookies.getCookiesFromHost(arguments[0], {}); let cookie = null; @@ -351,18 +408,22 @@ class TestFirefoxRefresh(MarionetteTestCase): } catch (ex) { return "got exception trying to fetch cookie: " + ex; } - """, script_args=(self._cookieHost,)) + """, + script_args=(self._cookieHost,), + ) if not isinstance(cookieInfo, dict): self.fail(cookieInfo) return - self.assertEqual(cookieInfo['path'], self._cookiePath) - self.assertEqual(cookieInfo['value'], self._cookieValue) - self.assertEqual(cookieInfo['name'], self._cookieName) + self.assertEqual(cookieInfo["path"], self._cookiePath) + self.assertEqual(cookieInfo["value"], self._cookieValue) + self.assertEqual(cookieInfo["name"], self._cookieName) def checkSession(self): - tabURIs = self.runCode(""" + tabURIs = self.runCode( + """ return [... gBrowser.browsers].map(b => b.currentURI && b.currentURI.spec) - """) + """ + ) self.assertSequenceEqual(tabURIs, ["about:welcomeback"]) # Dismiss modal dialog if any. 
This is mainly to dismiss the check for @@ -373,7 +434,8 @@ class TestFirefoxRefresh(MarionetteTestCase): except NoAlertPresentException: pass - tabURIs = self.runAsyncCode(""" + tabURIs = self.runAsyncCode( + """ let resolve = arguments[arguments.length - 1] let mm = gBrowser.selectedBrowser.messageManager; @@ -396,11 +458,13 @@ class TestFirefoxRefresh(MarionetteTestCase): }; mm.loadFrameScript("data:application/javascript,(" + fs.toString() + ")()", true); - """) # NOQA: E501 + """ # NOQA: E501 + ) self.assertSequenceEqual(tabURIs, self._expectedURLs) def checkFxA(self): - result = self.runAsyncCode(""" + result = self.runAsyncCode( + """ Cu.import("resource://gre/modules/FxAccountsStorage.jsm"); let resolve = arguments[arguments.length - 1]; let storage = new FxAccountsStorageManager(); @@ -414,7 +478,8 @@ class TestFirefoxRefresh(MarionetteTestCase): }).catch(err => { resolve(err.toString()); }); - """) + """ + ) if type(result) != dict: self.fail(result) return @@ -423,9 +488,11 @@ class TestFirefoxRefresh(MarionetteTestCase): self.assertEqual(result["accountData"]["keyFetchToken"], "top-secret") def checkSync(self, expect_sync_user): - pref_value = self.marionette.execute_script(""" + pref_value = self.marionette.execute_script( + """ return Services.prefs.getStringPref("services.sync.username", null); - """) + """ + ) expected_value = "test@test.com" if expect_sync_user else None self.assertEqual(pref_value, expected_value) @@ -456,35 +523,35 @@ class TestFirefoxRefresh(MarionetteTestCase): def setUpScriptData(self): self.marionette.set_context(self.marionette.CONTEXT_CHROME) - self.runCode(""" + self.runCode( + """ window.global = {}; global.LoginInfo = Components.Constructor("@mozilla.org/login-manager/loginInfo;1", "nsILoginInfo", "init"); global.profSvc = Cc["@mozilla.org/toolkit/profile-service;1"].getService(Ci.nsIToolkitProfileService); global.Preferences = Cu.import("resource://gre/modules/Preferences.jsm", {}).Preferences; global.FormHistory = Cu.import("resource://gre/modules/FormHistory.jsm", {}).FormHistory; - """) # NOQA: E501 - self._formAutofillAvailable = self.runCode(""" + """ # NOQA: E501 + ) + self._formAutofillAvailable = self.runCode( + """ try { global.formAutofillStorage = Cu.import("resource://formautofill/FormAutofillStorage.jsm", {}).formAutofillStorage; } catch(e) { return false; } return true; - """) # NOQA: E501 + """ # NOQA: E501 + ) def runCode(self, script, *args, **kwargs): - return self.marionette.execute_script(script, - new_sandbox=False, - sandbox=self._sandbox, - *args, - **kwargs) + return self.marionette.execute_script( + script, new_sandbox=False, sandbox=self._sandbox, *args, **kwargs + ) def runAsyncCode(self, script, *args, **kwargs): - return self.marionette.execute_async_script(script, - new_sandbox=False, - sandbox=self._sandbox, - *args, - **kwargs) + return self.marionette.execute_async_script( + script, new_sandbox=False, sandbox=self._sandbox, *args, **kwargs + ) def setUp(self): MarionetteTestCase.setUp(self) @@ -511,14 +578,19 @@ class TestFirefoxRefresh(MarionetteTestCase): if cleanup.reset_profile_path: # Remove ourselves from profiles.ini - self.runCode(""" + self.runCode( + """ let name = arguments[0]; let profile = global.profSvc.getProfileByName(name); profile.remove(false) global.profSvc.flush(); - """, script_args=(cleanup.profile_name_to_remove,)) + """, + script_args=(cleanup.profile_name_to_remove,), + ) # Remove the local profile dir if it's not the same as the profile dir: - different_path = 
cleanup.reset_profile_local_path != cleanup.reset_profile_path + different_path = ( + cleanup.reset_profile_local_path != cleanup.reset_profile_path + ) if cleanup.reset_profile_local_path and different_path: mozfile.remove(cleanup.reset_profile_local_path) @@ -528,7 +600,8 @@ class TestFirefoxRefresh(MarionetteTestCase): def doReset(self): profileName = "marionette-test-profile-" + str(int(time.time() * 1000)) cleanup = PendingCleanup(profileName) - self.runCode(""" + self.runCode( + """ // Ensure the current (temporary) profile is in profiles.ini: let profD = Services.dirsvc.get("ProfD", Ci.nsIFile); let profileName = arguments[1]; @@ -546,24 +619,33 @@ class TestFirefoxRefresh(MarionetteTestCase): env.set("MOZ_MARIONETTE_PREF_STATE_ACROSS_RESTARTS", JSON.stringify(prefObj)); env.set("MOZ_RESET_PROFILE_RESTART", "1"); env.set("XRE_PROFILE_PATH", arguments[0]); - """, script_args=(self.marionette.instance.profile.profile, profileName,)) + """, + script_args=( + self.marionette.instance.profile.profile, + profileName, + ), + ) - profileLeafName = os.path.basename(os.path.normpath( - self.marionette.instance.profile.profile)) + profileLeafName = os.path.basename( + os.path.normpath(self.marionette.instance.profile.profile) + ) # Now restart the browser to get it reset: self.marionette.restart(clean=False, in_app=True) self.setUpScriptData() # Determine the new profile path (we'll need to remove it when we're done) - [cleanup.reset_profile_path, cleanup.reset_profile_local_path] = self.runCode(""" + [cleanup.reset_profile_path, cleanup.reset_profile_local_path] = self.runCode( + """ let profD = Services.dirsvc.get("ProfD", Ci.nsIFile); let localD = Services.dirsvc.get("ProfLD", Ci.nsIFile); return [profD.path, localD.path]; - """) + """ + ) # Determine the backup path - cleanup.desktop_backup_path = self.runCode(""" + cleanup.desktop_backup_path = self.runCode( + """ let container; try { container = Services.dirsvc.get("Desk", Ci.nsIFile); @@ -575,12 +657,18 @@ class TestFirefoxRefresh(MarionetteTestCase): container.append(dirName); container.append(arguments[0]); return container.path; - """, script_args=(profileLeafName,)) # NOQA: E501 + """, # NOQA: E501 + script_args=(profileLeafName,), + ) - self.assertTrue(os.path.isdir(cleanup.reset_profile_path), - "Reset profile path should be present") - self.assertTrue(os.path.isdir(cleanup.desktop_backup_path), - "Backup profile path should be present") + self.assertTrue( + os.path.isdir(cleanup.reset_profile_path), + "Reset profile path should be present", + ) + self.assertTrue( + os.path.isdir(cleanup.desktop_backup_path), + "Backup profile path should be present", + ) self.assertIn(cleanup.profile_name_to_remove, cleanup.reset_profile_path) return cleanup diff --git a/build/pgo/genpgocert.py b/build/pgo/genpgocert.py index 39ac1fc87959..8b3a83c1962c 100644 --- a/build/pgo/genpgocert.py +++ b/build/pgo/genpgocert.py @@ -23,7 +23,7 @@ from distutils.spawn import find_executable dbFiles = [ re.compile("^cert[0-9]+\.db$"), re.compile("^key[0-9]+\.db$"), - re.compile("^secmod\.db$") + re.compile("^secmod\.db$"), ] @@ -53,10 +53,13 @@ def runUtil(util, args, inputdata=None, outputstream=None): env[pathvar] = "%s%s%s" % (app_path, os.pathsep, env[pathvar]) else: env[pathvar] = app_path - proc = subprocess.Popen([util] + args, env=env, - stdin=subprocess.PIPE if inputdata else None, - stdout=outputstream, - universal_newlines=True) + proc = subprocess.Popen( + [util] + args, + env=env, + stdin=subprocess.PIPE if inputdata else None, + 
stdout=outputstream, + universal_newlines=True, + ) proc.communicate(inputdata) return proc.returncode @@ -67,11 +70,13 @@ def createRandomFile(randomFile): def writeCertspecForServerLocations(fd): - locations = ServerLocations(os.path.join(build.topsrcdir, - "build", "pgo", - "server-locations.txt")) + locations = ServerLocations( + os.path.join(build.topsrcdir, "build", "pgo", "server-locations.txt") + ) SAN = [] - for loc in [i for i in iter(locations) if i.scheme == "https" and "nocert" not in i.options]: + for loc in [ + i for i in iter(locations) if i.scheme == "https" and "nocert" not in i.options + ]: customCertOption = False customCertRE = re.compile("^cert=(?:\w+)") for _ in [i for i in loc.options if customCertRE.match(i)]: @@ -84,7 +89,9 @@ def writeCertspecForServerLocations(fd): if not customCertOption: SAN.append(loc.host) - fd.write("issuer:printableString/CN=Temporary Certificate Authority/O=Mozilla Testing/OU=Profile Guided Optimization\n") # NOQA: E501 + fd.write( + "issuer:printableString/CN=Temporary Certificate Authority/O=Mozilla Testing/OU=Profile Guided Optimization\n" # NOQA: E501 + ) fd.write("subject:{}\n".format(SAN[0])) fd.write("extension:subjectAlternativeName:{}\n".format(",".join(SAN))) @@ -94,13 +101,15 @@ def constructCertDatabase(build, srcDir): certutil = build.get_binary_path(what="certutil") pk12util = build.get_binary_path(what="pk12util") except BinaryNotFoundException as e: - print('{}\n\n{}\n'.format(e, e.help())) + print("{}\n\n{}\n".format(e, e.help())) return 1 openssl = find_executable("openssl") - pycert = os.path.join(build.topsrcdir, "security", "manager", "ssl", "tests", - "unit", "pycert.py") - pykey = os.path.join(build.topsrcdir, "security", "manager", "ssl", "tests", - "unit", "pykey.py") + pycert = os.path.join( + build.topsrcdir, "security", "manager", "ssl", "tests", "unit", "pycert.py" + ) + pykey = os.path.join( + build.topsrcdir, "security", "manager", "ssl", "tests", "unit", "pykey.py" + ) with NamedTemporaryFile(mode="wt+") as pwfile, TemporaryDirectory() as pemfolder: pwfile.write("\n") @@ -112,15 +121,17 @@ def constructCertDatabase(build, srcDir): # Copy all .certspec and .keyspec files to a temporary directory for root, dirs, files in os.walk(srcDir): - for spec in [i for i in files if i.endswith(".certspec") or i.endswith(".keyspec")]: - shutil.copyfile(os.path.join(root, spec), - os.path.join(pemfolder, spec)) + for spec in [ + i for i in files if i.endswith(".certspec") or i.endswith(".keyspec") + ]: + shutil.copyfile(os.path.join(root, spec), os.path.join(pemfolder, spec)) # Write a certspec for the "server-locations.txt" file to that temporary directory pgoserver_certspec = os.path.join(pemfolder, "pgoserver.certspec") if os.path.exists(pgoserver_certspec): raise Exception( - "{} already exists, which isn't allowed".format(pgoserver_certspec)) + "{} already exists, which isn't allowed".format(pgoserver_certspec) + ) with open(pgoserver_certspec, "w") as fd: writeCertspecForServerLocations(fd) @@ -136,14 +147,27 @@ def constructCertDatabase(build, srcDir): certspec_data = certspec_file.read() with open(pem, "w") as pem_file: status = runUtil( - pycert, [], inputdata=certspec_data, outputstream=pem_file) + pycert, [], inputdata=certspec_data, outputstream=pem_file + ) if status: return status - status = runUtil(certutil, [ - "-A", "-n", name, "-t", "P,,", "-i", pem, - "-d", srcDir, "-f", pwfile.name - ]) + status = runUtil( + certutil, + [ + "-A", + "-n", + name, + "-t", + "P,,", + "-i", + pem, + "-d", + srcDir, + "-f", 
+ pwfile.name, + ], + ) if status: return status @@ -152,9 +176,10 @@ def constructCertDatabase(build, srcDir): name = parts[0] key_type = parts[1] if key_type not in ["ca", "client", "server"]: - raise Exception("{}: keyspec filenames must be of the form XXX.client.keyspec " - "or XXX.ca.keyspec (key_type={})".format( - keyspec, key_type)) + raise Exception( + "{}: keyspec filenames must be of the form XXX.client.keyspec " + "or XXX.ca.keyspec (key_type={})".format(keyspec, key_type) + ) key_pem = os.path.join(pemfolder, "{}.key.pem".format(name)) print("Generating private key {} (pem={})".format(name, key_pem)) @@ -163,42 +188,62 @@ def constructCertDatabase(build, srcDir): keyspec_data = keyspec_file.read() with open(key_pem, "w") as pem_file: status = runUtil( - pykey, [], inputdata=keyspec_data, outputstream=pem_file) + pykey, [], inputdata=keyspec_data, outputstream=pem_file + ) if status: return status cert_pem = os.path.join(pemfolder, "{}.cert.pem".format(name)) if not os.path.exists(cert_pem): - raise Exception("There has to be a corresponding certificate named {} for " - "the keyspec {}".format( - cert_pem, keyspec)) + raise Exception( + "There has to be a corresponding certificate named {} for " + "the keyspec {}".format(cert_pem, keyspec) + ) p12 = os.path.join(pemfolder, "{}.key.p12".format(name)) - print("Converting private key {} to PKCS12 (p12={})".format( - key_pem, p12)) - status = runUtil(openssl, ["pkcs12", "-export", "-inkey", key_pem, "-in", - cert_pem, "-name", name, "-out", p12, "-passout", - "file:"+pwfile.name]) + print( + "Converting private key {} to PKCS12 (p12={})".format(key_pem, p12) + ) + status = runUtil( + openssl, + [ + "pkcs12", + "-export", + "-inkey", + key_pem, + "-in", + cert_pem, + "-name", + name, + "-out", + p12, + "-passout", + "file:" + pwfile.name, + ], + ) if status: return status print("Importing private key {} to database".format(key_pem)) status = runUtil( - pk12util, ["-i", p12, "-d", srcDir, "-w", pwfile.name, "-k", pwfile.name]) + pk12util, + ["-i", p12, "-d", srcDir, "-w", pwfile.name, "-k", pwfile.name], + ) if status: return status if key_type == "ca": - shutil.copyfile(cert_pem, os.path.join( - srcDir, "{}.ca".format(name))) + shutil.copyfile( + cert_pem, os.path.join(srcDir, "{}.ca".format(name)) + ) elif key_type == "client": - shutil.copyfile(p12, os.path.join( - srcDir, "{}.client".format(name))) + shutil.copyfile(p12, os.path.join(srcDir, "{}.client".format(name))) elif key_type == "server": pass # Nothing to do for server keys else: raise Exception( - "State error: Unknown keyspec key_type: {}".format(key_type)) + "State error: Unknown keyspec key_type: {}".format(key_type) + ) return 0 diff --git a/config/check_macroassembler_style.py b/config/check_macroassembler_style.py index 525e92db2947..f599305ed14e 100644 --- a/config/check_macroassembler_style.py +++ b/config/check_macroassembler_style.py @@ -28,56 +28,56 @@ import os import re import sys -architecture_independent = set(['generic']) -all_unsupported_architectures_names = set(['mips32', 'mips64', 'mips_shared']) -all_architecture_names = set(['x86', 'x64', 'arm', 'arm64']) -all_shared_architecture_names = set(['x86_shared', 'arm', 'arm64']) +architecture_independent = set(["generic"]) +all_unsupported_architectures_names = set(["mips32", "mips64", "mips_shared"]) +all_architecture_names = set(["x86", "x64", "arm", "arm64"]) +all_shared_architecture_names = set(["x86_shared", "arm", "arm64"]) reBeforeArg = "(?<=[(,\s])" reArgType = "(?P[\w\s:*&]+)" reArgName = 
"(?P\s\w+)" reArgDefault = "(?P(?:\s=[^,)]+)?)" reAfterArg = "(?=[,)])" -reMatchArg = re.compile(reBeforeArg + reArgType + - reArgName + reArgDefault + reAfterArg) +reMatchArg = re.compile(reBeforeArg + reArgType + reArgName + reArgDefault + reAfterArg) def get_normalized_signatures(signature, fileAnnot=None): # Remove static - signature = signature.replace('static', '') + signature = signature.replace("static", "") # Remove semicolon. - signature = signature.replace(';', ' ') + signature = signature.replace(";", " ") # Normalize spaces. - signature = re.sub(r'\s+', ' ', signature).strip() + signature = re.sub(r"\s+", " ", signature).strip() # Remove new-line induced spaces after opening braces. - signature = re.sub(r'\(\s+', '(', signature).strip() + signature = re.sub(r"\(\s+", "(", signature).strip() # Match arguments, and keep only the type. - signature = reMatchArg.sub('\g', signature) + signature = reMatchArg.sub("\g", signature) # Remove class name - signature = signature.replace('MacroAssembler::', '') + signature = signature.replace("MacroAssembler::", "") # Extract list of architectures - archs = ['generic'] + archs = ["generic"] if fileAnnot: - archs = [fileAnnot['arch']] + archs = [fileAnnot["arch"]] - if 'DEFINED_ON(' in signature: + if "DEFINED_ON(" in signature: archs = re.sub( - r'.*DEFINED_ON\((?P[^()]*)\).*', '\g', signature).split(',') + r".*DEFINED_ON\((?P[^()]*)\).*", "\g", signature + ).split(",") archs = [a.strip() for a in archs] - signature = re.sub(r'\s+DEFINED_ON\([^()]*\)', '', signature) + signature = re.sub(r"\s+DEFINED_ON\([^()]*\)", "", signature) - elif 'PER_ARCH' in signature: + elif "PER_ARCH" in signature: archs = all_architecture_names - signature = re.sub(r'\s+PER_ARCH', '', signature) + signature = re.sub(r"\s+PER_ARCH", "", signature) - elif 'PER_SHARED_ARCH' in signature: + elif "PER_SHARED_ARCH" in signature: archs = all_shared_architecture_names - signature = re.sub(r'\s+PER_SHARED_ARCH', '', signature) + signature = re.sub(r"\s+PER_SHARED_ARCH", "", signature) - elif 'OOL_IN_HEADER' in signature: - assert archs == ['generic'] - signature = re.sub(r'\s+OOL_IN_HEADER', '', signature) + elif "OOL_IN_HEADER" in signature: + assert archs == ["generic"] + signature = re.sub(r"\s+OOL_IN_HEADER", "", signature) else: # No signature annotation, the list of architectures remains unchanged. 
@@ -86,58 +86,55 @@ def get_normalized_signatures(signature, fileAnnot=None): # Extract inline annotation inline = False if fileAnnot: - inline = fileAnnot['inline'] + inline = fileAnnot["inline"] - if 'inline ' in signature: - signature = re.sub(r'inline\s+', '', signature) + if "inline " in signature: + signature = re.sub(r"inline\s+", "", signature) inline = True - inlinePrefx = '' + inlinePrefx = "" if inline: - inlinePrefx = 'inline ' - signatures = [ - {'arch': a, 'sig': inlinePrefx + signature} - for a in archs - ] + inlinePrefx = "inline " + signatures = [{"arch": a, "sig": inlinePrefx + signature} for a in archs] return signatures -file_suffixes = set([ - a.replace('_', '-') for a in - all_architecture_names.union(all_shared_architecture_names) - .union(all_unsupported_architectures_names) -]) +file_suffixes = set( + [ + a.replace("_", "-") + for a in all_architecture_names.union(all_shared_architecture_names).union( + all_unsupported_architectures_names + ) + ] +) def get_file_annotation(filename): origFilename = filename - filename = filename.split('/')[-1] + filename = filename.split("/")[-1] inline = False - if filename.endswith('.cpp'): - filename = filename[:-len('.cpp')] - elif filename.endswith('-inl.h'): + if filename.endswith(".cpp"): + filename = filename[: -len(".cpp")] + elif filename.endswith("-inl.h"): inline = True - filename = filename[:-len('-inl.h')] - elif filename.endswith('.h'): + filename = filename[: -len("-inl.h")] + elif filename.endswith(".h"): # This allows the definitions block in MacroAssembler.h to be # style-checked. inline = True - filename = filename[:-len('.h')] + filename = filename[: -len(".h")] else: - raise Exception('unknown file name', origFilename) + raise Exception("unknown file name", origFilename) - arch = 'generic' + arch = "generic" for suffix in file_suffixes: - if filename == 'MacroAssembler-' + suffix: + if filename == "MacroAssembler-" + suffix: arch = suffix break - return { - 'inline': inline, - 'arch': arch.replace('-', '_') - } + return {"inline": inline, "arch": arch.replace("-", "_")} def get_macroassembler_definitions(filename): @@ -147,46 +144,45 @@ def get_macroassembler_definitions(filename): return [] style_section = False - lines = '' + lines = "" signatures = [] with open(filename) as f: for line in f: - if '//{{{ check_macroassembler_style' in line: + if "//{{{ check_macroassembler_style" in line: if style_section: - raise 'check_macroassembler_style section already opened.' + raise "check_macroassembler_style section already opened." style_section = True braces_depth = 0 - elif '//}}} check_macroassembler_style' in line: + elif "//}}} check_macroassembler_style" in line: style_section = False if not style_section: continue # Ignore preprocessor directives. - if line.startswith('#'): + if line.startswith("#"): continue # Remove comments from the processed line. - line = re.sub(r'//.*', '', line) + line = re.sub(r"//.*", "", line) # Locate and count curly braces. - open_curly_brace = line.find('{') + open_curly_brace = line.find("{") was_braces_depth = braces_depth - braces_depth = braces_depth + line.count('{') - line.count('}') + braces_depth = braces_depth + line.count("{") - line.count("}") # Raise an error if the check_macroassembler_style macro is used # across namespaces / classes scopes. if braces_depth < 0: - raise 'check_macroassembler_style annotations are not well scoped.' + raise "check_macroassembler_style annotations are not well scoped." 
# If the current line contains an opening curly brace, check if # this line combines with the previous one can be identified as a # MacroAssembler function signature. if open_curly_brace != -1 and was_braces_depth == 0: lines = lines + line[:open_curly_brace] - if 'MacroAssembler::' in lines: - signatures.extend( - get_normalized_signatures(lines, fileAnnot)) - lines = '' + if "MacroAssembler::" in lines: + signatures.extend(get_normalized_signatures(lines, fileAnnot)) + lines = "" continue # We do not aggregate any lines if we are scanning lines which are @@ -194,15 +190,15 @@ def get_macroassembler_definitions(filename): if braces_depth > 0: continue if was_braces_depth != 0: - line = line[line.rfind('}') + 1:] + line = line[line.rfind("}") + 1 :] # This logic is used to remove template instantiation, static # variable definitions and function declaration from the next # function definition. - last_semi_colon = line.rfind(';') + last_semi_colon = line.rfind(";") if last_semi_colon != -1: - lines = '' - line = line[last_semi_colon + 1:] + lines = "" + line = line[last_semi_colon + 1 :] # Aggregate lines of non-braced text, which corresponds to the space # where we are expecting to find function definitions. @@ -213,49 +209,49 @@ def get_macroassembler_definitions(filename): def get_macroassembler_declaration(filename): style_section = False - lines = '' + lines = "" signatures = [] with open(filename) as f: for line in f: - if '//{{{ check_macroassembler_decl_style' in line: + if "//{{{ check_macroassembler_decl_style" in line: style_section = True - elif '//}}} check_macroassembler_decl_style' in line: + elif "//}}} check_macroassembler_decl_style" in line: style_section = False if not style_section: continue # Ignore preprocessor directives. - if line.startswith('#'): + if line.startswith("#"): continue - line = re.sub(r'//.*', '', line) - if len(line.strip()) == 0 or 'public:' in line or 'private:' in line: - lines = '' + line = re.sub(r"//.*", "", line) + if len(line.strip()) == 0 or "public:" in line or "private:" in line: + lines = "" continue lines = lines + line # Continue until we have a complete declaration - if ';' not in lines: + if ";" not in lines: continue # Skip member declarations: which are lines ending with a # semi-colon without any list of arguments. - if ')' not in lines: - lines = '' + if ")" not in lines: + lines = "" continue signatures.extend(get_normalized_signatures(lines)) - lines = '' + lines = "" return signatures def append_signatures(d, sigs): for s in sigs: - if s['sig'] not in d: - d[s['sig']] = [] - d[s['sig']].append(s['arch']) + if s["sig"] not in d: + d[s["sig"]] = [] + d[s["sig"]].append(s["arch"]) return d @@ -265,65 +261,66 @@ def generate_file_content(signatures): archs = set(sorted(signatures[s])) archs -= all_unsupported_architectures_names if len(archs.symmetric_difference(architecture_independent)) == 0: - output.append(s + ';\n') - if s.startswith('inline'): + output.append(s + ";\n") + if s.startswith("inline"): # TODO, bug 1432600: This is mistaken for OOL_IN_HEADER # functions. (Such annotation is already removed by the time # this function sees the signature here.) 
- output.append(' is defined in MacroAssembler-inl.h\n') + output.append(" is defined in MacroAssembler-inl.h\n") else: - output.append(' is defined in MacroAssembler.cpp\n') + output.append(" is defined in MacroAssembler.cpp\n") else: if len(archs.symmetric_difference(all_architecture_names)) == 0: - output.append(s + ' PER_ARCH;\n') + output.append(s + " PER_ARCH;\n") elif len(archs.symmetric_difference(all_shared_architecture_names)) == 0: - output.append(s + ' PER_SHARED_ARCH;\n') + output.append(s + " PER_SHARED_ARCH;\n") else: - output.append( - s + ' DEFINED_ON(' + ', '.join(sorted(archs)) + ');\n') + output.append(s + " DEFINED_ON(" + ", ".join(sorted(archs)) + ");\n") for a in sorted(archs): - a = a.replace('_', '-') - masm = '%s/MacroAssembler-%s' % (a, a) - if s.startswith('inline'): - output.append(' is defined in %s-inl.h\n' % masm) + a = a.replace("_", "-") + masm = "%s/MacroAssembler-%s" % (a, a) + if s.startswith("inline"): + output.append(" is defined in %s-inl.h\n" % masm) else: - output.append(' is defined in %s.cpp\n' % masm) + output.append(" is defined in %s.cpp\n" % masm) return output def check_style(): # We read from the header file the signature of each function. - decls = dict() # type: dict(signature => ['x86', 'x64']) + decls = dict() # type: dict(signature => ['x86', 'x64']) # We infer from each file the signature of each MacroAssembler function. - defs = dict() # type: dict(signature => ['x86', 'x64']) + defs = dict() # type: dict(signature => ['x86', 'x64']) - root_dir = os.path.join('js', 'src', 'jit') + root_dir = os.path.join("js", "src", "jit") for dirpath, dirnames, filenames in os.walk(root_dir): for filename in filenames: - if 'MacroAssembler' not in filename: + if "MacroAssembler" not in filename: continue - filepath = os.path.join(dirpath, filename).replace('\\', '/') + filepath = os.path.join(dirpath, filename).replace("\\", "/") - if filepath.endswith('MacroAssembler.h'): + if filepath.endswith("MacroAssembler.h"): decls = append_signatures( - decls, get_macroassembler_declaration(filepath)) - defs = append_signatures( - defs, get_macroassembler_definitions(filepath)) + decls, get_macroassembler_declaration(filepath) + ) + defs = append_signatures(defs, get_macroassembler_definitions(filepath)) if not decls or not defs: raise Exception("Did not find any definitions or declarations") # Compare declarations and definitions output. 
- difflines = difflib.unified_diff(generate_file_content(decls), - generate_file_content(defs), - fromfile='check_macroassembler_style.py declared syntax', - tofile='check_macroassembler_style.py found definitions') + difflines = difflib.unified_diff( + generate_file_content(decls), + generate_file_content(defs), + fromfile="check_macroassembler_style.py declared syntax", + tofile="check_macroassembler_style.py found definitions", + ) ok = True for diffline in difflines: ok = False - print(diffline, end='') + print(diffline, end="") return ok @@ -332,12 +329,14 @@ def main(): ok = check_style() if ok: - print('TEST-PASS | check_macroassembler_style.py | ok') + print("TEST-PASS | check_macroassembler_style.py | ok") else: - print('TEST-UNEXPECTED-FAIL | check_macroassembler_style.py | actual output does not match expected output; diff is above') # noqa: E501 + print( + "TEST-UNEXPECTED-FAIL | check_macroassembler_style.py | actual output does not match expected output; diff is above" # noqa: E501 + ) sys.exit(0 if ok else 1) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/js/src/devtools/rootAnalysis/t/testlib.py b/js/src/devtools/rootAnalysis/t/testlib.py index 010f4012f13b..d187164d84ef 100644 --- a/js/src/devtools/rootAnalysis/t/testlib.py +++ b/js/src/devtools/rootAnalysis/t/testlib.py @@ -8,23 +8,24 @@ from collections import defaultdict, namedtuple scriptdir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) -HazardSummary = namedtuple('HazardSummary', [ - 'function', - 'variable', - 'type', - 'GCFunction', - 'location']) +HazardSummary = namedtuple( + "HazardSummary", ["function", "variable", "type", "GCFunction", "location"] +) -Callgraph = namedtuple('Callgraph', [ - 'functionNames', - 'nameToId', - 'mangledToUnmangled', - 'unmangledToMangled', - 'calleesOf', - 'callersOf', - 'tags', - 'calleeGraph', - 'callerGraph']) +Callgraph = namedtuple( + "Callgraph", + [ + "functionNames", + "nameToId", + "mangledToUnmangled", + "unmangledToMangled", + "calleesOf", + "callersOf", + "tags", + "calleeGraph", + "callerGraph", + ], +) def equal(got, expected): @@ -33,7 +34,7 @@ def equal(got, expected): def extract_unmangled(func): - return func.split('$')[-1] + return func.split("$")[-1] class Test(object): @@ -49,24 +50,27 @@ class Test(object): def binpath(self, prog): return os.path.join(self.cfg.sixgill_bin, prog) - def compile(self, source, options=''): + def compile(self, source, options=""): env = os.environ - env['CCACHE_DISABLE'] = '1' + env["CCACHE_DISABLE"] = "1" cmd = "{CXX} -c {source} -O3 -std=c++11 -fplugin={sixgill} -fplugin-arg-xgill-mangle=1 {options}".format( # NOQA: E501 source=self.infile(source), - CXX=self.cfg.cxx, sixgill=self.cfg.sixgill_plugin, - options=options) + CXX=self.cfg.cxx, + sixgill=self.cfg.sixgill_plugin, + options=options, + ) if self.cfg.verbose: print("Running %s" % cmd) subprocess.check_call(["sh", "-c", cmd]) def load_db_entry(self, dbname, pattern): - '''Look up an entry from an XDB database file, 'pattern' may be an exact - matching string, or an re pattern object matching a single entry.''' + """Look up an entry from an XDB database file, 'pattern' may be an exact + matching string, or an re pattern object matching a single entry.""" - if hasattr(pattern, 'match'): - output = subprocess.check_output([self.binpath("xdbkeys"), dbname + ".xdb"], - universal_newlines=True) + if hasattr(pattern, "match"): + output = subprocess.check_output( + [self.binpath("xdbkeys"), dbname + ".xdb"], universal_newlines=True + ) 
matches = list(filter(lambda _: re.search(pattern, _), output.splitlines())) if len(matches) == 0: raise Exception("entry not found") @@ -74,17 +78,26 @@ class Test(object): raise Exception("multiple entries found") pattern = matches[0] - output = subprocess.check_output([self.binpath("xdbfind"), "-json", dbname + ".xdb", - pattern], - universal_newlines=True) + output = subprocess.check_output( + [self.binpath("xdbfind"), "-json", dbname + ".xdb", pattern], + universal_newlines=True, + ) return json.loads(output) def run_analysis_script(self, phase, upto=None): - open("defaults.py", "w").write('''\ + open("defaults.py", "w").write( + """\ analysis_scriptdir = '{scriptdir}' sixgill_bin = '{bindir}' -'''.format(scriptdir=scriptdir, bindir=self.cfg.sixgill_bin)) - cmd = [os.path.join(scriptdir, "analyze.py"), '-v' if self.verbose else '-q', phase] +""".format( + scriptdir=scriptdir, bindir=self.cfg.sixgill_bin + ) + ) + cmd = [ + os.path.join(scriptdir, "analyze.py"), + "-v" if self.verbose else "-q", + phase, + ] if upto: cmd += ["--upto", upto] cmd.append("--source=%s" % self.indir) @@ -107,17 +120,23 @@ sixgill_bin = '{bindir}' return list(filter(lambda _: _ is not None, values)) def load_suppressed_functions(self): - return set(self.load_text_file("limitedFunctions.lst", extract=lambda l: l.split(' ')[1])) + return set( + self.load_text_file( + "limitedFunctions.lst", extract=lambda l: l.split(" ")[1] + ) + ) def load_gcTypes(self): def grab_type(line): - m = re.match(r'^(GC\w+): (.*)', line) + m = re.match(r"^(GC\w+): (.*)", line) if m: - return (m.group(1) + 's', m.group(2)) + return (m.group(1) + "s", m.group(2)) return None gctypes = defaultdict(list) - for collection, typename in self.load_text_file('gcTypes.txt', extract=grab_type): + for collection, typename in self.load_text_file( + "gcTypes.txt", extract=grab_type + ): gctypes[collection].append(typename) return gctypes @@ -126,11 +145,11 @@ sixgill_bin = '{bindir}' return json.load(fh) def load_gcFunctions(self): - return self.load_text_file('gcFunctions.lst', extract=extract_unmangled) + return self.load_text_file("gcFunctions.lst", extract=extract_unmangled) def load_callgraph(self): data = Callgraph( - functionNames=['dummy'], + functionNames=["dummy"], nameToId={}, mangledToUnmangled={}, unmangledToMangled={}, @@ -152,14 +171,14 @@ sixgill_bin = '{bindir}' data.callerGraph[callee][caller] = True def process(line): - if line.startswith('#'): + if line.startswith("#"): name = line.split(" ", 1)[1] data.nameToId[name] = len(data.functionNames) data.functionNames.append(name) return - if line.startswith('='): - m = re.match(r'^= (\d+) (.*)', line) + if line.startswith("="): + m = re.match(r"^= (\d+) (.*)", line) mangled = data.functionNames[int(m.group(1))] unmangled = m.group(2) data.nameToId[unmangled] = id @@ -168,32 +187,34 @@ sixgill_bin = '{bindir}' return limit = 0 - m = re.match(r'^\w (?:/(\d+))? ', line) + m = re.match(r"^\w (?:/(\d+))? ", line) if m: limit = int(m[1]) - tokens = line.split(' ') - if tokens[0] in ('D', 'R'): + tokens = line.split(" ") + if tokens[0] in ("D", "R"): _, caller, callee = tokens add_call(lookup(caller), lookup(callee), limit) - elif tokens[0] == 'T': - data.tags[tokens[1]].add(line.split(' ', 2)[2]) - elif tokens[0] in ('F', 'V'): - m = re.match(r'^[FV] (\d+) (\d+) CLASS (.*?) FIELD (.*)', line) + elif tokens[0] == "T": + data.tags[tokens[1]].add(line.split(" ", 2)[2]) + elif tokens[0] in ("F", "V"): + m = re.match(r"^[FV] (\d+) (\d+) CLASS (.*?) 
FIELD (.*)", line) caller, callee, csu, field = m.groups() add_call(lookup(caller), lookup(callee), limit) - elif tokens[0] == 'I': - m = re.match(r'^I (\d+) VARIABLE ([^\,]*)', line) + elif tokens[0] == "I": + m = re.match(r"^I (\d+) VARIABLE ([^\,]*)", line) pass - self.load_text_file('callgraph.txt', extract=process) + self.load_text_file("callgraph.txt", extract=process) return data def load_hazards(self): def grab_hazard(line): m = re.match( - r"Function '(.*?)' has unrooted '(.*?)' of type '(.*?)' live across GC call '(.*?)' at (.*)", line) # NOQA: E501 + r"Function '(.*?)' has unrooted '(.*?)' of type '(.*?)' live across GC call '(.*?)' at (.*)", # NOQA: E501 + line, + ) if m: info = list(m.groups()) info[0] = info[0].split("$")[-1] @@ -201,7 +222,7 @@ sixgill_bin = '{bindir}' return HazardSummary(*info) return None - return self.load_text_file('rootingHazards.txt', extract=grab_hazard) + return self.load_text_file("rootingHazards.txt", extract=grab_hazard) def process_body(self, body): return Body(body) diff --git a/js/src/util/make_unicode.py b/js/src/util/make_unicode.py index 13083ca20585..13fc354a9e03 100755 --- a/js/src/util/make_unicode.py +++ b/js/src/util/make_unicode.py @@ -35,6 +35,7 @@ from zipfile import ZipFile if sys.version_info.major == 2: from itertools import ifilter as filter, imap as map, izip_longest as zip_longest from urllib2 import urlopen + range = xrange else: from itertools import zip_longest @@ -44,11 +45,13 @@ else: class codepoint_dict(dict): def name(self, code_point): (_, _, name, alias) = self[code_point] - return '{}{}'.format(name, (' (' + alias + ')' if alias else '')) + return "{}{}".format(name, (" (" + alias + ")" if alias else "")) def full_name(self, code_point): (_, _, name, alias) = self[code_point] - return 'U+{:04X} {}{}'.format(code_point, name, (' (' + alias + ')' if alias else '')) + return "U+{:04X} {}{}".format( + code_point, name, (" (" + alias + ")" if alias else "") + ) # ECMAScript 2016 @@ -56,32 +59,32 @@ class codepoint_dict(dict): whitespace = [ # python doesn't support using control character names :( 0x9, # CHARACTER TABULATION - 0xb, # LINE TABULATION - 0xc, # FORM FEED - ord(u'\N{SPACE}'), - ord(u'\N{NO-BREAK SPACE}'), - ord(u'\N{ZERO WIDTH NO-BREAK SPACE}'), # also BOM + 0xB, # LINE TABULATION + 0xC, # FORM FEED + ord("\N{SPACE}"), + ord("\N{NO-BREAK SPACE}"), + ord("\N{ZERO WIDTH NO-BREAK SPACE}"), # also BOM ] # §11.3 Line Terminators line_terminator = [ - 0xa, # LINE FEED - 0xd, # CARRIAGE RETURN - ord(u'\N{LINE SEPARATOR}'), - ord(u'\N{PARAGRAPH SEPARATOR}'), + 0xA, # LINE FEED + 0xD, # CARRIAGE RETURN + ord("\N{LINE SEPARATOR}"), + ord("\N{PARAGRAPH SEPARATOR}"), ] # These are also part of IdentifierPart §11.6 Names and Keywords compatibility_identifier_part = [ - ord(u'\N{ZERO WIDTH NON-JOINER}'), - ord(u'\N{ZERO WIDTH JOINER}'), + ord("\N{ZERO WIDTH NON-JOINER}"), + ord("\N{ZERO WIDTH JOINER}"), ] FLAG_SPACE = 1 << 0 FLAG_UNICODE_ID_START = 1 << 1 FLAG_UNICODE_ID_CONTINUE_ONLY = 1 << 2 -MAX_BMP = 0xffff +MAX_BMP = 0xFFFF public_domain = """ /* @@ -109,12 +112,12 @@ unicode_version_message = """\ def read_unicode_data(unicode_data): """ - If you want to understand how this wonderful file format works checkout - Unicode Standard Annex #44 - Unicode Character Database - http://www.unicode.org/reports/tr44/ + If you want to understand how this wonderful file format works checkout + Unicode Standard Annex #44 - Unicode Character Database + http://www.unicode.org/reports/tr44/ """ - reader = csv.reader(unicode_data, 
delimiter=str(';')) + reader = csv.reader(unicode_data, delimiter=str(";")) while True: row = next(reader, None) @@ -123,7 +126,7 @@ def read_unicode_data(unicode_data): name = row[1] # We need to expand the UAX #44 4.2.3 Code Point Range - if name.startswith('<') and name.endswith('First>'): + if name.startswith("<") and name.endswith("First>"): next_row = next(reader) for i in range(int(row[0], 16), int(next_row[0], 16) + 1): @@ -138,18 +141,17 @@ def read_unicode_data(unicode_data): def read_case_folding(case_folding): """ - File format is: - ; ; ; # + File format is: + ; ; ; # """ for line in case_folding: - if line == '\n' or line.startswith('#'): + if line == "\n" or line.startswith("#"): continue - row = line.split('; ') - if row[1] in ['F', 'T']: + row = line.split("; ") + if row[1] in ["F", "T"]: continue - assert row[1] in ['C', 'S'],\ - "expect either (C)ommon or (S)imple case foldings" + assert row[1] in ["C", "S"], "expect either (C)ommon or (S)imple case foldings" code = int(row[0], 16) mapping = int(row[2], 16) yield (code, mapping) @@ -157,15 +159,15 @@ def read_case_folding(case_folding): def read_derived_core_properties(derived_core_properties): for line in derived_core_properties: - if line == '\n' or line.startswith('#'): + if line == "\n" or line.startswith("#"): continue - row = line.split('#')[0].split(';') + row = line.split("#")[0].split(";") char_range = row[0].strip() char_property = row[1].strip() - if '..' not in char_range: + if ".." not in char_range: yield (int(char_range, 16), char_property) else: - [start, end] = char_range.split('..') + [start, end] = char_range.split("..") for char in range(int(start, 16), int(end, 16) + 1): yield (char, char_property) @@ -174,19 +176,19 @@ def read_special_casing(special_casing): # Format: # ; ; ; <upper>; (<condition_list>;)? 
# <comment> for line in special_casing: - if line == '\n' or line.startswith('#'): + if line == "\n" or line.startswith("#"): continue - row = line.split('#')[0].split(';') + row = line.split("#")[0].split(";") code = int(row[0].strip(), 16) lower = row[1].strip() - lower = [int(c, 16) for c in lower.split(' ')] if lower else [] + lower = [int(c, 16) for c in lower.split(" ")] if lower else [] upper = row[3].strip() - upper = [int(c, 16) for c in upper.split(' ')] if upper else [] + upper = [int(c, 16) for c in upper.split(" ")] if upper else [] languages = [] contexts = [] condition = row[4].strip() if condition: - for cond in condition.split(' '): + for cond in condition.split(" "): if cond[0].islower(): languages.append(cond) else: @@ -225,17 +227,21 @@ def make_non_bmp_convert_macro(out_file, name, convert_map, codepoint_table): converted = convert_map[code] diff = converted - code - if (entry and code == entry['code'] + entry['length'] and - diff == entry['diff'] and lead == entry['lead']): - entry['length'] += 1 + if ( + entry + and code == entry["code"] + entry["length"] + and diff == entry["diff"] + and lead == entry["lead"] + ): + entry["length"] += 1 continue entry = { - 'code': code, - 'diff': diff, - 'length': 1, - 'lead': lead, - 'trail': trail, + "code": code, + "diff": diff, + "length": 1, + "lead": lead, + "trail": trail, } convert_list.append(entry) @@ -243,24 +249,30 @@ def make_non_bmp_convert_macro(out_file, name, convert_map, codepoint_table): lines = [] comment = [] for entry in convert_list: - from_code = entry['code'] - to_code = entry['code'] + entry['length'] - 1 - diff = entry['diff'] + from_code = entry["code"] + to_code = entry["code"] + entry["length"] - 1 + diff = entry["diff"] - lead = entry['lead'] - from_trail = entry['trail'] - to_trail = entry['trail'] + entry['length'] - 1 + lead = entry["lead"] + from_trail = entry["trail"] + to_trail = entry["trail"] + entry["length"] - 1 - lines.append(' MACRO(0x{:x}, 0x{:x}, 0x{:x}, 0x{:x}, 0x{:x}, {:d})'.format( - from_code, to_code, lead, from_trail, to_trail, diff)) - comment.append('// {} .. {}'.format(codepoint_table.full_name(from_code), - codepoint_table.full_name(to_code))) + lines.append( + " MACRO(0x{:x}, 0x{:x}, 0x{:x}, 0x{:x}, 0x{:x}, {:d})".format( + from_code, to_code, lead, from_trail, to_trail, diff + ) + ) + comment.append( + "// {} .. 
{}".format( + codepoint_table.full_name(from_code), codepoint_table.full_name(to_code) + ) + ) - out_file.write('\n'.join(comment)) - out_file.write('\n') - out_file.write('#define FOR_EACH_NON_BMP_{}(MACRO) \\\n'.format(name)) - out_file.write(' \\\n'.join(lines)) - out_file.write('\n') + out_file.write("\n".join(comment)) + out_file.write("\n") + out_file.write("#define FOR_EACH_NON_BMP_{}(MACRO) \\\n".format(name)) + out_file.write(" \\\n".join(lines)) + out_file.write("\n") def process_derived_core_properties(derived_core_properties): @@ -268,9 +280,9 @@ def process_derived_core_properties(derived_core_properties): id_continue = set() for (char, prop) in read_derived_core_properties(derived_core_properties): - if prop == 'ID_Start': + if prop == "ID_Start": id_start.add(char) - if prop == 'ID_Continue': + if prop == "ID_Continue": id_continue.add(char) return (id_start, id_continue) @@ -318,7 +330,7 @@ def process_unicode_data(unicode_data, derived_core_properties): non_bmp_lower_map[code] = lower if code != upper: non_bmp_upper_map[code] = upper - if category == 'Zs': + if category == "Zs": non_bmp_space_set[code] = 1 test_space_table.append(code) if code in id_start: @@ -332,7 +344,7 @@ def process_unicode_data(unicode_data, derived_core_properties): flags = 0 # we combine whitespace and lineterminators because in pratice we don't need them separated - if category == 'Zs' or code in whitespace or code in line_terminator: + if category == "Zs" or code in whitespace or code in line_terminator: flags |= FLAG_SPACE test_space_table.append(code) @@ -350,8 +362,8 @@ def process_unicode_data(unicode_data, derived_core_properties): assert up_d > -65535 and up_d < 65535 assert low_d > -65535 and low_d < 65535 - upper = up_d & 0xffff - lower = low_d & 0xffff + upper = up_d & 0xFFFF + lower = low_d & 0xFFFF item = (upper, lower, flags) @@ -363,11 +375,15 @@ def process_unicode_data(unicode_data, derived_core_properties): index[code] = i return ( - table, index, - non_bmp_lower_map, non_bmp_upper_map, + table, + index, + non_bmp_lower_map, + non_bmp_upper_map, non_bmp_space_set, - non_bmp_id_start_set, non_bmp_id_cont_set, - codepoint_table, test_space_table, + non_bmp_id_start_set, + non_bmp_id_cont_set, + codepoint_table, + test_space_table, ) @@ -419,7 +435,7 @@ def process_case_folding(case_folding): assert folding_d > -65535 and folding_d < 65535 - folding = folding_d & 0xffff + folding = folding_d & 0xFFFF item = (folding,) @@ -429,10 +445,7 @@ def process_case_folding(case_folding): folding_cache[item] = i = len(folding_table) folding_table.append(item) folding_index[code] = i - return ( - folding_table, folding_index, - folding_tests - ) + return (folding_table, folding_index, folding_tests) def process_special_casing(special_casing, table, index): @@ -450,12 +463,16 @@ def process_special_casing(special_casing, table, index): def caseInfo(code): (upper, lower, flags) = table[index[code]] - return ((code + lower) & 0xffff, (code + upper) & 0xffff) + return ((code + lower) & 0xFFFF, (code + upper) & 0xFFFF) - for (code, lower, upper, languages, contexts) in read_special_casing(special_casing): - assert code <= MAX_BMP, 'Unexpected character outside of BMP: %s' % code - assert len(languages) <= 1, 'Expected zero or one language ids: %s' % languages - assert len(contexts) <= 1, 'Expected zero or one casing contexts: %s' % languages + for (code, lower, upper, languages, contexts) in read_special_casing( + special_casing + ): + assert code <= MAX_BMP, "Unexpected character outside of BMP: %s" 
% code + assert len(languages) <= 1, "Expected zero or one language ids: %s" % languages + assert len(contexts) <= 1, ( + "Expected zero or one casing contexts: %s" % languages + ) (default_lower, default_upper) = caseInfo(code) special_lower = len(lower) != 1 or lower[0] != default_lower @@ -499,10 +516,10 @@ def process_special_casing(special_casing, table, index): return upper def ascii(char_dict): - return (ch for ch in char_dict.keys() if ch <= 0x7f) + return (ch for ch in char_dict.keys() if ch <= 0x7F) def latin1(char_dict): - return (ch for ch in char_dict.keys() if ch <= 0xff) + return (ch for ch in char_dict.keys() if ch <= 0xFF) def is_empty(iterable): return not any(True for _ in iterable) @@ -543,51 +560,93 @@ def process_special_casing(special_casing, table, index): assert is_equals(["az", "lt", "tr"], sorted(lang_conditional_toupper.keys())) # Maximum case mapping length is three characters. - assert max(map(len, chain( - unconditional_tolower.values(), - unconditional_toupper.values(), - map(itemgetter(0), conditional_tolower.values()), - map(itemgetter(0), conditional_toupper.values()), - map(itemgetter(0), chain.from_iterable(d.values() - for d in lang_conditional_tolower.values())), - map(itemgetter(0), chain.from_iterable(d.values() - for d in lang_conditional_toupper.values())), - ))) <= 3 + assert ( + max( + map( + len, + chain( + unconditional_tolower.values(), + unconditional_toupper.values(), + map(itemgetter(0), conditional_tolower.values()), + map(itemgetter(0), conditional_toupper.values()), + map( + itemgetter(0), + chain.from_iterable( + d.values() for d in lang_conditional_tolower.values() + ), + ), + map( + itemgetter(0), + chain.from_iterable( + d.values() for d in lang_conditional_toupper.values() + ), + ), + ), + ) + ) + <= 3 + ) # Ensure all case mapping contexts are known (see Unicode 9.0, §3.13 Default Case Algorithms). - assert set([ - 'After_I', 'After_Soft_Dotted', 'Final_Sigma', 'More_Above', 'Not_Before_Dot', - ]).issuperset(set(filter(partial(is_not, None), chain( - map(itemgetter(1), conditional_tolower.values()), - map(itemgetter(1), conditional_toupper.values()), - map(itemgetter(1), chain.from_iterable(d.values() - for d in lang_conditional_tolower.values())), - map(itemgetter(1), chain.from_iterable(d.values() - for d in lang_conditional_toupper.values())), - )))) + assert set( + [ + "After_I", + "After_Soft_Dotted", + "Final_Sigma", + "More_Above", + "Not_Before_Dot", + ] + ).issuperset( + set( + filter( + partial(is_not, None), + chain( + map(itemgetter(1), conditional_tolower.values()), + map(itemgetter(1), conditional_toupper.values()), + map( + itemgetter(1), + chain.from_iterable( + d.values() for d in lang_conditional_tolower.values() + ), + ), + map( + itemgetter(1), + chain.from_iterable( + d.values() for d in lang_conditional_toupper.values() + ), + ), + ), + ) + ) + ) # Special casing for U+00DF (LATIN SMALL LETTER SHARP S). - assert upperCase(0x00DF) == 0x00DF and unconditional_toupper[0x00DF] == [0x0053, 0x0053] + assert upperCase(0x00DF) == 0x00DF and unconditional_toupper[0x00DF] == [ + 0x0053, + 0x0053, + ] # Special casing for U+0130 (LATIN CAPITAL LETTER I WITH DOT ABOVE). assert unconditional_tolower[0x0130] == [0x0069, 0x0307] # Special casing for U+03A3 (GREEK CAPITAL LETTER SIGMA). 
- assert lowerCase(0x03A3) == 0x03C3 and conditional_tolower[0x03A3] == ([0x03C2], 'Final_Sigma') + assert lowerCase(0x03A3) == 0x03C3 and conditional_tolower[0x03A3] == ( + [0x03C2], + "Final_Sigma", + ) return (unconditional_tolower, unconditional_toupper) -def make_non_bmp_file(version, - non_bmp_lower_map, non_bmp_upper_map, - codepoint_table): - file_name = 'UnicodeNonBMP.h' - with io.open(file_name, mode='w', encoding='utf-8') as non_bmp_file: +def make_non_bmp_file(version, non_bmp_lower_map, non_bmp_upper_map, codepoint_table): + file_name = "UnicodeNonBMP.h" + with io.open(file_name, mode="w", encoding="utf-8") as non_bmp_file: non_bmp_file.write(mpl_license) - non_bmp_file.write('\n') + non_bmp_file.write("\n") non_bmp_file.write(warning_message) non_bmp_file.write(unicode_version_message.format(version)) - non_bmp_file.write(""" + non_bmp_file.write( + """ #ifndef util_UnicodeNonBMP_h #define util_UnicodeNonBMP_h @@ -601,55 +660,68 @@ def make_non_bmp_file(version, // DIFF: the difference between the code point in the range and // converted code point -""") +""" + ) - make_non_bmp_convert_macro(non_bmp_file, 'LOWERCASE', non_bmp_lower_map, codepoint_table) - non_bmp_file.write('\n') - make_non_bmp_convert_macro(non_bmp_file, 'UPPERCASE', non_bmp_upper_map, codepoint_table) + make_non_bmp_convert_macro( + non_bmp_file, "LOWERCASE", non_bmp_lower_map, codepoint_table + ) + non_bmp_file.write("\n") + make_non_bmp_convert_macro( + non_bmp_file, "UPPERCASE", non_bmp_upper_map, codepoint_table + ) - non_bmp_file.write(""" + non_bmp_file.write( + """ #endif /* util_UnicodeNonBMP_h */ -""") +""" + ) def write_special_casing_methods(unconditional_toupper, codepoint_table, println): def hexlit(n): """ Returns C++ hex-literal for |n|. """ - return '0x{:04X}'.format(n) + return "0x{:04X}".format(n) def describe_range(ranges, depth): - indent = depth * ' ' + indent = depth * " " for (start, end) in ranges: if start == end: - println(indent, '// {}'.format(codepoint_table.full_name(start))) + println(indent, "// {}".format(codepoint_table.full_name(start))) else: - println(indent, '// {} .. {}'.format(codepoint_table.full_name(start), - codepoint_table.full_name(end))) + println( + indent, + "// {} .. {}".format( + codepoint_table.full_name(start), codepoint_table.full_name(end) + ), + ) def out_range(start, end): """ Tests if the input character isn't a member of the set {x | start <= x <= end}. """ - if (start == end): - return 'ch != {}'.format(hexlit(start)) - return 'ch < {} || ch > {}'.format(hexlit(start), hexlit(end)) + if start == end: + return "ch != {}".format(hexlit(start)) + return "ch < {} || ch > {}".format(hexlit(start), hexlit(end)) def in_range(start, end, parenthesize=False): """ Tests if the input character is in the set {x | start <= x <= end}. """ - if (start == end): - return 'ch == {}'.format(hexlit(start)) - (left, right) = ('(', ')') if parenthesize else ('', '') - return '{}ch >= {} && ch <= {}{}'.format(left, hexlit(start), hexlit(end), right) + if start == end: + return "ch == {}".format(hexlit(start)) + (left, right) = ("(", ")") if parenthesize else ("", "") + return "{}ch >= {} && ch <= {}{}".format( + left, hexlit(start), hexlit(end), right + ) def in_any_range(ranges, spaces): """ Tests if the input character is included in any of the given ranges. 
""" lines = [[]] for (start, end) in ranges: expr = in_range(start, end, parenthesize=True) - line = ' || '.join(lines[-1] + [expr]) - if len(line) < (100 - len(spaces) - len(' ||')): + line = " || ".join(lines[-1] + [expr]) + if len(line) < (100 - len(spaces) - len(" ||")): lines[-1].append(expr) else: lines.append([expr]) - return ' ||\n{}'.format(spaces).join(' || '.join(t) for t in lines) + return " ||\n{}".format(spaces).join(" || ".join(t) for t in lines) def write_range_accept(parent_list, child_list, depth): """ Accepts the input character if it matches any code unit in |child_list|. """ @@ -657,7 +729,7 @@ def write_special_casing_methods(unconditional_toupper, codepoint_table, println (min_child, max_child) = (child_list[0], child_list[-1]) assert min_child >= min_parent assert max_child <= max_parent - indent = depth * ' ' + indent = depth * " " child_ranges = list(int_ranges(child_list)) has_successor = max_child != max_parent @@ -667,41 +739,41 @@ def write_special_casing_methods(unconditional_toupper, codepoint_table, println if len(child_ranges) == 1: describe_range(child_ranges, depth) if has_successor: - println(indent, 'if (ch <= {}) {{'.format(hexlit(max_child))) - println(indent, ' return ch >= {};'.format(hexlit(min_child))) - println(indent, '}') + println(indent, "if (ch <= {}) {{".format(hexlit(max_child))) + println(indent, " return ch >= {};".format(hexlit(min_child))) + println(indent, "}") else: - println(indent, 'return {};'.format(in_range(min_child, max_child))) + println(indent, "return {};".format(in_range(min_child, max_child))) return # Otherwise create a disjunction over the subranges in |child_ranges|. if not has_successor: - spaces = indent + len('return ') * ' ' + spaces = indent + len("return ") * " " else: - spaces = indent + len(' return ') * ' ' + spaces = indent + len(" return ") * " " range_test_expr = in_any_range(child_ranges, spaces) if min_child != min_parent: - println(indent, 'if (ch < {}) {{'.format(hexlit(min_child))) - println(indent, ' return false;') - println(indent, '}') + println(indent, "if (ch < {}) {{".format(hexlit(min_child))) + println(indent, " return false;") + println(indent, "}") # If there's no successor block, we can omit the |input <= max_child| check, # because it was already checked when we emitted the parent range test. if not has_successor: describe_range(child_ranges, depth) - println(indent, 'return {};'.format(range_test_expr)) + println(indent, "return {};".format(range_test_expr)) else: - println(indent, 'if (ch <= {}) {{'.format(hexlit(max_child))) + println(indent, "if (ch <= {}) {{".format(hexlit(max_child))) describe_range(child_ranges, depth + 1) - println(indent, ' return {};'.format(range_test_expr)) - println(indent, '}') + println(indent, " return {};".format(range_test_expr)) + println(indent, "}") def write_ChangesWhenUpperCasedSpecialCasing(): """ Checks if the input has a special upper case mapping. """ - println('bool') - println('js::unicode::ChangesWhenUpperCasedSpecialCasing(char16_t ch)') - println('{') + println("bool") + println("js::unicode::ChangesWhenUpperCasedSpecialCasing(char16_t ch)") + println("{") assert unconditional_toupper, "|unconditional_toupper| is not empty" @@ -709,9 +781,9 @@ def write_special_casing_methods(unconditional_toupper, codepoint_table, println code_list = sorted(unconditional_toupper.keys()) # Fail-fast if the input character isn't a special casing character. 
- println(' if ({}) {{'.format(out_range(code_list[0], code_list[-1]))) - println(' return false;') - println(' }') + println(" if ({}) {{".format(out_range(code_list[0], code_list[-1]))) + println(" return false;") + println(" }") for i in range(0, 16): # Check if the input characters is in the range: @@ -736,11 +808,11 @@ def write_special_casing_methods(unconditional_toupper, codepoint_table, println # largest value in the current range. is_last_block = matches[-1] == code_list[-1] if not is_last_block: - println(' if (ch <= {}) {{'.format(hexlit(matches[-1]))) + println(" if (ch <= {}) {{".format(hexlit(matches[-1]))) else: - println(' if (ch < {}) {{'.format(hexlit(matches[0]))) - println(' return false;') - println(' }') + println(" if (ch < {}) {{".format(hexlit(matches[0]))) + println(" return false;") + println(" }") for j in range(0, 16): inner_start = start_point + (j << 8) @@ -752,57 +824,70 @@ def write_special_casing_methods(unconditional_toupper, codepoint_table, println write_range_accept(matches, inner_matches, depth=d) if not is_last_block: - println(' }') + println(" }") - println('}') + println("}") def write_LengthUpperCaseSpecialCasing(): """ Slow case: Special casing character was found, returns its mapping length. """ - println('size_t') - println('js::unicode::LengthUpperCaseSpecialCasing(char16_t ch)') - println('{') + println("size_t") + println("js::unicode::LengthUpperCaseSpecialCasing(char16_t ch)") + println("{") - println(' switch(ch) {') - for (code, converted) in sorted(unconditional_toupper.items(), key=itemgetter(0)): - println(' case {}: return {}; // {}'.format(hexlit(code), len(converted), - codepoint_table.name(code))) - println(' }') - println('') + println(" switch(ch) {") + for (code, converted) in sorted( + unconditional_toupper.items(), key=itemgetter(0) + ): + println( + " case {}: return {}; // {}".format( + hexlit(code), len(converted), codepoint_table.name(code) + ) + ) + println(" }") + println("") println(' MOZ_ASSERT_UNREACHABLE("Bad character input.");') - println(' return 0;') + println(" return 0;") - println('}') + println("}") def write_AppendUpperCaseSpecialCasing(): """ Slow case: Special casing character was found, append its mapping characters. 
""" - println('void') - println('js::unicode::AppendUpperCaseSpecialCasing(char16_t ch, char16_t* elements, size_t* index)') # NOQA: E501 - println('{') + println("void") + println( + "js::unicode::AppendUpperCaseSpecialCasing(char16_t ch, char16_t* elements, size_t* index)" # NOQA: E501 + ) + println("{") - println(' switch(ch) {') - for (code, converted) in sorted(unconditional_toupper.items(), key=itemgetter(0)): - println(' case {}: // {}'.format(hexlit(code), codepoint_table.name(code))) + println(" switch(ch) {") + for (code, converted) in sorted( + unconditional_toupper.items(), key=itemgetter(0) + ): + println( + " case {}: // {}".format(hexlit(code), codepoint_table.name(code)) + ) for ch in converted: - println(' elements[(*index)++] = {}; // {}' - .format(hexlit(ch), - codepoint_table.name(ch))) - println(' return;') - println(' }') - println('') + println( + " elements[(*index)++] = {}; // {}".format( + hexlit(ch), codepoint_table.name(ch) + ) + ) + println(" return;") + println(" }") + println("") println(' MOZ_ASSERT_UNREACHABLE("Bad character input.");') - println('}') + println("}") write_ChangesWhenUpperCasedSpecialCasing() - println('') + println("") write_LengthUpperCaseSpecialCasing() - println('') + println("") write_AppendUpperCaseSpecialCasing() def write_ascii_lookup_tables(table, index, write, println): def is_id_compat(code): - return code == ord(u'\N{DOLLAR SIGN}') or code == ord(u'\N{LOW LINE}') + return code == ord("\N{DOLLAR SIGN}") or code == ord("\N{LOW LINE}") def is_id_start(code): (upper, lower, flags) = table[index[code]] @@ -817,32 +902,35 @@ def write_ascii_lookup_tables(table, index, write, println): return flags & FLAG_SPACE def write_entries(name, predicate): - println('const bool unicode::{}[] = {{'.format(name)) + println("const bool unicode::{}[] = {{".format(name)) header = "".join("{0: <6}".format(x) for x in range(0, 10)).rstrip() - println('/* {} */'.format(header)) + println("/* {} */".format(header)) for i in range(0, 13): - write('/* {0: >2} */'.format(i)) + write("/* {0: >2} */".format(i)) for j in range(0, 10): code = i * 10 + j - if (code <= 0x7f): - write(' {},'.format('true' if predicate(code) else '____')) - println('') - println('};') + if code <= 0x7F: + write(" {},".format("true" if predicate(code) else "____")) + println("") + println("};") - println('') - println('#define ____ false') + println("") + println("#define ____ false") - println(""" + println( + """ /* * Identifier start chars: * - 36: $ * - 65..90: A..Z * - 95: _ * - 97..122: a..z - */""") - write_entries('js_isidstart', is_id_start) + */""" + ) + write_entries("js_isidstart", is_id_start) - println(""" + println( + """ /* * Identifier chars: * - 36: $ @@ -850,72 +938,90 @@ def write_ascii_lookup_tables(table, index, write, println): * - 65..90: A..Z * - 95: _ * - 97..122: a..z - */""") - write_entries('js_isident', is_id_continue) + */""" + ) + write_entries("js_isident", is_id_continue) - println(""" -/* Whitespace chars: '\\t', '\\n', '\\v', '\\f', '\\r', ' '. */""") - write_entries('js_isspace', is_space) + println( + """ +/* Whitespace chars: '\\t', '\\n', '\\v', '\\f', '\\r', ' '. 
*/""" + ) + write_entries("js_isspace", is_space) - println('') - println('#undef ____') + println("") + println("#undef ____") def write_latin1_lookup_tables(table, index, write, println): def case_info(code): assert 0 <= code and code <= MAX_BMP (upper, lower, flags) = table[index[code]] - return ((code + upper) & 0xffff, (code + lower) & 0xffff, flags) + return ((code + upper) & 0xFFFF, (code + lower) & 0xFFFF, flags) def toLowerCase(code): (_, lower, _) = case_info(code) - assert lower <= 0xff, "lower-case of Latin-1 is always Latin-1" + assert lower <= 0xFF, "lower-case of Latin-1 is always Latin-1" return lower def write_entries(name, mapper): - println('const JS::Latin1Char unicode::{}[] = {{'.format(name)) + println("const JS::Latin1Char unicode::{}[] = {{".format(name)) header = "".join("{0: <6}".format(x) for x in range(0, 16)).rstrip() - println('/* {} */'.format(header)) + println("/* {} */".format(header)) for i in range(0, 16): - write('/* {0: >2} */'.format(i)) + write("/* {0: >2} */".format(i)) for j in range(0, 16): code = i * 16 + j - if (code <= 0xff): - write(' 0x{:02X},'.format(mapper(code))) - println('') - println('};') + if code <= 0xFF: + write(" 0x{:02X},".format(mapper(code))) + println("") + println("};") - println('') - write_entries('latin1ToLowerCaseTable', toLowerCase) + println("") + write_entries("latin1ToLowerCaseTable", toLowerCase) -def make_bmp_mapping_test(version, codepoint_table, unconditional_tolower, unconditional_toupper): +def make_bmp_mapping_test( + version, codepoint_table, unconditional_tolower, unconditional_toupper +): def unicodeEsc(n): - return '\\u{:04X}'.format(n) + return "\\u{:04X}".format(n) - file_name = '../tests/non262/String/string-upper-lower-mapping.js' - with io.open(file_name, mode='w', encoding='utf-8') as output: - write = partial(print, file=output, sep='', end='') - println = partial(print, file=output, sep='', end='\n') + file_name = "../tests/non262/String/string-upper-lower-mapping.js" + with io.open(file_name, mode="w", encoding="utf-8") as output: + write = partial(print, file=output, sep="", end="") + println = partial(print, file=output, sep="", end="\n") write(warning_message) write(unicode_version_message.format(version)) write(public_domain) - println('var mapping = [') + println("var mapping = [") for code in range(0, MAX_BMP + 1): entry = codepoint_table.get(code) if entry: (upper, lower, _, _) = entry - upper = unconditional_toupper[code] if code in unconditional_toupper else [upper] - lower = unconditional_tolower[code] if code in unconditional_tolower else [lower] - println(' ["{}", "{}"], /* {} */'.format("".join(map(unicodeEsc, upper)), - "".join(map(unicodeEsc, lower)), - codepoint_table.name(code))) + upper = ( + unconditional_toupper[code] + if code in unconditional_toupper + else [upper] + ) + lower = ( + unconditional_tolower[code] + if code in unconditional_tolower + else [lower] + ) + println( + ' ["{}", "{}"], /* {} */'.format( + "".join(map(unicodeEsc, upper)), + "".join(map(unicodeEsc, lower)), + codepoint_table.name(code), + ) + ) else: println(' ["{0}", "{0}"],'.format(unicodeEsc(code))) - println('];') - write(""" + println("];") + write( + """ assertEq(mapping.length, 0x10000); for (var i = 0; i <= 0xffff; i++) { var char = String.fromCharCode(i); @@ -927,47 +1033,65 @@ for (var i = 0; i <= 0xffff; i++) { if (typeof reportCompare === "function") reportCompare(true, true); -""") +""" + ) -def make_non_bmp_mapping_test(version, non_bmp_upper_map, non_bmp_lower_map, codepoint_table): - 
file_name = '../tests/non262/String/string-code-point-upper-lower-mapping.js' - with io.open(file_name, mode='w', encoding='utf-8') as test_non_bmp_mapping: +def make_non_bmp_mapping_test( + version, non_bmp_upper_map, non_bmp_lower_map, codepoint_table +): + file_name = "../tests/non262/String/string-code-point-upper-lower-mapping.js" + with io.open(file_name, mode="w", encoding="utf-8") as test_non_bmp_mapping: test_non_bmp_mapping.write(warning_message) test_non_bmp_mapping.write(unicode_version_message.format(version)) test_non_bmp_mapping.write(public_domain) for code in sorted(non_bmp_upper_map.keys()): - test_non_bmp_mapping.write("""\ + test_non_bmp_mapping.write( + """\ assertEq(String.fromCodePoint(0x{:04X}).toUpperCase().codePointAt(0), 0x{:04X}); // {}, {} -""".format(code, non_bmp_upper_map[code], - codepoint_table.name(code), codepoint_table.name(non_bmp_upper_map[code]))) +""".format( + code, + non_bmp_upper_map[code], + codepoint_table.name(code), + codepoint_table.name(non_bmp_upper_map[code]), + ) + ) for code in sorted(non_bmp_lower_map.keys()): - test_non_bmp_mapping.write("""\ + test_non_bmp_mapping.write( + """\ assertEq(String.fromCodePoint(0x{:04X}).toLowerCase().codePointAt(0), 0x{:04X}); // {}, {} -""".format(code, non_bmp_lower_map[code], - codepoint_table.name(code), codepoint_table.name(non_bmp_lower_map[code]))) +""".format( + code, + non_bmp_lower_map[code], + codepoint_table.name(code), + codepoint_table.name(non_bmp_lower_map[code]), + ) + ) - test_non_bmp_mapping.write(""" + test_non_bmp_mapping.write( + """ if (typeof reportCompare === "function") reportCompare(true, true); -""") +""" + ) def make_space_test(version, test_space_table, codepoint_table): def hex_and_name(c): - return ' 0x{:04X} /* {} */'.format(c, codepoint_table.name(c)) + return " 0x{:04X} /* {} */".format(c, codepoint_table.name(c)) - file_name = '../tests/non262/String/string-space-trim.js' - with io.open(file_name, mode='w', encoding='utf-8') as test_space: + file_name = "../tests/non262/String/string-space-trim.js" + with io.open(file_name, mode="w", encoding="utf-8") as test_space: test_space.write(warning_message) test_space.write(unicode_version_message.format(version)) test_space.write(public_domain) - test_space.write('var onlySpace = String.fromCharCode(\n') - test_space.write(',\n'.join(map(hex_and_name, test_space_table))) - test_space.write('\n);\n') - test_space.write(""" + test_space.write("var onlySpace = String.fromCharCode(\n") + test_space.write(",\n".join(map(hex_and_name, test_space_table))) + test_space.write("\n);\n") + test_space.write( + """ assertEq(onlySpace.trim(), ""); assertEq((onlySpace + 'aaaa').trim(), 'aaaa'); assertEq(('aaaa' + onlySpace).trim(), 'aaaa'); @@ -975,22 +1099,24 @@ assertEq((onlySpace + 'aaaa' + onlySpace).trim(), 'aaaa'); if (typeof reportCompare === "function") reportCompare(true, true); -""") +""" + ) def make_regexp_space_test(version, test_space_table, codepoint_table): def hex_and_name(c): - return ' 0x{:04X} /* {} */'.format(c, codepoint_table.name(c)) + return " 0x{:04X} /* {} */".format(c, codepoint_table.name(c)) - file_name = '../tests/non262/RegExp/character-class-escape-s.js' - with io.open(file_name, mode='w', encoding='utf-8') as test_space: + file_name = "../tests/non262/RegExp/character-class-escape-s.js" + with io.open(file_name, mode="w", encoding="utf-8") as test_space: test_space.write(warning_message) test_space.write(unicode_version_message.format(version)) test_space.write(public_domain) - test_space.write('var 
onlySpace = String.fromCodePoint(\n') - test_space.write(',\n'.join(map(hex_and_name, test_space_table))) - test_space.write('\n);\n') - test_space.write(""" + test_space.write("var onlySpace = String.fromCodePoint(\n") + test_space.write(",\n".join(map(hex_and_name, test_space_table))) + test_space.write("\n);\n") + test_space.write( + """ assertEq(/^\s+$/.exec(onlySpace) !== null, true); assertEq(/^[\s]+$/.exec(onlySpace) !== null, true); assertEq(/^[^\s]+$/.exec(onlySpace) === null, true); @@ -1010,19 +1136,21 @@ assertEq(/^[^\S]+$/u.exec(onlySpace) !== null, true); if (typeof reportCompare === "function") reportCompare(true, true); -""") +""" + ) def make_icase_test(version, folding_tests, codepoint_table): def char_hex(c): - return '0x{:04X}'.format(c) + return "0x{:04X}".format(c) - file_name = '../tests/non262/RegExp/unicode-ignoreCase.js' - with io.open(file_name, mode='w', encoding='utf-8') as test_icase: + file_name = "../tests/non262/RegExp/unicode-ignoreCase.js" + with io.open(file_name, mode="w", encoding="utf-8") as test_icase: test_icase.write(warning_message) test_icase.write(unicode_version_message.format(version)) test_icase.write(public_domain) - test_icase.write(""" + test_icase.write( + """ var BUGNUMBER = 1135377; var summary = "Implement RegExp unicode flag -- ignoreCase flag."; @@ -1035,24 +1163,35 @@ function test(code, ...equivs) { codeRe = new RegExp("[" + String.fromCodePoint(code) + "]+", "iu"); assertEqArray(codeRe.exec("<" + ans + ">"), [ans]); } -""") +""" + ) for args in folding_tests: - test_icase.write('test({}); // {}\n'.format(', '.join(map(char_hex, args)), - ', '.join(map(codepoint_table.name, - args)))) - test_icase.write(""" + test_icase.write( + "test({}); // {}\n".format( + ", ".join(map(char_hex, args)), + ", ".join(map(codepoint_table.name, args)), + ) + ) + test_icase.write( + """ if (typeof reportCompare === "function") reportCompare(true, true); -""") +""" + ) -def make_unicode_file(version, - table, index, - folding_table, folding_index, - non_bmp_space_set, - non_bmp_id_start_set, non_bmp_id_cont_set, - unconditional_toupper, - codepoint_table): +def make_unicode_file( + version, + table, + index, + folding_table, + folding_index, + non_bmp_space_set, + non_bmp_id_start_set, + non_bmp_id_cont_set, + unconditional_toupper, + codepoint_table, +): index1, index2, shift = splitbins(index) # Don't forget to update CharInfo in Unicode.h if you need to change this @@ -1077,7 +1216,9 @@ def make_unicode_file(version, test = folding_table[folding_index[char]] idx = folding_index1[char >> folding_shift] - idx = folding_index2[(idx << folding_shift) + (char & ((1 << folding_shift) - 1))] + idx = folding_index2[ + (idx << folding_shift) + (char & ((1 << folding_shift) - 1)) + ] assert test == folding_table[idx] @@ -1129,9 +1270,9 @@ def make_unicode_file(version, """ def dump(data, name, println): - println('const uint8_t unicode::{}[] = {{'.format(name)) + println("const uint8_t unicode::{}[] = {{".format(name)) - line = pad = ' ' * 4 + line = pad = " " * 4 lines = [] for entry in data: assert entry < 256 @@ -1140,67 +1281,80 @@ def make_unicode_file(version, if len(line + s) + 5 > 99: lines.append(line.rstrip()) - line = pad + s + ', ' + line = pad + s + ", " else: - line = line + s + ', ' + line = line + s + ", " lines.append(line.rstrip()) - println('\n'.join(lines)) - println('};') + println("\n".join(lines)) + println("};") def write_table(data_type, name, tbl, idx1_name, idx1, idx2_name, idx2, println): - println('const {} unicode::{}[] = 
{{'.format(data_type, name)) + println("const {} unicode::{}[] = {{".format(data_type, name)) for d in tbl: - println(' {{ {} }},'.format(', '.join(str(e) for e in d))) - println('};') - println('') + println(" {{ {} }},".format(", ".join(str(e) for e in d))) + println("};") + println("") dump(idx1, idx1_name, println) - println('') + println("") dump(idx2, idx2_name, println) - println('') + println("") def write_supplemental_identifier_method(name, group_set, println): - println('bool') - println('js::unicode::{}(uint32_t codePoint)'.format(name)) - println('{') + println("bool") + println("js::unicode::{}(uint32_t codePoint)".format(name)) + println("{") for (from_code, to_code) in int_ranges(group_set.keys()): - println(' if (codePoint >= 0x{:X} && codePoint <= 0x{:X}) {{ // {} .. {}' - .format(from_code, - to_code, - codepoint_table.name(from_code), - codepoint_table.name(to_code))) - println(' return true;') - println(' }') - println(' return false;') - println('}') - println('') + println( + " if (codePoint >= 0x{:X} && codePoint <= 0x{:X}) {{ // {} .. {}".format( + from_code, + to_code, + codepoint_table.name(from_code), + codepoint_table.name(to_code), + ) + ) + println(" return true;") + println(" }") + println(" return false;") + println("}") + println("") - file_name = 'Unicode.cpp' - with io.open(file_name, 'w', encoding='utf-8') as data_file: - write = partial(print, file=data_file, sep='', end='') - println = partial(print, file=data_file, sep='', end='\n') + file_name = "Unicode.cpp" + with io.open(file_name, "w", encoding="utf-8") as data_file: + write = partial(print, file=data_file, sep="", end="") + println = partial(print, file=data_file, sep="", end="\n") write(warning_message) write(unicode_version_message.format(version)) write(public_domain) println('#include "util/Unicode.h"') - println('') - println('using namespace js;') - println('using namespace js::unicode;') + println("") + println("using namespace js;") + println("using namespace js::unicode;") write(comment) - write_table('CharacterInfo', - 'js_charinfo', table, - 'index1', index1, - 'index2', index2, - println) + write_table( + "CharacterInfo", + "js_charinfo", + table, + "index1", + index1, + "index2", + index2, + println, + ) - write_table('FoldingInfo', - 'js_foldinfo', folding_table, - 'folding_index1', folding_index1, - 'folding_index2', folding_index2, - println) + write_table( + "FoldingInfo", + "js_foldinfo", + folding_table, + "folding_index1", + folding_index1, + "folding_index2", + folding_index2, + println, + ) # If the following assert fails, it means space character is added to # non-BMP area. In that case the following code should be uncommented @@ -1208,11 +1362,13 @@ def make_unicode_file(version, # unicode::IsSpace will require updating to handle this.) 
assert len(non_bmp_space_set.keys()) == 0 - write_supplemental_identifier_method('IsIdentifierStartNonBMP', non_bmp_id_start_set, - println) + write_supplemental_identifier_method( + "IsIdentifierStartNonBMP", non_bmp_id_start_set, println + ) - write_supplemental_identifier_method('IsIdentifierPartNonBMP', non_bmp_id_cont_set, - println) + write_supplemental_identifier_method( + "IsIdentifierPartNonBMP", non_bmp_id_cont_set, println + ) write_special_casing_methods(unconditional_toupper, codepoint_table, println) @@ -1224,7 +1380,7 @@ def make_unicode_file(version, def getsize(data): """ return smallest possible integer size for the given array """ maxdata = max(data) - assert maxdata < 2**32 + assert maxdata < 2 ** 32 if maxdata < 256: return 1 @@ -1246,27 +1402,29 @@ def splitbins(t): """ def dump(t1, t2, shift, bytes): - print("%d+%d bins at shift %d; %d bytes" % ( - len(t1), len(t2), shift, bytes), file=sys.stderr) - print("Size of original table:", len(t)*getsize(t), - "bytes", file=sys.stderr) - n = len(t)-1 # last valid index - maxshift = 0 # the most we can shift n and still have something left + print( + "%d+%d bins at shift %d; %d bytes" % (len(t1), len(t2), shift, bytes), + file=sys.stderr, + ) + print("Size of original table:", len(t) * getsize(t), "bytes", file=sys.stderr) + + n = len(t) - 1 # last valid index + maxshift = 0 # the most we can shift n and still have something left if n > 0: while n >> 1: n >>= 1 maxshift += 1 del n bytes = sys.maxsize # smallest total size so far - t = tuple(t) # so slices can be dict keys + t = tuple(t) # so slices can be dict keys for shift in range(maxshift + 1): t1 = [] t2 = [] - size = 2**shift + size = 2 ** shift bincache = {} for i in range(0, len(t), size): - bin = t[i:i + size] + bin = t[i : i + size] index = bincache.get(bin) if index is None: @@ -1282,11 +1440,11 @@ def splitbins(t): bytes = b t1, t2, shift = best - print("Best:", end=' ', file=sys.stderr) + print("Best:", end=" ", file=sys.stderr) dump(t1, t2, shift, bytes) # exhaustively verify that the decomposition is correct - mask = 2**shift - 1 + mask = 2 ** shift - 1 for i in range(len(t)): assert t[i] == t2[(t1[i >> shift] << shift) + (i & mask)] return best @@ -1297,99 +1455,116 @@ def update_unicode(args): version = args.version if version is not None: - baseurl = 'https://unicode.org/Public' - if version == 'UNIDATA': - url = '%s/%s' % (baseurl, version) + baseurl = "https://unicode.org/Public" + if version == "UNIDATA": + url = "%s/%s" % (baseurl, version) else: - url = '%s/%s/ucd' % (baseurl, version) + url = "%s/%s/ucd" % (baseurl, version) - print('Arguments:') + print("Arguments:") if version is not None: - print('\tVersion: %s' % version) - print('\tDownload url: %s' % url) + print("\tVersion: %s" % version) + print("\tDownload url: %s" % url) - request_url = '{}/UCD.zip'.format(url) + request_url = "{}/UCD.zip".format(url) with closing(urlopen(request_url)) as downloaded_file: downloaded_data = io.BytesIO(downloaded_file.read()) with ZipFile(downloaded_data) as zip_file: - for fname in ['UnicodeData.txt', - 'CaseFolding.txt', - 'DerivedCoreProperties.txt', - 'SpecialCasing.txt']: + for fname in [ + "UnicodeData.txt", + "CaseFolding.txt", + "DerivedCoreProperties.txt", + "SpecialCasing.txt", + ]: zip_file.extract(fname, path=base_path) else: - print('\tUsing local files.') - print('\tAlways make sure you have the newest Unicode files!') - print('') + print("\tUsing local files.") + print("\tAlways make sure you have the newest Unicode files!") + print("") def 
version_from_file(f, fname): pat_version = re.compile(r"# %s-(?P<version>\d+\.\d+\.\d+).txt" % fname) return pat_version.match(f.readline()).group("version") - with io.open(os.path.join(base_path, 'UnicodeData.txt'), - 'r', encoding='utf-8') as unicode_data, \ - io.open(os.path.join(base_path, 'CaseFolding.txt'), - 'r', encoding='utf-8') as case_folding, \ - io.open(os.path.join(base_path, 'DerivedCoreProperties.txt'), - 'r', encoding='utf-8') as derived_core_properties, \ - io.open(os.path.join(base_path, 'SpecialCasing.txt'), - 'r', encoding='utf-8') as special_casing: - unicode_version = version_from_file(derived_core_properties, 'DerivedCoreProperties') + with io.open( + os.path.join(base_path, "UnicodeData.txt"), "r", encoding="utf-8" + ) as unicode_data, io.open( + os.path.join(base_path, "CaseFolding.txt"), "r", encoding="utf-8" + ) as case_folding, io.open( + os.path.join(base_path, "DerivedCoreProperties.txt"), "r", encoding="utf-8" + ) as derived_core_properties, io.open( + os.path.join(base_path, "SpecialCasing.txt"), "r", encoding="utf-8" + ) as special_casing: + unicode_version = version_from_file( + derived_core_properties, "DerivedCoreProperties" + ) - print('Processing...') + print("Processing...") ( - table, index, - non_bmp_lower_map, non_bmp_upper_map, + table, + index, + non_bmp_lower_map, + non_bmp_upper_map, non_bmp_space_set, - non_bmp_id_start_set, non_bmp_id_cont_set, - codepoint_table, test_space_table + non_bmp_id_start_set, + non_bmp_id_cont_set, + codepoint_table, + test_space_table, ) = process_unicode_data(unicode_data, derived_core_properties) - ( - folding_table, folding_index, - folding_tests - ) = process_case_folding(case_folding) - ( - unconditional_tolower, unconditional_toupper - ) = process_special_casing(special_casing, table, index) + (folding_table, folding_index, folding_tests) = process_case_folding( + case_folding + ) + (unconditional_tolower, unconditional_toupper) = process_special_casing( + special_casing, table, index + ) - print('Generating...') - make_unicode_file(unicode_version, - table, index, - folding_table, folding_index, - non_bmp_space_set, - non_bmp_id_start_set, non_bmp_id_cont_set, - unconditional_toupper, - codepoint_table) - make_non_bmp_file(unicode_version, - non_bmp_lower_map, non_bmp_upper_map, - codepoint_table) + print("Generating...") + make_unicode_file( + unicode_version, + table, + index, + folding_table, + folding_index, + non_bmp_space_set, + non_bmp_id_start_set, + non_bmp_id_cont_set, + unconditional_toupper, + codepoint_table, + ) + make_non_bmp_file( + unicode_version, non_bmp_lower_map, non_bmp_upper_map, codepoint_table + ) - make_bmp_mapping_test(unicode_version, - codepoint_table, unconditional_tolower, unconditional_toupper) - make_non_bmp_mapping_test(unicode_version, non_bmp_upper_map, - non_bmp_lower_map, codepoint_table) + make_bmp_mapping_test( + unicode_version, codepoint_table, unconditional_tolower, unconditional_toupper + ) + make_non_bmp_mapping_test( + unicode_version, non_bmp_upper_map, non_bmp_lower_map, codepoint_table + ) make_space_test(unicode_version, test_space_table, codepoint_table) make_regexp_space_test(unicode_version, test_space_table, codepoint_table) make_icase_test(unicode_version, folding_tests, codepoint_table) -if __name__ == '__main__': +if __name__ == "__main__": import argparse # This script must be run from js/src/util to work correctly. 
- if '/'.join(os.path.normpath(os.getcwd()).split(os.sep)[-3:]) != 'js/src/util': - raise RuntimeError('%s must be run from js/src/util' % sys.argv[0]) + if "/".join(os.path.normpath(os.getcwd()).split(os.sep)[-3:]) != "js/src/util": + raise RuntimeError("%s must be run from js/src/util" % sys.argv[0]) - parser = argparse.ArgumentParser(description='Update Unicode data.') + parser = argparse.ArgumentParser(description="Update Unicode data.") - parser.add_argument('--version', - help='Optional Unicode version number. If specified, downloads the\ + parser.add_argument( + "--version", + help='Optional Unicode version number. If specified, downloads the\ selected version from <https://unicode.org/Public>. If not specified\ uses the existing local files to generate the Unicode data. The\ number must match a published Unicode version, e.g. use\ "--version=8.0.0" to download Unicode 8 files. Alternatively use\ - "--version=UNIDATA" to download the latest published version.') + "--version=UNIDATA" to download the latest published version.', + ) parser.set_defaults(func=update_unicode) diff --git a/mobile/android/mach_commands.py b/mobile/android/mach_commands.py index 11d9494aaa83..691977425475 100644 --- a/mobile/android/mach_commands.py +++ b/mobile/android/mach_commands.py @@ -51,150 +51,223 @@ def REMOVED(cls): @CommandProvider class MachCommands(MachCommandBase): - @Command('android', category='devenv', - description='Run Android-specific commands.', - conditions=[conditions.is_android]) + @Command( + "android", + category="devenv", + description="Run Android-specific commands.", + conditions=[conditions.is_android], + ) def android(self): pass - @SubCommand('android', 'assemble-app', - """Assemble Firefox for Android. - See http://firefox-source-docs.mozilla.org/build/buildsystem/toolchains.html#firefox-for-android-with-gradle""") # NOQA: E501 - @CommandArgument('args', nargs=argparse.REMAINDER) + @SubCommand( + "android", + "assemble-app", + """Assemble Firefox for Android. + See http://firefox-source-docs.mozilla.org/build/buildsystem/toolchains.html#firefox-for-android-with-gradle""", # NOQA: E501 + ) + @CommandArgument("args", nargs=argparse.REMAINDER) def android_assemble_app(self, args): - ret = self.gradle(self.substs['GRADLE_ANDROID_APP_TASKS'] + - ['-x', 'lint'] + args, verbose=True) + ret = self.gradle( + self.substs["GRADLE_ANDROID_APP_TASKS"] + ["-x", "lint"] + args, + verbose=True, + ) return ret - @SubCommand('android', 'generate-sdk-bindings', - """Generate SDK bindings used when building GeckoView.""") - @CommandArgument('inputs', nargs='+', help='config files, ' - 'like [/path/to/ClassName-classes.txt]+') - @CommandArgument('args', nargs=argparse.REMAINDER) + @SubCommand( + "android", + "generate-sdk-bindings", + """Generate SDK bindings used when building GeckoView.""", + ) + @CommandArgument( + "inputs", + nargs="+", + help="config files, " "like [/path/to/ClassName-classes.txt]+", + ) + @CommandArgument("args", nargs=argparse.REMAINDER) def android_generate_sdk_bindings(self, inputs, args): import itertools def stem(input): # Turn "/path/to/ClassName-classes.txt" into "ClassName". 
- return os.path.basename(input).rsplit('-classes.txt', 1)[0] + return os.path.basename(input).rsplit("-classes.txt", 1)[0] - bindings_inputs = list(itertools.chain(*((input, stem(input)) for input in inputs))) - bindings_args = '-Pgenerate_sdk_bindings_args={}'.format(';'.join(bindings_inputs)) + bindings_inputs = list( + itertools.chain(*((input, stem(input)) for input in inputs)) + ) + bindings_args = "-Pgenerate_sdk_bindings_args={}".format( + ";".join(bindings_inputs) + ) ret = self.gradle( - self.substs['GRADLE_ANDROID_GENERATE_SDK_BINDINGS_TASKS'] + [bindings_args] + args, - verbose=True) + self.substs["GRADLE_ANDROID_GENERATE_SDK_BINDINGS_TASKS"] + + [bindings_args] + + args, + verbose=True, + ) return ret - @SubCommand('android', 'generate-generated-jni-wrappers', - """Generate GeckoView JNI wrappers used when building GeckoView.""") - @CommandArgument('args', nargs=argparse.REMAINDER) + @SubCommand( + "android", + "generate-generated-jni-wrappers", + """Generate GeckoView JNI wrappers used when building GeckoView.""", + ) + @CommandArgument("args", nargs=argparse.REMAINDER) def android_generate_generated_jni_wrappers(self, args): ret = self.gradle( - self.substs['GRADLE_ANDROID_GENERATE_GENERATED_JNI_WRAPPERS_TASKS'] + args, - verbose=True) + self.substs["GRADLE_ANDROID_GENERATE_GENERATED_JNI_WRAPPERS_TASKS"] + args, + verbose=True, + ) return ret - @SubCommand('android', 'api-lint', - """Run Android api-lint. -REMOVED/DEPRECATED: Use 'mach lint --linter android-api-lint'.""") + @SubCommand( + "android", + "api-lint", + """Run Android api-lint. +REMOVED/DEPRECATED: Use 'mach lint --linter android-api-lint'.""", + ) def android_apilint_REMOVED(self): print(LINT_DEPRECATION_MESSAGE) return 1 - @SubCommand('android', 'test', - """Run Android test. -REMOVED/DEPRECATED: Use 'mach lint --linter android-test'.""") + @SubCommand( + "android", + "test", + """Run Android test. +REMOVED/DEPRECATED: Use 'mach lint --linter android-test'.""", + ) def android_test_REMOVED(self): print(LINT_DEPRECATION_MESSAGE) return 1 - @SubCommand('android', 'lint', - """Run Android lint. -REMOVED/DEPRECATED: Use 'mach lint --linter android-lint'.""") + @SubCommand( + "android", + "lint", + """Run Android lint. +REMOVED/DEPRECATED: Use 'mach lint --linter android-lint'.""", + ) def android_lint_REMOVED(self): print(LINT_DEPRECATION_MESSAGE) return 1 - @SubCommand('android', 'checkstyle', - """Run Android checkstyle. -REMOVED/DEPRECATED: Use 'mach lint --linter android-checkstyle'.""") + @SubCommand( + "android", + "checkstyle", + """Run Android checkstyle. +REMOVED/DEPRECATED: Use 'mach lint --linter android-checkstyle'.""", + ) def android_checkstyle_REMOVED(self): print(LINT_DEPRECATION_MESSAGE) return 1 - @SubCommand('android', 'gradle-dependencies', - """Collect Android Gradle dependencies. - See http://firefox-source-docs.mozilla.org/build/buildsystem/toolchains.html#firefox-for-android-with-gradle""") # NOQA: E501 - @CommandArgument('args', nargs=argparse.REMAINDER) + @SubCommand( + "android", + "gradle-dependencies", + """Collect Android Gradle dependencies. + See http://firefox-source-docs.mozilla.org/build/buildsystem/toolchains.html#firefox-for-android-with-gradle""", # NOQA: E501 + ) + @CommandArgument("args", nargs=argparse.REMAINDER) def android_gradle_dependencies(self, args): # We don't want to gate producing dependency archives on clean # lint or checkstyle, particularly because toolchain versions # can change the outputs for those processes. 
- self.gradle(self.substs['GRADLE_ANDROID_DEPENDENCIES_TASKS'] + - ["--continue"] + args, verbose=True) + self.gradle( + self.substs["GRADLE_ANDROID_DEPENDENCIES_TASKS"] + ["--continue"] + args, + verbose=True, + ) return 0 - @SubCommand('android', 'archive-geckoview', - """Create GeckoView archives. - See http://firefox-source-docs.mozilla.org/build/buildsystem/toolchains.html#firefox-for-android-with-gradle""") # NOQA: E501 - @CommandArgument('args', nargs=argparse.REMAINDER) + @SubCommand( + "android", + "archive-geckoview", + """Create GeckoView archives. + See http://firefox-source-docs.mozilla.org/build/buildsystem/toolchains.html#firefox-for-android-with-gradle""", # NOQA: E501 + ) + @CommandArgument("args", nargs=argparse.REMAINDER) def android_archive_geckoview(self, args): ret = self.gradle( - self.substs['GRADLE_ANDROID_ARCHIVE_GECKOVIEW_TASKS'] + args, - verbose=True) + self.substs["GRADLE_ANDROID_ARCHIVE_GECKOVIEW_TASKS"] + args, verbose=True + ) return ret - @SubCommand('android', 'build-geckoview_example', - """Build geckoview_example """) - @CommandArgument('args', nargs=argparse.REMAINDER) + @SubCommand("android", "build-geckoview_example", """Build geckoview_example """) + @CommandArgument("args", nargs=argparse.REMAINDER) def android_build_geckoview_example(self, args): - self.gradle(self.substs['GRADLE_ANDROID_BUILD_GECKOVIEW_EXAMPLE_TASKS'] + args, - verbose=True) + self.gradle( + self.substs["GRADLE_ANDROID_BUILD_GECKOVIEW_EXAMPLE_TASKS"] + args, + verbose=True, + ) - print('Execute `mach android install-geckoview_example` ' - 'to push the geckoview_example and test APKs to a device.') + print( + "Execute `mach android install-geckoview_example` " + "to push the geckoview_example and test APKs to a device." + ) return 0 - @SubCommand('android', 'install-geckoview_example', - """Install geckoview_example """) - @CommandArgument('args', nargs=argparse.REMAINDER) + @SubCommand( + "android", "install-geckoview_example", """Install geckoview_example """ + ) + @CommandArgument("args", nargs=argparse.REMAINDER) def android_install_geckoview_example(self, args): - self.gradle(self.substs['GRADLE_ANDROID_INSTALL_GECKOVIEW_EXAMPLE_TASKS'] + args, - verbose=True) + self.gradle( + self.substs["GRADLE_ANDROID_INSTALL_GECKOVIEW_EXAMPLE_TASKS"] + args, + verbose=True, + ) - print('Execute `mach android build-geckoview_example` ' - 'to just build the geckoview_example and test APKs.') + print( + "Execute `mach android build-geckoview_example` " + "to just build the geckoview_example and test APKs." 
+ ) return 0 - @SubCommand('android', 'geckoview-docs', - """Create GeckoView javadoc and optionally upload to Github""") - @CommandArgument('--archive', action='store_true', - help='Generate a javadoc archive.') - @CommandArgument('--upload', metavar='USER/REPO', - help='Upload geckoview documentation to Github, ' - 'using the specified USER/REPO.') - @CommandArgument('--upload-branch', metavar='BRANCH[/PATH]', - default='gh-pages', - help='Use the specified branch/path for documentation commits.') - @CommandArgument('--javadoc-path', metavar='/PATH', - default='javadoc', - help='Use the specified path for javadoc commits.') - @CommandArgument('--upload-message', metavar='MSG', - default='GeckoView docs upload', - help='Use the specified message for commits.') - def android_geckoview_docs(self, archive, upload, upload_branch, javadoc_path, - upload_message): + @SubCommand( + "android", + "geckoview-docs", + """Create GeckoView javadoc and optionally upload to Github""", + ) + @CommandArgument( + "--archive", action="store_true", help="Generate a javadoc archive." + ) + @CommandArgument( + "--upload", + metavar="USER/REPO", + help="Upload geckoview documentation to Github, " + "using the specified USER/REPO.", + ) + @CommandArgument( + "--upload-branch", + metavar="BRANCH[/PATH]", + default="gh-pages", + help="Use the specified branch/path for documentation commits.", + ) + @CommandArgument( + "--javadoc-path", + metavar="/PATH", + default="javadoc", + help="Use the specified path for javadoc commits.", + ) + @CommandArgument( + "--upload-message", + metavar="MSG", + default="GeckoView docs upload", + help="Use the specified message for commits.", + ) + def android_geckoview_docs( + self, archive, upload, upload_branch, javadoc_path, upload_message + ): - tasks = (self.substs['GRADLE_ANDROID_GECKOVIEW_DOCS_ARCHIVE_TASKS'] if archive or upload - else self.substs['GRADLE_ANDROID_GECKOVIEW_DOCS_TASKS']) + tasks = ( + self.substs["GRADLE_ANDROID_GECKOVIEW_DOCS_ARCHIVE_TASKS"] + if archive or upload + else self.substs["GRADLE_ANDROID_GECKOVIEW_DOCS_TASKS"] + ) ret = self.gradle(tasks, verbose=True) if ret or not upload: @@ -202,9 +275,9 @@ REMOVED/DEPRECATED: Use 'mach lint --linter android-checkstyle'.""") # Upload to Github. fmt = { - 'level': os.environ.get('MOZ_SCM_LEVEL', '0'), - 'project': os.environ.get('MH_BRANCH', 'unknown'), - 'revision': os.environ.get('GECKO_HEAD_REV', 'tip'), + "level": os.environ.get("MOZ_SCM_LEVEL", "0"), + "project": os.environ.get("MH_BRANCH", "unknown"), + "revision": os.environ.get("GECKO_HEAD_REV", "tip"), } env = {} @@ -212,66 +285,104 @@ REMOVED/DEPRECATED: Use 'mach lint --linter android-checkstyle'.""") # in the TaskCluster secrets store in the format {"content": "<KEY>"}, # and the corresponding public key as a writable deploy key for the # destination repo on GitHub. - secret = os.environ.get('GECKOVIEW_DOCS_UPLOAD_SECRET', '').format(**fmt) + secret = os.environ.get("GECKOVIEW_DOCS_UPLOAD_SECRET", "").format(**fmt) if secret: # Set up a private key from the secrets store if applicable. 
import requests - req = requests.get('http://taskcluster/secrets/v1/secret/' + secret) + + req = requests.get("http://taskcluster/secrets/v1/secret/" + secret) req.raise_for_status() - keyfile = mozpath.abspath('gv-docs-upload-key') - with open(keyfile, 'w') as f: + keyfile = mozpath.abspath("gv-docs-upload-key") + with open(keyfile, "w") as f: os.chmod(keyfile, 0o600) - f.write(req.json()['secret']['content']) + f.write(req.json()["secret"]["content"]) # Turn off strict host key checking so ssh does not complain about # unknown github.com host. We're not pushing anything sensitive, so # it's okay to not check GitHub's host keys. - env['GIT_SSH_COMMAND'] = 'ssh -i "%s" -o StrictHostKeyChecking=no' % keyfile + env["GIT_SSH_COMMAND"] = 'ssh -i "%s" -o StrictHostKeyChecking=no' % keyfile # Clone remote repo. branch = upload_branch.format(**fmt) - repo_url = 'git@github.com:%s.git' % upload - repo_path = mozpath.abspath('gv-docs-repo') - self.run_process(['git', 'clone', '--branch', upload_branch, '--depth', '1', - repo_url, repo_path], append_env=env, pass_thru=True) - env['GIT_DIR'] = mozpath.join(repo_path, '.git') - env['GIT_WORK_TREE'] = repo_path - env['GIT_AUTHOR_NAME'] = env['GIT_COMMITTER_NAME'] = 'GeckoView Docs Bot' - env['GIT_AUTHOR_EMAIL'] = env['GIT_COMMITTER_EMAIL'] = 'nobody@mozilla.com' + repo_url = "git@github.com:%s.git" % upload + repo_path = mozpath.abspath("gv-docs-repo") + self.run_process( + [ + "git", + "clone", + "--branch", + upload_branch, + "--depth", + "1", + repo_url, + repo_path, + ], + append_env=env, + pass_thru=True, + ) + env["GIT_DIR"] = mozpath.join(repo_path, ".git") + env["GIT_WORK_TREE"] = repo_path + env["GIT_AUTHOR_NAME"] = env["GIT_COMMITTER_NAME"] = "GeckoView Docs Bot" + env["GIT_AUTHOR_EMAIL"] = env["GIT_COMMITTER_EMAIL"] = "nobody@mozilla.com" # Copy over user documentation. import mozfile # Extract new javadoc to specified directory inside repo. - src_tar = mozpath.join(self.topobjdir, 'gradle', 'build', 'mobile', 'android', - 'geckoview', 'libs', 'geckoview-javadoc.jar') + src_tar = mozpath.join( + self.topobjdir, + "gradle", + "build", + "mobile", + "android", + "geckoview", + "libs", + "geckoview-javadoc.jar", + ) dst_path = mozpath.join(repo_path, javadoc_path.format(**fmt)) mozfile.remove(dst_path) mozfile.extract_zip(src_tar, dst_path) # Commit and push. - self.run_process(['git', 'add', '--all'], append_env=env, pass_thru=True) - if self.run_process(['git', 'diff', '--cached', '--quiet'], - append_env=env, pass_thru=True, ensure_exit_code=False) != 0: + self.run_process(["git", "add", "--all"], append_env=env, pass_thru=True) + if ( + self.run_process( + ["git", "diff", "--cached", "--quiet"], + append_env=env, + pass_thru=True, + ensure_exit_code=False, + ) + != 0 + ): # We have something to commit. 
- self.run_process(['git', 'commit', - '--message', upload_message.format(**fmt)], - append_env=env, pass_thru=True) - self.run_process(['git', 'push', 'origin', branch], - append_env=env, pass_thru=True) + self.run_process( + ["git", "commit", "--message", upload_message.format(**fmt)], + append_env=env, + pass_thru=True, + ) + self.run_process( + ["git", "push", "origin", branch], append_env=env, pass_thru=True + ) mozfile.remove(repo_path) if secret: mozfile.remove(keyfile) return 0 - @Command('gradle', category='devenv', - description='Run gradle.', - conditions=[conditions.is_android]) - @CommandArgument('-v', '--verbose', action='store_true', - help='Verbose output for what commands the build is running.') - @CommandArgument('args', nargs=argparse.REMAINDER) + @Command( + "gradle", + category="devenv", + description="Run gradle.", + conditions=[conditions.is_android], + ) + @CommandArgument( + "-v", + "--verbose", + action="store_true", + help="Verbose output for what commands the build is running.", + ) + @CommandArgument("args", nargs=argparse.REMAINDER) def gradle(self, args, verbose=False): if not verbose: # Avoid logging the command @@ -280,10 +391,11 @@ REMOVED/DEPRECATED: Use 'mach lint --linter android-checkstyle'.""") # In automation, JAVA_HOME is set via mozconfig, which needs # to be specially handled in each mach command. This turns # $JAVA_HOME/bin/java into $JAVA_HOME. - java_home = os.path.dirname(os.path.dirname(self.substs['JAVA'])) + java_home = os.path.dirname(os.path.dirname(self.substs["JAVA"])) - gradle_flags = self.substs.get('GRADLE_FLAGS', '') or \ - os.environ.get('GRADLE_FLAGS', '') + gradle_flags = self.substs.get("GRADLE_FLAGS", "") or os.environ.get( + "GRADLE_FLAGS", "" + ) gradle_flags = shell_split(gradle_flags) # We force the Gradle JVM to run with the UTF-8 encoding, since we @@ -304,30 +416,32 @@ REMOVED/DEPRECATED: Use 'mach lint --linter android-checkstyle'.""") # https://discuss.gradle.org/t/unmappable-character-for-encoding-ascii-when-building-a-utf-8-project/10692/11 # NOQA: E501 # and especially https://stackoverflow.com/a/21755671. - if self.substs.get('MOZ_AUTOMATION'): - gradle_flags += ['--console=plain'] + if self.substs.get("MOZ_AUTOMATION"): + gradle_flags += ["--console=plain"] env = os.environ.copy() - env.update({ - 'GRADLE_OPTS': '-Dfile.encoding=utf-8', - 'JAVA_HOME': java_home, - 'JAVA_TOOL_OPTIONS': '-Dfile.encoding=utf-8', - }) + env.update( + { + "GRADLE_OPTS": "-Dfile.encoding=utf-8", + "JAVA_HOME": java_home, + "JAVA_TOOL_OPTIONS": "-Dfile.encoding=utf-8", + } + ) # Set ANDROID_SDK_ROOT if --with-android-sdk was set. # See https://bugzilla.mozilla.org/show_bug.cgi?id=1576471 - android_sdk_root = self.substs.get('ANDROID_SDK_ROOT', '') + android_sdk_root = self.substs.get("ANDROID_SDK_ROOT", "") if android_sdk_root: - env['ANDROID_SDK_ROOT'] = android_sdk_root + env["ANDROID_SDK_ROOT"] = android_sdk_root return self.run_process( - [self.substs['GRADLE']] + gradle_flags + args, + [self.substs["GRADLE"]] + gradle_flags + args, explicit_env=env, pass_thru=True, # Allow user to run gradle interactively. ensure_exit_code=False, # Don't throw on non-zero exit code. 
- cwd=mozpath.join(self.topsrcdir)) + cwd=mozpath.join(self.topsrcdir), + ) - @Command('gradle-install', category='devenv', - conditions=[REMOVED]) + @Command("gradle-install", category="devenv", conditions=[REMOVED]) def gradle_install_REMOVED(self): pass @@ -335,36 +449,50 @@ REMOVED/DEPRECATED: Use 'mach lint --linter android-checkstyle'.""") @CommandProvider class AndroidEmulatorCommands(MachCommandBase): """ - Run the Android emulator with one of the AVDs used in the Mozilla - automated test environment. If necessary, the AVD is fetched from - the tooltool server and installed. + Run the Android emulator with one of the AVDs used in the Mozilla + automated test environment. If necessary, the AVD is fetched from + the tooltool server and installed. """ - @Command('android-emulator', category='devenv', - conditions=[], - description='Run the Android emulator with an AVD from test automation. ' - 'Environment variable MOZ_EMULATOR_COMMAND_ARGS, if present, will ' - 'over-ride the command line arguments used to launch the emulator.') - @CommandArgument('--version', metavar='VERSION', - choices=['arm-4.3', 'x86-7.0'], - help='Specify which AVD to run in emulator. ' - 'One of "arm-4.3" (Android 4.3 supporting armv7 binaries), or ' - '"x86-7.0" (Android 7.0 supporting x86 or x86_64 binaries, ' - 'recommended for most applications). ' - 'By default, "arm-4.3" will be used if the current build environment ' - 'architecture is arm; otherwise "x86-7.0".') - @CommandArgument('--wait', action='store_true', - help='Wait for emulator to be closed.') - @CommandArgument('--force-update', action='store_true', - help='Update AVD definition even when AVD is already installed.') - @CommandArgument('--gpu', - help='Over-ride the emulator -gpu argument.') - @CommandArgument('--verbose', action='store_true', - help='Log informative status messages.') - def emulator(self, version, wait=False, force_update=False, gpu=None, verbose=False): + + @Command( + "android-emulator", + category="devenv", + conditions=[], + description="Run the Android emulator with an AVD from test automation. " + "Environment variable MOZ_EMULATOR_COMMAND_ARGS, if present, will " + "over-ride the command line arguments used to launch the emulator.", + ) + @CommandArgument( + "--version", + metavar="VERSION", + choices=["arm-4.3", "x86-7.0"], + help="Specify which AVD to run in emulator. " + 'One of "arm-4.3" (Android 4.3 supporting armv7 binaries), or ' + '"x86-7.0" (Android 7.0 supporting x86 or x86_64 binaries, ' + "recommended for most applications). " + 'By default, "arm-4.3" will be used if the current build environment ' + 'architecture is arm; otherwise "x86-7.0".', + ) + @CommandArgument( + "--wait", action="store_true", help="Wait for emulator to be closed." + ) + @CommandArgument( + "--force-update", + action="store_true", + help="Update AVD definition even when AVD is already installed.", + ) + @CommandArgument("--gpu", help="Over-ride the emulator -gpu argument.") + @CommandArgument( + "--verbose", action="store_true", help="Log informative status messages." 
+ ) + def emulator( + self, version, wait=False, force_update=False, gpu=None, verbose=False + ): from mozrunner.devices.android_device import AndroidEmulator - emulator = AndroidEmulator(version, verbose, substs=self.substs, - device_serial='emulator-5554') + emulator = AndroidEmulator( + version, verbose, substs=self.substs, device_serial="emulator-5554" + ) if emulator.is_running(): # It is possible to run multiple emulators simultaneously, but: # - if more than one emulator is using the same avd, errors may @@ -372,51 +500,86 @@ class AndroidEmulatorCommands(MachCommandBase): # - additional parameters must be specified when running tests, # to select a specific device. # To avoid these complications, allow just one emulator at a time. - self.log(logging.ERROR, "emulator", {}, - "An Android emulator is already running.\n" - "Close the existing emulator and re-run this command.") + self.log( + logging.ERROR, + "emulator", + {}, + "An Android emulator is already running.\n" + "Close the existing emulator and re-run this command.", + ) return 1 if not emulator.is_available(): - self.log(logging.WARN, "emulator", {}, - "Emulator binary not found.\n" - "Install the Android SDK and make sure 'emulator' is in your PATH.") + self.log( + logging.WARN, + "emulator", + {}, + "Emulator binary not found.\n" + "Install the Android SDK and make sure 'emulator' is in your PATH.", + ) return 2 if not emulator.check_avd(force_update): - self.log(logging.INFO, "emulator", {}, - "Fetching and installing AVD. This may take a few minutes...") + self.log( + logging.INFO, + "emulator", + {}, + "Fetching and installing AVD. This may take a few minutes...", + ) emulator.update_avd(force_update) - self.log(logging.INFO, "emulator", {}, - "Starting Android emulator running %s..." % - emulator.get_avd_description()) + self.log( + logging.INFO, + "emulator", + {}, + "Starting Android emulator running %s..." % emulator.get_avd_description(), + ) emulator.start(gpu) if emulator.wait_for_start(): - self.log(logging.INFO, "emulator", {}, - "Android emulator is running.") + self.log(logging.INFO, "emulator", {}, "Android emulator is running.") else: # This is unusual but the emulator may still function. - self.log(logging.WARN, "emulator", {}, - "Unable to verify that emulator is running.") + self.log( + logging.WARN, + "emulator", + {}, + "Unable to verify that emulator is running.", + ) if conditions.is_android(self): - self.log(logging.INFO, "emulator", {}, - "Use 'mach install' to install or update Firefox on your emulator.") + self.log( + logging.INFO, + "emulator", + {}, + "Use 'mach install' to install or update Firefox on your emulator.", + ) else: - self.log(logging.WARN, "emulator", {}, - "No Firefox for Android build detected.\n" - "Switch to a Firefox for Android build context or use 'mach bootstrap'\n" - "to setup an Android build environment.") + self.log( + logging.WARN, + "emulator", + {}, + "No Firefox for Android build detected.\n" + "Switch to a Firefox for Android build context or use 'mach bootstrap'\n" + "to setup an Android build environment.", + ) if wait: - self.log(logging.INFO, "emulator", {}, - "Waiting for Android emulator to close...") + self.log( + logging.INFO, "emulator", {}, "Waiting for Android emulator to close..." + ) rc = emulator.wait() if rc is not None: - self.log(logging.INFO, "emulator", {}, - "Android emulator completed with return code %d." % rc) + self.log( + logging.INFO, + "emulator", + {}, + "Android emulator completed with return code %d." 
% rc, + ) else: - self.log(logging.WARN, "emulator", {}, - "Unable to retrieve Android emulator return code.") + self.log( + logging.WARN, + "emulator", + {}, + "Unable to retrieve Android emulator return code.", + ) return 0 diff --git a/python/mozbuild/mozbuild/mach_commands.py b/python/mozbuild/mozbuild/mach_commands.py index 241644198099..df89c4c7183f 100644 --- a/python/mozbuild/mozbuild/mach_commands.py +++ b/python/mozbuild/mozbuild/mach_commands.py @@ -38,7 +38,7 @@ from mozbuild.util import MOZBUILD_METRICS_PATH here = os.path.abspath(os.path.dirname(__file__)) -EXCESSIVE_SWAP_MESSAGE = ''' +EXCESSIVE_SWAP_MESSAGE = """ =================== PERFORMANCE WARNING @@ -53,13 +53,15 @@ https://bugzilla.mozilla.org/enter_bug.cgi?product=Firefox%20Build%20System&comp and tell us about your machine and build configuration so we can adjust the warning heuristic. =================== -''' +""" class StoreDebugParamsAndWarnAction(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): - sys.stderr.write('The --debugparams argument is deprecated. Please ' + - 'use --debugger-args instead.\n\n') + sys.stderr.write( + "The --debugparams argument is deprecated. Please " + + "use --debugger-args instead.\n\n" + ) setattr(namespace, self.dest, values) @@ -67,32 +69,47 @@ class StoreDebugParamsAndWarnAction(argparse.Action): class Watch(MachCommandBase): """Interface to watch and re-build the tree.""" - @Command('watch', category='post-build', description='Watch and re-build the tree.', - conditions=[conditions.is_firefox]) - @CommandArgument('-v', '--verbose', action='store_true', - help='Verbose output for what commands the watcher is running.') + @Command( + "watch", + category="post-build", + description="Watch and re-build the tree.", + conditions=[conditions.is_firefox], + ) + @CommandArgument( + "-v", + "--verbose", + action="store_true", + help="Verbose output for what commands the watcher is running.", + ) def watch(self, verbose=False): """Watch and re-build the source tree.""" if not conditions.is_artifact_build(self): - print('mach watch requires an artifact build. See ' - 'https://developer.mozilla.org/docs/Mozilla/Developer_guide/Build_Instructions/Simple_Firefox_build') # noqa + print( + "mach watch requires an artifact build. See " + "https://developer.mozilla.org/docs/Mozilla/Developer_guide/Build_Instructions/Simple_Firefox_build" # noqa + ) return 1 - if not self.substs.get('WATCHMAN', None): - print('mach watch requires watchman to be installed. See ' - 'https://developer.mozilla.org/docs/Mozilla/Developer_guide/Build_Instructions/Incremental_builds_with_filesystem_watching') # noqa + if not self.substs.get("WATCHMAN", None): + print( + "mach watch requires watchman to be installed. See " + "https://developer.mozilla.org/docs/Mozilla/Developer_guide/Build_Instructions/Incremental_builds_with_filesystem_watching" # noqa + ) return 1 self.activate_virtualenv() try: - self.virtualenv_manager.install_pip_package('pywatchman==1.4.1') + self.virtualenv_manager.install_pip_package("pywatchman==1.4.1") except Exception: - print('Could not install pywatchman from pip. See ' - 'https://developer.mozilla.org/docs/Mozilla/Developer_guide/Build_Instructions/Incremental_builds_with_filesystem_watching') # noqa + print( + "Could not install pywatchman from pip. 
See " + "https://developer.mozilla.org/docs/Mozilla/Developer_guide/Build_Instructions/Incremental_builds_with_filesystem_watching" # noqa + ) return 1 from mozbuild.faster_daemon import Daemon + daemon = Daemon(self.config_environment) try: @@ -106,55 +123,76 @@ class Watch(MachCommandBase): class CargoProvider(MachCommandBase): """Invoke cargo in useful ways.""" - @Command('cargo', category='build', - description='Invoke cargo in useful ways.') + @Command("cargo", category="build", description="Invoke cargo in useful ways.") def cargo(self): - self._sub_mach(['help', 'cargo']) + self._sub_mach(["help", "cargo"]) return 1 - @SubCommand('cargo', 'check', - description='Run `cargo check` on a given crate. Defaults to gkrust.') - @CommandArgument('--all-crates', default=None, action='store_true', - help='Check all of the crates in the tree.') - @CommandArgument('crates', default=None, nargs='*', help='The crate name(s) to check.') - @CommandArgument('--jobs', '-j', default='1', nargs='?', metavar='jobs', type=int, - help='Run the tests in parallel using multiple processes.') - @CommandArgument('-v', '--verbose', action='store_true', - help='Verbose output.') + @SubCommand( + "cargo", + "check", + description="Run `cargo check` on a given crate. Defaults to gkrust.", + ) + @CommandArgument( + "--all-crates", + default=None, + action="store_true", + help="Check all of the crates in the tree.", + ) + @CommandArgument( + "crates", default=None, nargs="*", help="The crate name(s) to check." + ) + @CommandArgument( + "--jobs", + "-j", + default="1", + nargs="?", + metavar="jobs", + type=int, + help="Run the tests in parallel using multiple processes.", + ) + @CommandArgument("-v", "--verbose", action="store_true", help="Verbose output.") def check(self, all_crates=None, crates=None, jobs=0, verbose=False): # XXX duplication with `mach vendor rust` crates_and_roots = { - 'gkrust': 'toolkit/library/rust', - 'gkrust-gtest': 'toolkit/library/gtest/rust', - 'js': 'js/rust', - 'mozjs_sys': 'js/src', - 'baldrdash': 'js/src/wasm/cranelift', - 'geckodriver': 'testing/geckodriver', + "gkrust": "toolkit/library/rust", + "gkrust-gtest": "toolkit/library/gtest/rust", + "js": "js/rust", + "mozjs_sys": "js/src", + "baldrdash": "js/src/wasm/cranelift", + "geckodriver": "testing/geckodriver", } if all_crates: crates = crates_and_roots.keys() elif crates is None or crates == []: - crates = ['gkrust'] + crates = ["gkrust"] for crate in crates: root = crates_and_roots.get(crate, None) if not root: - print('Cannot locate crate %s. Please check your spelling or ' - 'add the crate information to the list.' % crate) + print( + "Cannot locate crate %s. Please check your spelling or " + "add the crate information to the list." 
% crate + ) return 1 check_targets = [ - 'force-cargo-library-check', - 'force-cargo-host-library-check', - 'force-cargo-program-check', - 'force-cargo-host-program-check', + "force-cargo-library-check", + "force-cargo-host-library-check", + "force-cargo-program-check", + "force-cargo-host-program-check", ] - ret = self._run_make(srcdir=False, directory=root, - ensure_exit_code=0, silent=not verbose, - print_directory=False, target=check_targets, - num_jobs=jobs) + ret = self._run_make( + srcdir=False, + directory=root, + ensure_exit_code=0, + silent=not verbose, + print_directory=False, + target=check_targets, + num_jobs=jobs, + ) if ret != 0: return ret @@ -164,13 +202,18 @@ class CargoProvider(MachCommandBase): @CommandProvider class Doctor(MachCommandBase): """Provide commands for diagnosing common build environment problems""" - @Command('doctor', category='devenv', - description='') - @CommandArgument('--fix', default=None, action='store_true', - help='Attempt to fix found problems.') + + @Command("doctor", category="devenv", description="") + @CommandArgument( + "--fix", + default=None, + action="store_true", + help="Attempt to fix found problems.", + ) def doctor(self, fix=None): self.activate_virtualenv() from mozbuild.doctor import Doctor + doctor = Doctor(self.topsrcdir, self.topobjdir, fix) return doctor.check_all() @@ -178,16 +221,21 @@ class Doctor(MachCommandBase): @CommandProvider(metrics_path=MOZBUILD_METRICS_PATH) class Clobber(MachCommandBase): NO_AUTO_LOG = True - CLOBBER_CHOICES = set(['objdir', 'python', 'gradle']) + CLOBBER_CHOICES = set(["objdir", "python", "gradle"]) - @Command('clobber', category='build', - description='Clobber the tree (delete the object directory).') - @CommandArgument('what', default=['objdir', 'python'], nargs='*', - help='Target to clobber, must be one of {{{}}} (default ' - 'objdir and python).'.format( - ', '.join(CLOBBER_CHOICES))) - @CommandArgument('--full', action='store_true', - help='Perform a full clobber') + @Command( + "clobber", + category="build", + description="Clobber the tree (delete the object directory).", + ) + @CommandArgument( + "what", + default=["objdir", "python"], + nargs="*", + help="Target to clobber, must be one of {{{}}} (default " + "objdir and python).".format(", ".join(CLOBBER_CHOICES)), + ) + @CommandArgument("--full", action="store_true", help="Perform a full clobber") def clobber(self, what, full=False): """Clean up the source and object directories. @@ -217,43 +265,75 @@ class Clobber(MachCommandBase): what = set(what) invalid = what - self.CLOBBER_CHOICES if invalid: - print('Unknown clobber target(s): {}'.format(', '.join(invalid))) + print("Unknown clobber target(s): {}".format(", ".join(invalid))) return 1 ret = 0 - if 'objdir' in what: + if "objdir" in what: from mozbuild.controller.clobber import Clobberer + try: - Clobberer(self.topsrcdir, self.topobjdir, self.substs).remove_objdir(full) + Clobberer(self.topsrcdir, self.topobjdir, self.substs).remove_objdir( + full + ) except OSError as e: - if sys.platform.startswith('win'): + if sys.platform.startswith("win"): if isinstance(e, WindowsError) and e.winerror in (5, 32): - self.log(logging.ERROR, 'file_access_error', {'error': e}, - "Could not clobber because a file was in use. If the " - "application is running, try closing it. {error}") + self.log( + logging.ERROR, + "file_access_error", + {"error": e}, + "Could not clobber because a file was in use. If the " + "application is running, try closing it. 
{error}", + ) return 1 raise - if 'python' in what: + if "python" in what: if conditions.is_hg(self): - cmd = ['hg', '--config', 'extensions.purge=', 'purge', '--all', - '-I', 'glob:**.py[cdo]', '-I', 'glob:**/__pycache__', - '-I', 'path:third_party/python/'] + cmd = [ + "hg", + "--config", + "extensions.purge=", + "purge", + "--all", + "-I", + "glob:**.py[cdo]", + "-I", + "glob:**/__pycache__", + "-I", + "path:third_party/python/", + ] elif conditions.is_git(self): - cmd = ['git', 'clean', '-d', '-f', '-x', '*.py[cdo]', '*/__pycache__/*', - 'third_party/python/'] + cmd = [ + "git", + "clean", + "-d", + "-f", + "-x", + "*.py[cdo]", + "*/__pycache__/*", + "third_party/python/", + ] else: # We don't know what is tracked/untracked if we don't have VCS. # So we can't clean python/ and third_party/python/. - cmd = ['find', '.', '-type', 'f', '-name', '*.py[cdo]', - '-delete'] + cmd = ["find", ".", "-type", "f", "-name", "*.py[cdo]", "-delete"] subprocess.call(cmd, cwd=self.topsrcdir) - cmd = ['find', '.', '-type', 'd', '-name', '__pycache__', - '-empty', '-delete'] + cmd = [ + "find", + ".", + "-type", + "d", + "-name", + "__pycache__", + "-empty", + "-delete", + ] ret = subprocess.call(cmd, cwd=self.topsrcdir) - if 'gradle' in what: - shutil.rmtree(mozpath.join(self.topobjdir, 'gradle')) + if "gradle" in what: + shutil.rmtree(mozpath.join(self.topobjdir, "gradle")) return ret @@ -268,25 +348,29 @@ class Clobber(MachCommandBase): @CommandProvider class Logs(MachCommandBase): """Provide commands to read mach logs.""" + NO_AUTO_LOG = True - @Command('show-log', category='post-build', - description='Display mach logs') - @CommandArgument('log_file', nargs='?', type=argparse.FileType('rb'), - help='Filename to read log data from. Defaults to the log of the last ' - 'mach command.') + @Command("show-log", category="post-build", description="Display mach logs") + @CommandArgument( + "log_file", + nargs="?", + type=argparse.FileType("rb"), + help="Filename to read log data from. Defaults to the log of the last " + "mach command.", + ) def show_log(self, log_file=None): if not log_file: - path = self._get_state_filename('last_log.json') - log_file = open(path, 'rb') + path = self._get_state_filename("last_log.json") + log_file = open(path, "rb") if os.isatty(sys.stdout.fileno()): env = dict(os.environ) - if 'LESS' not in env: + if "LESS" not in env: # Sensible default flags if none have been set in the user # environment. - env[b'LESS'] = b'FRX' - less = subprocess.Popen(['less'], stdin=subprocess.PIPE, env=env) + env[b"LESS"] = b"FRX" + less = subprocess.Popen(["less"], stdin=subprocess.PIPE, env=env) # Various objects already have a reference to sys.stdout, so we # can't just change it, we need to change the file descriptor under # it to redirect to less's input. 
@@ -299,17 +383,18 @@ class Logs(MachCommandBase): created, action, params = json.loads(line) if not startTime: startTime = created - self.log_manager.terminal_handler.formatter.start_time = \ - created - if 'line' in params: - record = logging.makeLogRecord({ - 'created': created, - 'name': self._logger.name, - 'levelno': logging.INFO, - 'msg': '{line}', - 'params': params, - 'action': action, - }) + self.log_manager.terminal_handler.formatter.start_time = created + if "line" in params: + record = logging.makeLogRecord( + { + "created": created, + "name": self._logger.name, + "levelno": logging.INFO, + "msg": "{line}", + "params": params, + "action": action, + } + ) self._logger.handle(record) if self.log_manager.terminal: @@ -330,7 +415,7 @@ class Warnings(MachCommandBase): @property def database_path(self): - return self._get_state_filename('warnings.json') + return self._get_state_filename("warnings.json") @property def database(self): @@ -345,13 +430,24 @@ class Warnings(MachCommandBase): return database - @Command('warnings-summary', category='post-build', - description='Show a summary of compiler warnings.') - @CommandArgument('-C', '--directory', default=None, - help='Change to a subdirectory of the build directory first.') - @CommandArgument('report', default=None, nargs='?', - help='Warnings report to display. If not defined, show the most ' - 'recent report.') + @Command( + "warnings-summary", + category="post-build", + description="Show a summary of compiler warnings.", + ) + @CommandArgument( + "-C", + "--directory", + default=None, + help="Change to a subdirectory of the build directory first.", + ) + @CommandArgument( + "report", + default=None, + nargs="?", + help="Warnings report to display. If not defined, show the most " + "recent report.", + ) def summary(self, directory=None, report=None): database = self.database @@ -363,25 +459,36 @@ class Warnings(MachCommandBase): dirpath = None type_counts = database.type_counts(dirpath) - sorted_counts = sorted(type_counts.items(), - key=operator.itemgetter(1)) + sorted_counts = sorted(type_counts.items(), key=operator.itemgetter(1)) total = 0 for k, v in sorted_counts: - print('%d\t%s' % (v, k)) + print("%d\t%s" % (v, k)) total += v - print('%d\tTotal' % total) + print("%d\tTotal" % total) - @Command('warnings-list', category='post-build', - description='Show a list of compiler warnings.') - @CommandArgument('-C', '--directory', default=None, - help='Change to a subdirectory of the build directory first.') - @CommandArgument('--flags', default=None, nargs='+', - help='Which warnings flags to match.') - @CommandArgument('report', default=None, nargs='?', - help='Warnings report to display. If not defined, show the most ' - 'recent report.') + @Command( + "warnings-list", + category="post-build", + description="Show a list of compiler warnings.", + ) + @CommandArgument( + "-C", + "--directory", + default=None, + help="Change to a subdirectory of the build directory first.", + ) + @CommandArgument( + "--flags", default=None, nargs="+", help="Which warnings flags to match." + ) + @CommandArgument( + "report", + default=None, + nargs="?", + help="Warnings report to display. If not defined, show the most " + "recent report.", + ) def list(self, directory=None, flags=None, report=None): database = self.database @@ -397,27 +504,36 @@ class Warnings(MachCommandBase): if flags: # Flatten lists of flags. 
- flags = set(itertools.chain(*[flaglist.split(',') for flaglist in flags])) + flags = set(itertools.chain(*[flaglist.split(",") for flaglist in flags])) for warning in by_name: - filename = mozpath.normsep(warning['filename']) + filename = mozpath.normsep(warning["filename"]) if filename.startswith(topsrcdir): - filename = filename[len(topsrcdir) + 1:] + filename = filename[len(topsrcdir) + 1 :] if directory and not filename.startswith(directory): continue - if flags and warning['flag'] not in flags: + if flags and warning["flag"] not in flags: continue - if warning['column'] is not None: - print('%s:%d:%d [%s] %s' % ( - filename, warning['line'], warning['column'], - warning['flag'], warning['message'])) + if warning["column"] is not None: + print( + "%s:%d:%d [%s] %s" + % ( + filename, + warning["line"], + warning["column"], + warning["flag"], + warning["message"], + ) + ) else: - print('%s:%d [%s] %s' % (filename, warning['line'], - warning['flag'], warning['message'])) + print( + "%s:%d [%s] %s" + % (filename, warning["line"], warning["flag"], warning["message"]) + ) def join_ensure_dir(self, dir1, dir2): dir1 = mozpath.normpath(dir1) @@ -425,68 +541,127 @@ class Warnings(MachCommandBase): joined_path = mozpath.join(dir1, dir2) if os.path.isdir(joined_path): return joined_path - print('Specified directory not found.') + print("Specified directory not found.") return None @CommandProvider class GTestCommands(MachCommandBase): - @Command('gtest', category='testing', - description='Run GTest unit tests (C++ tests).') - @CommandArgument('gtest_filter', default=b"*", nargs='?', metavar='gtest_filter', - help="test_filter is a ':'-separated list of wildcard patterns " - "(called the positive patterns), optionally followed by a '-' " - "and another ':'-separated pattern list (called the negative patterns).") - @CommandArgument('--jobs', '-j', default='1', nargs='?', metavar='jobs', type=int, - help='Run the tests in parallel using multiple processes.') - @CommandArgument('--tbpl-parser', '-t', action='store_true', - help='Output test results in a format that can be parsed by TBPL.') - @CommandArgument('--shuffle', '-s', action='store_true', - help='Randomize the execution order of tests.') - @CommandArgument('--enable-webrender', action='store_true', - default=False, dest='enable_webrender', - help='Enable the WebRender compositor in Gecko.') - @CommandArgumentGroup('Android') - @CommandArgument('--package', - default='org.mozilla.geckoview.test', - group='Android', - help='Package name of test app.') - @CommandArgument('--adbpath', - dest='adb_path', - group='Android', - help='Path to adb binary.') - @CommandArgument('--deviceSerial', - dest='device_serial', - group='Android', - help="adb serial number of remote device. " - "Required when more than one device is connected to the host. " - "Use 'adb devices' to see connected devices.") - @CommandArgument('--remoteTestRoot', - dest='remote_test_root', - group='Android', - help='Remote directory to use as test root ' - '(eg. /data/local/tmp/test_root).') - @CommandArgument('--libxul', - dest='libxul_path', - group='Android', - help='Path to gtest libxul.so.') - @CommandArgument('--no-install', action='store_true', - default=False, - group='Android', - help='Skip the installation of the APK.') - @CommandArgumentGroup('debugging') - @CommandArgument('--debug', action='store_true', group='debugging', - help='Enable the debugger. 
Not specifying a --debugger option will result in ' - 'the default debugger being used.') - @CommandArgument('--debugger', default=None, type=str, group='debugging', - help='Name of debugger to use.') - @CommandArgument('--debugger-args', default=None, metavar='params', type=str, - group='debugging', - help='Command-line arguments to pass to the debugger itself; ' - 'split as the Bourne shell would.') - def gtest(self, shuffle, jobs, gtest_filter, tbpl_parser, enable_webrender, - package, adb_path, device_serial, remote_test_root, libxul_path, no_install, - debug, debugger, debugger_args): + @Command( + "gtest", category="testing", description="Run GTest unit tests (C++ tests)." + ) + @CommandArgument( + "gtest_filter", + default=b"*", + nargs="?", + metavar="gtest_filter", + help="test_filter is a ':'-separated list of wildcard patterns " + "(called the positive patterns), optionally followed by a '-' " + "and another ':'-separated pattern list (called the negative patterns).", + ) + @CommandArgument( + "--jobs", + "-j", + default="1", + nargs="?", + metavar="jobs", + type=int, + help="Run the tests in parallel using multiple processes.", + ) + @CommandArgument( + "--tbpl-parser", + "-t", + action="store_true", + help="Output test results in a format that can be parsed by TBPL.", + ) + @CommandArgument( + "--shuffle", + "-s", + action="store_true", + help="Randomize the execution order of tests.", + ) + @CommandArgument( + "--enable-webrender", + action="store_true", + default=False, + dest="enable_webrender", + help="Enable the WebRender compositor in Gecko.", + ) + @CommandArgumentGroup("Android") + @CommandArgument( + "--package", + default="org.mozilla.geckoview.test", + group="Android", + help="Package name of test app.", + ) + @CommandArgument( + "--adbpath", dest="adb_path", group="Android", help="Path to adb binary." + ) + @CommandArgument( + "--deviceSerial", + dest="device_serial", + group="Android", + help="adb serial number of remote device. " + "Required when more than one device is connected to the host. " + "Use 'adb devices' to see connected devices.", + ) + @CommandArgument( + "--remoteTestRoot", + dest="remote_test_root", + group="Android", + help="Remote directory to use as test root " "(eg. /data/local/tmp/test_root).", + ) + @CommandArgument( + "--libxul", dest="libxul_path", group="Android", help="Path to gtest libxul.so." + ) + @CommandArgument( + "--no-install", + action="store_true", + default=False, + group="Android", + help="Skip the installation of the APK.", + ) + @CommandArgumentGroup("debugging") + @CommandArgument( + "--debug", + action="store_true", + group="debugging", + help="Enable the debugger. 
Not specifying a --debugger option will result in " + "the default debugger being used.", + ) + @CommandArgument( + "--debugger", + default=None, + type=str, + group="debugging", + help="Name of debugger to use.", + ) + @CommandArgument( + "--debugger-args", + default=None, + metavar="params", + type=str, + group="debugging", + help="Command-line arguments to pass to the debugger itself; " + "split as the Bourne shell would.", + ) + def gtest( + self, + shuffle, + jobs, + gtest_filter, + tbpl_parser, + enable_webrender, + package, + adb_path, + device_serial, + remote_test_root, + libxul_path, + no_install, + debug, + debugger, + debugger_args, + ): # We lazy build gtest because it's slow to link try: @@ -495,17 +670,19 @@ class GTestCommands(MachCommandBase): print("Please run |./mach build| before |./mach gtest|.") return 1 - res = self._mach_context.commands.dispatch('build', self._mach_context, - what=['recurse_gtest']) + res = self._mach_context.commands.dispatch( + "build", self._mach_context, what=["recurse_gtest"] + ) if res: print("Could not build xul-gtest") return res - if self.substs.get('MOZ_WIDGET_TOOLKIT') == 'cocoa': - self._run_make(directory='browser/app', target='repackage', - ensure_exit_code=True) + if self.substs.get("MOZ_WIDGET_TOOLKIT") == "cocoa": + self._run_make( + directory="browser/app", target="repackage", ensure_exit_code=True + ) - cwd = os.path.join(self.topobjdir, '_tests', 'gtest') + cwd = os.path.join(self.topobjdir, "_tests", "gtest") if not os.path.isdir(cwd): os.makedirs(cwd) @@ -514,23 +691,39 @@ class GTestCommands(MachCommandBase): if jobs != 1: print("--jobs is not supported on Android and will be ignored") if debug or debugger or debugger_args: - print("--debug options are not supported on Android and will be ignored") + print( + "--debug options are not supported on Android and will be ignored" + ) from mozrunner.devices.android_device import InstallIntent - return self.android_gtest(cwd, shuffle, gtest_filter, - package, adb_path, device_serial, - remote_test_root, libxul_path, - enable_webrender, - InstallIntent.NO if no_install else InstallIntent.YES) - if package or adb_path or device_serial or remote_test_root or libxul_path or no_install: + return self.android_gtest( + cwd, + shuffle, + gtest_filter, + package, + adb_path, + device_serial, + remote_test_root, + libxul_path, + enable_webrender, + InstallIntent.NO if no_install else InstallIntent.YES, + ) + + if ( + package + or adb_path + or device_serial + or remote_test_root + or libxul_path + or no_install + ): print("One or more Android-only options will be ignored") - app_path = self.get_binary_path('app') - args = [app_path, '-unittest', '--gtest_death_test_style=threadsafe'] + app_path = self.get_binary_path("app") + args = [app_path, "-unittest", "--gtest_death_test_style=threadsafe"] - if sys.platform.startswith('win') and \ - 'MOZ_LAUNCHER_PROCESS' in self.defines: - args.append('--wait-for-browser') + if sys.platform.startswith("win") and "MOZ_LAUNCHER_PROCESS" in self.defines: + args.append("--wait-for-browser") if debug or debugger or debugger_args: args = self.prepend_debugger_args(args, debugger, debugger_args) @@ -540,7 +733,7 @@ class GTestCommands(MachCommandBase): # Use GTest environment variable to control test execution # For details see: # https://code.google.com/p/googletest/wiki/AdvancedGuide#Running_Test_Programs:_Advanced_Options - gtest_env = {b'GTEST_FILTER': gtest_filter} + gtest_env = {b"GTEST_FILTER": gtest_filter} # Note: we must normalize the path here so 
that gtest on Windows sees # a MOZ_GMP_PATH which has only Windows dir seperators, because @@ -548,8 +741,7 @@ class GTestCommands(MachCommandBase): xre_path = os.path.join(os.path.normpath(self.topobjdir), "dist", "bin") gtest_env["MOZ_XRE_DIR"] = xre_path gtest_env["MOZ_GMP_PATH"] = os.pathsep.join( - os.path.join(xre_path, p, "1.0") - for p in ('gmp-fake', 'gmp-fakeopenh264') + os.path.join(xre_path, p, "1.0") for p in ("gmp-fake", "gmp-fakeopenh264") ) gtest_env[b"MOZ_RUN_GTEST"] = b"True" @@ -567,30 +759,33 @@ class GTestCommands(MachCommandBase): gtest_env[b"MOZ_WEBRENDER"] = b"0" if jobs == 1: - return self.run_process(args=args, - append_env=gtest_env, - cwd=cwd, - ensure_exit_code=False, - pass_thru=True) + return self.run_process( + args=args, + append_env=gtest_env, + cwd=cwd, + ensure_exit_code=False, + pass_thru=True, + ) from mozprocess import ProcessHandlerMixin import functools def handle_line(job_id, line): # Prepend the jobId - line = '[%d] %s' % (job_id + 1, line.strip()) - self.log(logging.INFO, "GTest", {'line': line}, '{line}') + line = "[%d] %s" % (job_id + 1, line.strip()) + self.log(logging.INFO, "GTest", {"line": line}, "{line}") gtest_env["GTEST_TOTAL_SHARDS"] = str(jobs) processes = {} for i in range(0, jobs): gtest_env["GTEST_SHARD_INDEX"] = str(i) - processes[i] = ProcessHandlerMixin([app_path, "-unittest"], - cwd=cwd, - env=gtest_env, - processOutputLine=[ - functools.partial(handle_line, i)], - universal_newlines=True) + processes[i] = ProcessHandlerMixin( + [app_path, "-unittest"], + cwd=cwd, + env=gtest_env, + processOutputLine=[functools.partial(handle_line, i)], + universal_newlines=True, + ) processes[i].run() exit_code = 0 @@ -606,56 +801,85 @@ class GTestCommands(MachCommandBase): return exit_code - def android_gtest(self, test_dir, shuffle, gtest_filter, - package, adb_path, device_serial, remote_test_root, libxul_path, - enable_webrender, install): + def android_gtest( + self, + test_dir, + shuffle, + gtest_filter, + package, + adb_path, + device_serial, + remote_test_root, + libxul_path, + enable_webrender, + install, + ): # setup logging for mozrunner from mozlog.commandline import setup_logging - format_args = {'level': self._mach_context.settings['test']['level']} - default_format = self._mach_context.settings['test']['format'] - setup_logging('mach-gtest', {}, {default_format: sys.stdout}, format_args) + + format_args = {"level": self._mach_context.settings["test"]["level"]} + default_format = self._mach_context.settings["test"]["format"] + setup_logging("mach-gtest", {}, {default_format: sys.stdout}, format_args) # ensure that a device is available and test app is installed - from mozrunner.devices.android_device import (verify_android_device, get_adb_path) - verify_android_device(self, install=install, app=package, device_serial=device_serial) + from mozrunner.devices.android_device import verify_android_device, get_adb_path + + verify_android_device( + self, install=install, app=package, device_serial=device_serial + ) if not adb_path: adb_path = get_adb_path(self) if not libxul_path: - libxul_path = os.path.join(self.topobjdir, "dist", "bin", "gtest", "libxul.so") + libxul_path = os.path.join( + self.topobjdir, "dist", "bin", "gtest", "libxul.so" + ) # run gtest via remotegtests.py exit_code = 0 import imp - path = os.path.join('testing', 'gtest', 'remotegtests.py') - with open(path, 'r') as fh: - imp.load_module('remotegtests', fh, path, - ('.py', 'r', imp.PY_SOURCE)) + + path = os.path.join("testing", "gtest", "remotegtests.py") + with 
open(path, "r") as fh: + imp.load_module("remotegtests", fh, path, (".py", "r", imp.PY_SOURCE)) import remotegtests + tester = remotegtests.RemoteGTests() - if not tester.run_gtest(test_dir, shuffle, gtest_filter, package, adb_path, device_serial, - remote_test_root, libxul_path, None, enable_webrender): + if not tester.run_gtest( + test_dir, + shuffle, + gtest_filter, + package, + adb_path, + device_serial, + remote_test_root, + libxul_path, + None, + enable_webrender, + ): exit_code = 1 tester.cleanup() return exit_code def prepend_debugger_args(self, args, debugger, debugger_args): - ''' + """ Given an array with program arguments, prepend arguments to run it under a debugger. :param args: The executable and arguments used to run the process normally. :param debugger: The debugger to use, or empty to use the default debugger. :param debugger_args: Any additional parameters to pass to the debugger. - ''' + """ import mozdebug if not debugger: # No debugger name was provided. Look for the default ones on # current OS. - debugger = mozdebug.get_default_debugger_name(mozdebug.DebuggerSearch.KeepLooking) + debugger = mozdebug.get_default_debugger_name( + mozdebug.DebuggerSearch.KeepLooking + ) if debugger: debuggerInfo = mozdebug.get_debugger_info(debugger, debugger_args) @@ -668,10 +892,13 @@ class GTestCommands(MachCommandBase): # their use. if debugger_args: from mozbuild import shellutil + try: debugger_args = shellutil.split(debugger_args) except shellutil.MetaCharacterException as e: - print("The --debugger_args you passed require a real shell to parse them.") + print( + "The --debugger_args you passed require a real shell to parse them." + ) print("(We can't handle the %r character.)" % e.char) return None @@ -684,25 +911,39 @@ class GTestCommands(MachCommandBase): class Package(MachCommandBase): """Package the built product for distribution.""" - @Command('package', category='post-build', - description='Package the built product for distribution as an APK, DMG, etc.') - @CommandArgument('-v', '--verbose', action='store_true', - help='Verbose output for what commands the packaging process is running.') + @Command( + "package", + category="post-build", + description="Package the built product for distribution as an APK, DMG, etc.", + ) + @CommandArgument( + "-v", + "--verbose", + action="store_true", + help="Verbose output for what commands the packaging process is running.", + ) def package(self, verbose=False): - ret = self._run_make(directory=".", target='package', - silent=not verbose, ensure_exit_code=False) + ret = self._run_make( + directory=".", target="package", silent=not verbose, ensure_exit_code=False + ) if ret == 0: - self.notify('Packaging complete') + self.notify("Packaging complete") return ret def _get_android_install_parser(): parser = argparse.ArgumentParser() - parser.add_argument('--app', default='org.mozilla.geckoview_example', - help='Android package to install ' - '(default: org.mozilla.geckoview_example)') - parser.add_argument('--verbose', '-v', action='store_true', - help='Print verbose output when installing.') + parser.add_argument( + "--app", + default="org.mozilla.geckoview_example", + help="Android package to install " "(default: org.mozilla.geckoview_example)", + ) + parser.add_argument( + "--verbose", + "-v", + action="store_true", + help="Print verbose output when installing.", + ) return parser @@ -717,143 +958,264 @@ def setup_install_parser(): class Install(MachCommandBase): """Install a package.""" - @Command('install', category='post-build', - 
conditions=[conditions.has_build], - parser=setup_install_parser, - description='Install the package on the machine (or device in the case of Android).') + @Command( + "install", + category="post-build", + conditions=[conditions.has_build], + parser=setup_install_parser, + description="Install the package on the machine (or device in the case of Android).", + ) def install(self, **kwargs): if conditions.is_android(self): - from mozrunner.devices.android_device import (verify_android_device, InstallIntent) + from mozrunner.devices.android_device import ( + verify_android_device, + InstallIntent, + ) + ret = verify_android_device(self, install=InstallIntent.YES, **kwargs) == 0 else: - ret = self._run_make(directory=".", target='install', ensure_exit_code=False) + ret = self._run_make( + directory=".", target="install", ensure_exit_code=False + ) if ret == 0: - self.notify('Install complete') + self.notify("Install complete") return ret @SettingsProvider -class RunSettings(): +class RunSettings: config_settings = [ - ('runprefs.*', 'string', """ + ( + "runprefs.*", + "string", + """ Pass a pref into Firefox when using `mach run`, of the form `foo.bar=value`. Prefs will automatically be cast into the appropriate type. Integers can be single quoted to force them to be strings. -""".strip()), +""".strip(), + ), ] def _get_android_run_parser(): parser = argparse.ArgumentParser() - parser.add_argument('--app', default='org.mozilla.geckoview_example', - help='Android package to run ' - '(default: org.mozilla.geckoview_example)') - parser.add_argument('--intent', default='android.intent.action.VIEW', - help='Android intent action to launch with ' - '(default: android.intent.action.VIEW)') - parser.add_argument('--setenv', dest='env', action='append', default=[], - help='Set target environment variable, like FOO=BAR') - parser.add_argument('--profile', '-P', default=None, - help='Path to Gecko profile, like /path/to/host/profile ' - 'or /path/to/target/profile') - parser.add_argument('--url', default=None, - help='URL to open') - parser.add_argument('--no-install', action='store_true', default=False, - help='Do not try to install application on device before running ' - '(default: False)') - parser.add_argument('--no-wait', action='store_true', default=False, - help='Do not wait for application to start before returning ' - '(default: False)') - parser.add_argument('--enable-fission', action='store_true', - help='Run the program with Fission (site isolation) enabled.') - parser.add_argument('--fail-if-running', action='store_true', default=False, - help='Fail if application is already running (default: False)') - parser.add_argument('--restart', action='store_true', default=False, - help='Stop the application if it is already running (default: False)') + parser.add_argument( + "--app", + default="org.mozilla.geckoview_example", + help="Android package to run " "(default: org.mozilla.geckoview_example)", + ) + parser.add_argument( + "--intent", + default="android.intent.action.VIEW", + help="Android intent action to launch with " + "(default: android.intent.action.VIEW)", + ) + parser.add_argument( + "--setenv", + dest="env", + action="append", + default=[], + help="Set target environment variable, like FOO=BAR", + ) + parser.add_argument( + "--profile", + "-P", + default=None, + help="Path to Gecko profile, like /path/to/host/profile " + "or /path/to/target/profile", + ) + parser.add_argument("--url", default=None, help="URL to open") + parser.add_argument( + "--no-install", + action="store_true", + 
default=False, + help="Do not try to install application on device before running " + "(default: False)", + ) + parser.add_argument( + "--no-wait", + action="store_true", + default=False, + help="Do not wait for application to start before returning " + "(default: False)", + ) + parser.add_argument( + "--enable-fission", + action="store_true", + help="Run the program with Fission (site isolation) enabled.", + ) + parser.add_argument( + "--fail-if-running", + action="store_true", + default=False, + help="Fail if application is already running (default: False)", + ) + parser.add_argument( + "--restart", + action="store_true", + default=False, + help="Stop the application if it is already running (default: False)", + ) return parser def _get_jsshell_run_parser(): parser = argparse.ArgumentParser() - group = parser.add_argument_group('the compiled program') - group.add_argument('params', nargs='...', default=[], - help='Command-line arguments to be passed through to the program. Not ' - 'specifying a --profile or -P option will result in a temporary profile ' - 'being used.') + group = parser.add_argument_group("the compiled program") + group.add_argument( + "params", + nargs="...", + default=[], + help="Command-line arguments to be passed through to the program. Not " + "specifying a --profile or -P option will result in a temporary profile " + "being used.", + ) - group = parser.add_argument_group('debugging') - group.add_argument('--debug', action='store_true', - help='Enable the debugger. Not specifying a --debugger option will result ' - 'in the default debugger being used.') - group.add_argument('--debugger', default=None, type=str, - help='Name of debugger to use.') - group.add_argument('--debugger-args', default=None, metavar='params', type=str, - help='Command-line arguments to pass to the debugger itself; ' - 'split as the Bourne shell would.') - group.add_argument('--debugparams', action=StoreDebugParamsAndWarnAction, - default=None, type=str, dest='debugger_args', - help=argparse.SUPPRESS) + group = parser.add_argument_group("debugging") + group.add_argument( + "--debug", + action="store_true", + help="Enable the debugger. Not specifying a --debugger option will result " + "in the default debugger being used.", + ) + group.add_argument( + "--debugger", default=None, type=str, help="Name of debugger to use." + ) + group.add_argument( + "--debugger-args", + default=None, + metavar="params", + type=str, + help="Command-line arguments to pass to the debugger itself; " + "split as the Bourne shell would.", + ) + group.add_argument( + "--debugparams", + action=StoreDebugParamsAndWarnAction, + default=None, + type=str, + dest="debugger_args", + help=argparse.SUPPRESS, + ) return parser def _get_desktop_run_parser(): parser = argparse.ArgumentParser() - group = parser.add_argument_group('the compiled program') - group.add_argument('params', nargs='...', default=[], - help='Command-line arguments to be passed through to the program. 
Not ' - 'specifying a --profile or -P option will result in a temporary profile ' - 'being used.') - group.add_argument('--packaged', action='store_true', - help='Run a packaged build.') - group.add_argument('--remote', '-r', action='store_true', - help='Do not pass the --no-remote argument by default.') - group.add_argument('--background', '-b', action='store_true', - help='Do not pass the --foreground argument by default on Mac.') - group.add_argument('--noprofile', '-n', action='store_true', - help='Do not pass the --profile argument by default.') - group.add_argument('--disable-e10s', action='store_true', - help='Run the program with electrolysis disabled.') - group.add_argument('--enable-crash-reporter', action='store_true', - help='Run the program with the crash reporter enabled.') - group.add_argument('--enable-fission', action='store_true', - help='Run the program with Fission (site isolation) enabled.') - group.add_argument('--setpref', action='append', default=[], - help='Set the specified pref before starting the program. Can be set ' - 'multiple times. Prefs can also be set in ~/.mozbuild/machrc in the ' - '[runprefs] section - see `./mach settings` for more information.') - group.add_argument('--temp-profile', action='store_true', - help='Run the program using a new temporary profile created inside ' - 'the objdir.') - group.add_argument('--macos-open', action='store_true', - help="On macOS, run the program using the open(1) command. Per open(1), " - "the browser is launched \"just as if you had double-clicked the file's " - "icon\". The browser can not be launched under a debugger with this " - "option.") + group = parser.add_argument_group("the compiled program") + group.add_argument( + "params", + nargs="...", + default=[], + help="Command-line arguments to be passed through to the program. Not " + "specifying a --profile or -P option will result in a temporary profile " + "being used.", + ) + group.add_argument("--packaged", action="store_true", help="Run a packaged build.") + group.add_argument( + "--remote", + "-r", + action="store_true", + help="Do not pass the --no-remote argument by default.", + ) + group.add_argument( + "--background", + "-b", + action="store_true", + help="Do not pass the --foreground argument by default on Mac.", + ) + group.add_argument( + "--noprofile", + "-n", + action="store_true", + help="Do not pass the --profile argument by default.", + ) + group.add_argument( + "--disable-e10s", + action="store_true", + help="Run the program with electrolysis disabled.", + ) + group.add_argument( + "--enable-crash-reporter", + action="store_true", + help="Run the program with the crash reporter enabled.", + ) + group.add_argument( + "--enable-fission", + action="store_true", + help="Run the program with Fission (site isolation) enabled.", + ) + group.add_argument( + "--setpref", + action="append", + default=[], + help="Set the specified pref before starting the program. Can be set " + "multiple times. Prefs can also be set in ~/.mozbuild/machrc in the " + "[runprefs] section - see `./mach settings` for more information.", + ) + group.add_argument( + "--temp-profile", + action="store_true", + help="Run the program using a new temporary profile created inside " + "the objdir.", + ) + group.add_argument( + "--macos-open", + action="store_true", + help="On macOS, run the program using the open(1) command. Per open(1), " + "the browser is launched \"just as if you had double-clicked the file's " + 'icon". 
The browser can not be launched under a debugger with this ' + "option.", + ) - group = parser.add_argument_group('debugging') - group.add_argument('--debug', action='store_true', - help='Enable the debugger. Not specifying a --debugger option will result ' - 'in the default debugger being used.') - group.add_argument('--debugger', default=None, type=str, - help='Name of debugger to use.') - group.add_argument('--debugger-args', default=None, metavar='params', type=str, - help='Command-line arguments to pass to the debugger itself; ' - 'split as the Bourne shell would.') - group.add_argument('--debugparams', action=StoreDebugParamsAndWarnAction, - default=None, type=str, dest='debugger_args', - help=argparse.SUPPRESS) + group = parser.add_argument_group("debugging") + group.add_argument( + "--debug", + action="store_true", + help="Enable the debugger. Not specifying a --debugger option will result " + "in the default debugger being used.", + ) + group.add_argument( + "--debugger", default=None, type=str, help="Name of debugger to use." + ) + group.add_argument( + "--debugger-args", + default=None, + metavar="params", + type=str, + help="Command-line arguments to pass to the debugger itself; " + "split as the Bourne shell would.", + ) + group.add_argument( + "--debugparams", + action=StoreDebugParamsAndWarnAction, + default=None, + type=str, + dest="debugger_args", + help=argparse.SUPPRESS, + ) - group = parser.add_argument_group('DMD') - group.add_argument('--dmd', action='store_true', - help='Enable DMD. The following arguments have no effect without this.') - group.add_argument('--mode', choices=['live', 'dark-matter', 'cumulative', 'scan'], - help='Profiling mode. The default is \'dark-matter\'.') - group.add_argument('--stacks', choices=['partial', 'full'], - help='Allocation stack trace coverage. The default is \'partial\'.') - group.add_argument('--show-dump-stats', action='store_true', - help='Show stats when doing dumps.') + group = parser.add_argument_group("DMD") + group.add_argument( + "--dmd", + action="store_true", + help="Enable DMD. The following arguments have no effect without this.", + ) + group.add_argument( + "--mode", + choices=["live", "dark-matter", "cumulative", "scan"], + help="Profiling mode. The default is 'dark-matter'.", + ) + group.add_argument( + "--stacks", + choices=["partial", "full"], + help="Allocation stack trace coverage. The default is 'partial'.", + ) + group.add_argument( + "--show-dump-stats", action="store_true", help="Show stats when doing dumps." 
+ ) return parser @@ -871,10 +1233,13 @@ def setup_run_parser(): class RunProgram(MachCommandBase): """Run the compiled program.""" - @Command('run', category='post-build', - conditions=[conditions.has_build_or_shell], - parser=setup_run_parser, - description='Run the compiled program, possibly under a debugger or DMD.') + @Command( + "run", + category="post-build", + conditions=[conditions.has_build_or_shell], + parser=setup_run_parser, + description="Run the compiled program, possibly under a debugger or DMD.", + ) def run(self, **kwargs): if conditions.is_android(self): return self._run_android(**kwargs) @@ -882,29 +1247,42 @@ class RunProgram(MachCommandBase): return self._run_jsshell(**kwargs) return self._run_desktop(**kwargs) - def _run_android(self, app='org.mozilla.geckoview_example', intent=None, env=[], profile=None, - url=None, no_install=None, no_wait=None, fail_if_running=None, restart=None, - enable_fission=False): - from mozrunner.devices.android_device import (verify_android_device, - _get_device, - InstallIntent) + def _run_android( + self, + app="org.mozilla.geckoview_example", + intent=None, + env=[], + profile=None, + url=None, + no_install=None, + no_wait=None, + fail_if_running=None, + restart=None, + enable_fission=False, + ): + from mozrunner.devices.android_device import ( + verify_android_device, + _get_device, + InstallIntent, + ) from six.moves import shlex_quote - if app == 'org.mozilla.geckoview_example': - activity_name = 'org.mozilla.geckoview_example.GeckoViewActivity' - elif app == 'org.mozilla.geckoview.test': - activity_name = 'org.mozilla.geckoview.test.TestRunnerActivity' - elif 'fennec' in app or 'firefox' in app: - activity_name = 'org.mozilla.gecko.BrowserApp' + if app == "org.mozilla.geckoview_example": + activity_name = "org.mozilla.geckoview_example.GeckoViewActivity" + elif app == "org.mozilla.geckoview.test": + activity_name = "org.mozilla.geckoview.test.TestRunnerActivity" + elif "fennec" in app or "firefox" in app: + activity_name = "org.mozilla.gecko.BrowserApp" else: - raise RuntimeError('Application not recognized: {}'.format(app)) + raise RuntimeError("Application not recognized: {}".format(app)) # `verify_android_device` respects `DEVICE_SERIAL` if it is set and sets it otherwise. - verify_android_device(self, app=app, - install=InstallIntent.NO if no_install else InstallIntent.YES) - device_serial = os.environ.get('DEVICE_SERIAL') + verify_android_device( + self, app=app, install=InstallIntent.NO if no_install else InstallIntent.YES + ) + device_serial = os.environ.get("DEVICE_SERIAL") if not device_serial: - print('No ADB devices connected.') + print("No ADB devices connected.") return 1 device = _get_device(self.substs, device_serial=device_serial) @@ -915,45 +1293,57 @@ class RunProgram(MachCommandBase): host_profile = profile # Always /data/local/tmp, rather than `device.test_root`, because GeckoView only # takes its configuration file from /data/local/tmp, and we want to follow suit. 
- target_profile = '/data/local/tmp/{}-profile'.format(app) + target_profile = "/data/local/tmp/{}-profile".format(app) device.rm(target_profile, recursive=True, force=True) device.push(host_profile, target_profile) - self.log(logging.INFO, "run", - {'host_profile': host_profile, 'target_profile': target_profile}, - 'Pushed profile from host "{host_profile}" to target "{target_profile}"') + self.log( + logging.INFO, + "run", + {"host_profile": host_profile, "target_profile": target_profile}, + 'Pushed profile from host "{host_profile}" to target "{target_profile}"', + ) else: target_profile = profile - self.log(logging.INFO, "run", - {'target_profile': target_profile}, - 'Using profile from target "{target_profile}"') + self.log( + logging.INFO, + "run", + {"target_profile": target_profile}, + 'Using profile from target "{target_profile}"', + ) - args = ['--profile', shlex_quote(target_profile)] + args = ["--profile", shlex_quote(target_profile)] if enable_fission: - env.append('MOZ_FORCE_ENABLE_FISSION=1') + env.append("MOZ_FORCE_ENABLE_FISSION=1") extras = {} for i, e in enumerate(env): - extras['env{}'.format(i)] = e + extras["env{}".format(i)] = e if args: - extras['args'] = " ".join(args) - extras['use_multiprocess'] = True # Only GVE and TRA process this extra. + extras["args"] = " ".join(args) + extras["use_multiprocess"] = True # Only GVE and TRA process this extra. if env or args: restart = True if restart: fail_if_running = False - self.log(logging.INFO, "run", - {'app': app}, - 'Stopping {app} to ensure clean restart.') + self.log( + logging.INFO, + "run", + {"app": app}, + "Stopping {app} to ensure clean restart.", + ) device.stop_application(app) # We'd prefer to log the actual `am start ...` command, but it's not trivial to wire the # device's logger to mach's logger. - self.log(logging.INFO, "run", - {'app': app, 'activity_name': activity_name}, - 'Starting {app}/{activity_name}.') + self.log( + logging.INFO, + "run", + {"app": app, "activity_name": activity_name}, + "Starting {app}/{activity_name}.", + ) device.launch_application( app_name=app, @@ -962,20 +1352,17 @@ class RunProgram(MachCommandBase): extras=extras, url=url, wait=not no_wait, - fail_if_running=fail_if_running) + fail_if_running=fail_if_running, + ) return 0 def _run_jsshell(self, params, debug, debugger, debugger_args): try: - binpath = self.get_binary_path('app') + binpath = self.get_binary_path("app") except BinaryNotFoundException as e: - self.log(logging.ERROR, 'run', - {'error': str(e)}, - 'ERROR: {error}') - self.log(logging.INFO, 'run', - {'help': e.help()}, - '{help}') + self.log(logging.ERROR, "run", {"error": str(e)}, "ERROR: {error}") + self.log(logging.INFO, "run", {"help": e.help()}, "{help}") return 1 args = [binpath] @@ -984,18 +1371,21 @@ class RunProgram(MachCommandBase): args.extend(params) extra_env = { - 'RUST_BACKTRACE': 'full', + "RUST_BACKTRACE": "full", } if debug or debugger or debugger_args: - if 'INSIDE_EMACS' in os.environ: + if "INSIDE_EMACS" in os.environ: self.log_manager.terminal_handler.setLevel(logging.WARNING) import mozdebug + if not debugger: # No debugger name was provided. Look for the default ones on # current OS. - debugger = mozdebug.get_default_debugger_name(mozdebug.DebuggerSearch.KeepLooking) + debugger = mozdebug.get_default_debugger_name( + mozdebug.DebuggerSearch.KeepLooking + ) if debugger: self.debuggerInfo = mozdebug.get_debugger_info(debugger, debugger_args) @@ -1008,58 +1398,84 @@ class RunProgram(MachCommandBase): # their use. 
if debugger_args: from mozbuild import shellutil + try: debugger_args = shellutil.split(debugger_args) except shellutil.MetaCharacterException as e: - print("The --debugger-args you passed require a real shell to parse them.") + print( + "The --debugger-args you passed require a real shell to parse them." + ) print("(We can't handle the %r character.)" % e.char) return 1 # Prepend the debugger args. args = [self.debuggerInfo.path] + self.debuggerInfo.args + args - return self.run_process(args=args, ensure_exit_code=False, - pass_thru=True, append_env=extra_env) + return self.run_process( + args=args, ensure_exit_code=False, pass_thru=True, append_env=extra_env + ) - def _run_desktop(self, params, packaged, remote, background, noprofile, - disable_e10s, enable_crash_reporter, enable_fission, setpref, - temp_profile, macos_open, debug, debugger, debugger_args, dmd, - mode, stacks, show_dump_stats): + def _run_desktop( + self, + params, + packaged, + remote, + background, + noprofile, + disable_e10s, + enable_crash_reporter, + enable_fission, + setpref, + temp_profile, + macos_open, + debug, + debugger, + debugger_args, + dmd, + mode, + stacks, + show_dump_stats, + ): from mozprofile import Profile, Preferences try: if packaged: - binpath = self.get_binary_path(where='staged-package') + binpath = self.get_binary_path(where="staged-package") else: - binpath = self.get_binary_path('app') + binpath = self.get_binary_path("app") except BinaryNotFoundException as e: - self.log(logging.ERROR, 'run', - {'error': str(e)}, - 'ERROR: {error}') + self.log(logging.ERROR, "run", {"error": str(e)}, "ERROR: {error}") if packaged: - self.log(logging.INFO, 'run', - {'help': "It looks like your build isn\'t packaged. " - "You can run |./mach package| to package it."}, - '{help}') + self.log( + logging.INFO, + "run", + { + "help": "It looks like your build isn't packaged. " + "You can run |./mach package| to package it." + }, + "{help}", + ) else: - self.log(logging.INFO, 'run', - {'help': e.help()}, - '{help}') + self.log(logging.INFO, "run", {"help": e.help()}, "{help}") return 1 args = [] if macos_open: if debug: - print("The browser can not be launched in the debugger " - "when using the macOS open command.") + print( + "The browser can not be launched in the debugger " + "when using the macOS open command." + ) return 1 try: - m = re.search(r'^.+\.app', binpath) + m = re.search(r"^.+\.app", binpath) apppath = m.group(0) - args = ['open', apppath, '--args'] + args = ["open", apppath, "--args"] except Exception as e: - print("Couldn't get the .app path from the binary path. " - "The macOS open option can only be used on macOS") + print( + "Couldn't get the .app path from the binary path. 
" + "The macOS open option can only be used on macOS" + ) print(e) return 1 else: @@ -1069,39 +1485,39 @@ class RunProgram(MachCommandBase): args.extend(params) if not remote: - args.append('-no-remote') + args.append("-no-remote") - if not background and sys.platform == 'darwin': - args.append('-foreground') + if not background and sys.platform == "darwin": + args.append("-foreground") - if sys.platform.startswith('win') and \ - 'MOZ_LAUNCHER_PROCESS' in self.defines: - args.append('-wait-for-browser') + if sys.platform.startswith("win") and "MOZ_LAUNCHER_PROCESS" in self.defines: + args.append("-wait-for-browser") - no_profile_option_given = \ - all(p not in params for p in ['-profile', '--profile', '-P']) + no_profile_option_given = all( + p not in params for p in ["-profile", "--profile", "-P"] + ) if no_profile_option_given and not noprofile: prefs = { - 'browser.aboutConfig.showWarning': False, - 'browser.shell.checkDefaultBrowser': False, - 'general.warnOnAboutConfig': False, + "browser.aboutConfig.showWarning": False, + "browser.shell.checkDefaultBrowser": False, + "general.warnOnAboutConfig": False, } prefs.update(self._mach_context.settings.runprefs) - prefs.update([p.split('=', 1) for p in setpref]) + prefs.update([p.split("=", 1) for p in setpref]) for pref in prefs: prefs[pref] = Preferences.cast(prefs[pref]) - tmpdir = os.path.join(self.topobjdir, 'tmp') + tmpdir = os.path.join(self.topobjdir, "tmp") if not os.path.exists(tmpdir): os.makedirs(tmpdir) - if (temp_profile): - path = tempfile.mkdtemp(dir=tmpdir, prefix='profile-') + if temp_profile: + path = tempfile.mkdtemp(dir=tmpdir, prefix="profile-") else: - path = os.path.join(tmpdir, 'profile-default') + path = os.path.join(tmpdir, "profile-default") profile = Profile(path, preferences=prefs) - args.append('-profile') + args.append("-profile") args.append(profile.profile) if not no_profile_option_given and setpref: @@ -1112,10 +1528,11 @@ class RunProgram(MachCommandBase): # The profile name may be non-ascii, but come from the # commandline as str, so convert here with a better guess at # an encoding than the default. - encoding = (sys.getfilesystemencoding() or - sys.getdefaultencoding()) - args = [unicode(a, encoding) if not isinstance(a, unicode) else a - for a in args] + encoding = sys.getfilesystemencoding() or sys.getdefaultencoding() + args = [ + unicode(a, encoding) if not isinstance(a, unicode) else a + for a in args + ] some_debugging_option = debug or debugger or debugger_args @@ -1129,39 +1546,49 @@ class RunProgram(MachCommandBase): # -attach-console just ends us up with output that gets relayed via mach. # We shouldn't override the user using -console. 
For more info, see # https://bugzilla.mozilla.org/show_bug.cgi?id=1257155 - if sys.platform.startswith('win') and not some_debugging_option and \ - '-console' not in args and '--console' not in args and \ - '-attach-console' not in args and '--attach-console' not in args: - args.append('-attach-console') + if ( + sys.platform.startswith("win") + and not some_debugging_option + and "-console" not in args + and "--console" not in args + and "-attach-console" not in args + and "--attach-console" not in args + ): + args.append("-attach-console") extra_env = { - 'MOZ_DEVELOPER_REPO_DIR': self.topsrcdir, - 'MOZ_DEVELOPER_OBJ_DIR': self.topobjdir, - 'RUST_BACKTRACE': 'full', + "MOZ_DEVELOPER_REPO_DIR": self.topsrcdir, + "MOZ_DEVELOPER_OBJ_DIR": self.topobjdir, + "RUST_BACKTRACE": "full", } if not enable_crash_reporter: - extra_env['MOZ_CRASHREPORTER_DISABLE'] = '1' + extra_env["MOZ_CRASHREPORTER_DISABLE"] = "1" else: - extra_env['MOZ_CRASHREPORTER'] = '1' + extra_env["MOZ_CRASHREPORTER"] = "1" if disable_e10s: - version_file = os.path.join(self.topsrcdir, 'browser', 'config', 'version.txt') - f = open(version_file, 'r') - extra_env['MOZ_FORCE_DISABLE_E10S'] = f.read().strip() + version_file = os.path.join( + self.topsrcdir, "browser", "config", "version.txt" + ) + f = open(version_file, "r") + extra_env["MOZ_FORCE_DISABLE_E10S"] = f.read().strip() if enable_fission: - extra_env['MOZ_FORCE_ENABLE_FISSION'] = '1' + extra_env["MOZ_FORCE_ENABLE_FISSION"] = "1" if some_debugging_option: - if 'INSIDE_EMACS' in os.environ: + if "INSIDE_EMACS" in os.environ: self.log_manager.terminal_handler.setLevel(logging.WARNING) import mozdebug + if not debugger: # No debugger name was provided. Look for the default ones on # current OS. - debugger = mozdebug.get_default_debugger_name(mozdebug.DebuggerSearch.KeepLooking) + debugger = mozdebug.get_default_debugger_name( + mozdebug.DebuggerSearch.KeepLooking + ) if debugger: self.debuggerInfo = mozdebug.get_debugger_info(debugger, debugger_args) @@ -1174,10 +1601,13 @@ class RunProgram(MachCommandBase): # their use. if debugger_args: from mozbuild import shellutil + try: debugger_args = shellutil.split(debugger_args) except shellutil.MetaCharacterException as e: - print("The --debugger-args you passed require a real shell to parse them.") + print( + "The --debugger-args you passed require a real shell to parse them." 
+ ) print("(We can't handle the %r character.)" % e.char) return 1 @@ -1188,49 +1618,62 @@ class RunProgram(MachCommandBase): dmd_params = [] if mode: - dmd_params.append('--mode=' + mode) + dmd_params.append("--mode=" + mode) if stacks: - dmd_params.append('--stacks=' + stacks) + dmd_params.append("--stacks=" + stacks) if show_dump_stats: - dmd_params.append('--show-dump-stats=yes') + dmd_params.append("--show-dump-stats=yes") if dmd_params: - extra_env['DMD'] = ' '.join(dmd_params) + extra_env["DMD"] = " ".join(dmd_params) else: - extra_env['DMD'] = '1' + extra_env["DMD"] = "1" - return self.run_process(args=args, ensure_exit_code=False, - pass_thru=True, append_env=extra_env) + return self.run_process( + args=args, ensure_exit_code=False, pass_thru=True, append_env=extra_env + ) @CommandProvider class Buildsymbols(MachCommandBase): """Produce a package of debug symbols suitable for use with Breakpad.""" - @Command('buildsymbols', category='post-build', - description='Produce a package of Breakpad-format symbols.') + @Command( + "buildsymbols", + category="post-build", + description="Produce a package of Breakpad-format symbols.", + ) def buildsymbols(self): - return self._run_make(directory=".", target='buildsymbols', ensure_exit_code=False) + return self._run_make( + directory=".", target="buildsymbols", ensure_exit_code=False + ) @CommandProvider class MachDebug(MachCommandBase): - @Command('environment', category='build-dev', - description='Show info about the mach and build environment.') - @CommandArgument('--format', default='pretty', - choices=['pretty', 'json'], - help='Print data in the given format.') - @CommandArgument('--output', '-o', type=str, - help='Output to the given file.') - @CommandArgument('--verbose', '-v', action='store_true', - help='Print verbose output.') + @Command( + "environment", + category="build-dev", + description="Show info about the mach and build environment.", + ) + @CommandArgument( + "--format", + default="pretty", + choices=["pretty", "json"], + help="Print data in the given format.", + ) + @CommandArgument("--output", "-o", type=str, help="Output to the given file.") + @CommandArgument( + "--verbose", "-v", action="store_true", help="Print verbose output." + ) def environment(self, format, output=None, verbose=False): - func = getattr(self, '_environment_%s' % format.replace('.', '_')) + func = getattr(self, "_environment_%s" % format.replace(".", "_")) if output: # We want to preserve mtimes if the output file already exists # and the content hasn't changed. 
from mozbuild.util import FileAvoidWrite + with FileAvoidWrite(output) as out: return func(out, verbose) return func(sys.stdout, verbose) @@ -1238,32 +1681,33 @@ class MachDebug(MachCommandBase): def _environment_pretty(self, out, verbose): state_dir = self._mach_context.state_dir import platform - print('platform:\n\t%s' % platform.platform(), file=out) - print('python version:\n\t%s' % sys.version, file=out) - print('python prefix:\n\t%s' % sys.prefix, file=out) - print('mach cwd:\n\t%s' % self._mach_context.cwd, file=out) - print('os cwd:\n\t%s' % os.getcwd(), file=out) - print('mach directory:\n\t%s' % self._mach_context.topdir, file=out) - print('state directory:\n\t%s' % state_dir, file=out) - print('object directory:\n\t%s' % self.topobjdir, file=out) + print("platform:\n\t%s" % platform.platform(), file=out) + print("python version:\n\t%s" % sys.version, file=out) + print("python prefix:\n\t%s" % sys.prefix, file=out) + print("mach cwd:\n\t%s" % self._mach_context.cwd, file=out) + print("os cwd:\n\t%s" % os.getcwd(), file=out) + print("mach directory:\n\t%s" % self._mach_context.topdir, file=out) + print("state directory:\n\t%s" % state_dir, file=out) - if self.mozconfig['path']: - print('mozconfig path:\n\t%s' % self.mozconfig['path'], file=out) - if self.mozconfig['configure_args']: - print('mozconfig configure args:', file=out) - for arg in self.mozconfig['configure_args']: - print('\t%s' % arg, file=out) + print("object directory:\n\t%s" % self.topobjdir, file=out) - if self.mozconfig['make_extra']: - print('mozconfig extra make args:', file=out) - for arg in self.mozconfig['make_extra']: - print('\t%s' % arg, file=out) + if self.mozconfig["path"]: + print("mozconfig path:\n\t%s" % self.mozconfig["path"], file=out) + if self.mozconfig["configure_args"]: + print("mozconfig configure args:", file=out) + for arg in self.mozconfig["configure_args"]: + print("\t%s" % arg, file=out) - if self.mozconfig['make_flags']: - print('mozconfig make flags:', file=out) - for arg in self.mozconfig['make_flags']: - print('\t%s' % arg, file=out) + if self.mozconfig["make_extra"]: + print("mozconfig extra make args:", file=out) + for arg in self.mozconfig["make_extra"]: + print("\t%s" % arg, file=out) + + if self.mozconfig["make_flags"]: + print("mozconfig make flags:", file=out) + for arg in self.mozconfig["make_flags"]: + print("\t%s" % arg, file=out) config = None @@ -1274,17 +1718,17 @@ class MachDebug(MachCommandBase): pass if config: - print('config topsrcdir:\n\t%s' % config.topsrcdir, file=out) - print('config topobjdir:\n\t%s' % config.topobjdir, file=out) + print("config topsrcdir:\n\t%s" % config.topsrcdir, file=out) + print("config topobjdir:\n\t%s" % config.topobjdir, file=out) if verbose: - print('config substitutions:', file=out) + print("config substitutions:", file=out) for k in sorted(config.substs): - print('\t%s: %s' % (k, config.substs[k]), file=out) + print("\t%s: %s" % (k, config.substs[k]), file=out) - print('config defines:', file=out) + print("config defines:", file=out) for k in sorted(config.defines): - print('\t%s' % k, file=out) + print("\t%s" % k, file=out) def _environment_json(self, out, verbose): import json @@ -1293,70 +1737,101 @@ class MachDebug(MachCommandBase): def default(self, obj): if isinstance(obj, MozbuildObject): result = { - 'topsrcdir': obj.topsrcdir, - 'topobjdir': obj.topobjdir, - 'mozconfig': obj.mozconfig, + "topsrcdir": obj.topsrcdir, + "topobjdir": obj.topobjdir, + "mozconfig": obj.mozconfig, } if verbose: - result['substs'] = obj.substs - 
result['defines'] = obj.defines + result["substs"] = obj.substs + result["defines"] = obj.defines return result elif isinstance(obj, set): return list(obj) return json.JSONEncoder.default(self, obj) + json.dump(self, cls=EnvironmentEncoder, sort_keys=True, fp=out) @CommandProvider class Repackage(MachCommandBase): - '''Repackages artifacts into different formats. + """Repackages artifacts into different formats. This is generally used after packages are signed by the signing scriptworkers in order to bundle things up into shippable formats, such as a .dmg on OSX or an installer exe on Windows. - ''' - @Command('repackage', category='misc', - description='Repackage artifacts into different formats.') + """ + + @Command( + "repackage", + category="misc", + description="Repackage artifacts into different formats.", + ) def repackage(self): print("Usage: ./mach repackage [dmg|installer|mar] [args...]") - @SubCommand('repackage', 'dmg', - description='Repackage a tar file into a .dmg for OSX') - @CommandArgument('--input', '-i', type=str, required=True, - help='Input filename') - @CommandArgument('--output', '-o', type=str, required=True, - help='Output filename') + @SubCommand( + "repackage", "dmg", description="Repackage a tar file into a .dmg for OSX" + ) + @CommandArgument("--input", "-i", type=str, required=True, help="Input filename") + @CommandArgument("--output", "-o", type=str, required=True, help="Output filename") def repackage_dmg(self, input, output): if not os.path.exists(input): - print('Input file does not exist: %s' % input) + print("Input file does not exist: %s" % input) return 1 - if not os.path.exists(os.path.join(self.topobjdir, 'config.status')): - print('config.status not found. Please run |mach configure| ' - 'prior to |mach repackage|.') + if not os.path.exists(os.path.join(self.topobjdir, "config.status")): + print( + "config.status not found. Please run |mach configure| " + "prior to |mach repackage|." 
+ ) return 1 from mozbuild.repackaging.dmg import repackage_dmg + repackage_dmg(input, output) - @SubCommand('repackage', 'installer', - description='Repackage into a Windows installer exe') - @CommandArgument('--tag', type=str, required=True, - help='The .tag file used to build the installer') - @CommandArgument('--setupexe', type=str, required=True, - help='setup.exe file inside the installer') - @CommandArgument('--package', type=str, required=False, - help='Optional package .zip for building a full installer') - @CommandArgument('--output', '-o', type=str, required=True, - help='Output filename') - @CommandArgument('--package-name', type=str, required=False, - help='Name of the package being rebuilt') - @CommandArgument('--sfx-stub', type=str, required=True, - help='Path to the self-extraction stub.') - @CommandArgument('--use-upx', required=False, action='store_true', - help='Run UPX on the self-extraction stub.') - def repackage_installer(self, tag, setupexe, package, output, package_name, sfx_stub, use_upx): + @SubCommand( + "repackage", "installer", description="Repackage into a Windows installer exe" + ) + @CommandArgument( + "--tag", + type=str, + required=True, + help="The .tag file used to build the installer", + ) + @CommandArgument( + "--setupexe", + type=str, + required=True, + help="setup.exe file inside the installer", + ) + @CommandArgument( + "--package", + type=str, + required=False, + help="Optional package .zip for building a full installer", + ) + @CommandArgument("--output", "-o", type=str, required=True, help="Output filename") + @CommandArgument( + "--package-name", + type=str, + required=False, + help="Name of the package being rebuilt", + ) + @CommandArgument( + "--sfx-stub", type=str, required=True, help="Path to the self-extraction stub." 
+ ) + @CommandArgument( + "--use-upx", + required=False, + action="store_true", + help="Run UPX on the self-extraction stub.", + ) + def repackage_installer( + self, tag, setupexe, package, output, package_name, sfx_stub, use_upx + ): from mozbuild.repackaging.installer import repackage_installer + repackage_installer( topsrcdir=self.topsrcdir, tag=tag, @@ -1368,26 +1843,38 @@ class Repackage(MachCommandBase): use_upx=use_upx, ) - @SubCommand('repackage', 'msi', - description='Repackage into a MSI') - @CommandArgument('--wsx', type=str, required=True, - help='The wsx file used to build the installer') - @CommandArgument('--version', type=str, required=True, - help='The Firefox version used to create the installer') - @CommandArgument('--locale', type=str, required=True, - help='The locale of the installer') - @CommandArgument('--arch', type=str, required=True, - help='The archtecture you are building.') - @CommandArgument('--setupexe', type=str, required=True, - help='setup.exe installer') - @CommandArgument('--candle', type=str, required=False, - help='location of candle binary') - @CommandArgument('--light', type=str, required=False, - help='location of light binary') - @CommandArgument('--output', '-o', type=str, required=True, - help='Output filename') - def repackage_msi(self, wsx, version, locale, arch, setupexe, candle, light, output): + @SubCommand("repackage", "msi", description="Repackage into a MSI") + @CommandArgument( + "--wsx", + type=str, + required=True, + help="The wsx file used to build the installer", + ) + @CommandArgument( + "--version", + type=str, + required=True, + help="The Firefox version used to create the installer", + ) + @CommandArgument( + "--locale", type=str, required=True, help="The locale of the installer" + ) + @CommandArgument( + "--arch", type=str, required=True, help="The archtecture you are building." + ) + @CommandArgument("--setupexe", type=str, required=True, help="setup.exe installer") + @CommandArgument( + "--candle", type=str, required=False, help="location of candle binary" + ) + @CommandArgument( + "--light", type=str, required=False, help="location of light binary" + ) + @CommandArgument("--output", "-o", type=str, required=True, help="Output filename") + def repackage_msi( + self, wsx, version, locale, arch, setupexe, candle, light, output + ): from mozbuild.repackaging.msi import repackage_msi + repackage_msi( topsrcdir=self.topsrcdir, wsx=wsx, @@ -1400,20 +1887,17 @@ class Repackage(MachCommandBase): output=output, ) - @SubCommand('repackage', 'mar', - description='Repackage into complete MAR file') - @CommandArgument('--input', '-i', type=str, required=True, - help='Input filename') - @CommandArgument('--mar', type=str, required=True, - help='Mar binary path') - @CommandArgument('--output', '-o', type=str, required=True, - help='Output filename') - @CommandArgument('--arch', type=str, required=True, - help='The archtecture you are building.') - @CommandArgument('--mar-channel-id', type=str, - help='Mar channel id') + @SubCommand("repackage", "mar", description="Repackage into complete MAR file") + @CommandArgument("--input", "-i", type=str, required=True, help="Input filename") + @CommandArgument("--mar", type=str, required=True, help="Mar binary path") + @CommandArgument("--output", "-o", type=str, required=True, help="Output filename") + @CommandArgument( + "--arch", type=str, required=True, help="The archtecture you are building." 
+ ) + @CommandArgument("--mar-channel-id", type=str, help="Mar channel id") def repackage_mar(self, input, mar, output, arch, mar_channel_id): from mozbuild.repackaging.mar import repackage_mar + repackage_mar( self.topsrcdir, input, @@ -1425,91 +1909,137 @@ class Repackage(MachCommandBase): @SettingsProvider -class TelemetrySettings(): +class TelemetrySettings: config_settings = [ - ('build.telemetry', 'boolean', """ + ( + "build.telemetry", + "boolean", + """ Enable submission of build system telemetry. - """.strip(), False), + """.strip(), + False, + ), ] @CommandProvider class L10NCommands(MachCommandBase): - @Command('package-multi-locale', category='post-build', - description='Package a multi-locale version of the built product ' - 'for distribution as an APK, DMG, etc.') - @CommandArgument('--locales', metavar='LOCALES', nargs='+', - required=True, - help='List of locales to package, including "en-US"') - @CommandArgument('--verbose', action='store_true', - help='Log informative status messages.') + @Command( + "package-multi-locale", + category="post-build", + description="Package a multi-locale version of the built product " + "for distribution as an APK, DMG, etc.", + ) + @CommandArgument( + "--locales", + metavar="LOCALES", + nargs="+", + required=True, + help='List of locales to package, including "en-US"', + ) + @CommandArgument( + "--verbose", action="store_true", help="Log informative status messages." + ) def package_l10n(self, verbose=False, locales=[]): - if 'RecursiveMake' not in self.substs['BUILD_BACKENDS']: - print('Artifact builds do not support localization. ' - 'If you know what you are doing, you can use:\n' - 'ac_add_options --disable-compile-environment\n' - 'export BUILD_BACKENDS=FasterMake,RecursiveMake\n' - 'in your mozconfig.') + if "RecursiveMake" not in self.substs["BUILD_BACKENDS"]: + print( + "Artifact builds do not support localization. " + "If you know what you are doing, you can use:\n" + "ac_add_options --disable-compile-environment\n" + "export BUILD_BACKENDS=FasterMake,RecursiveMake\n" + "in your mozconfig." + ) return 1 - if 'en-US' not in locales: - self.log(logging.WARN, 'package-multi-locale', {'locales': locales}, - 'List of locales does not include default locale "en-US": ' - '{locales}; adding "en-US"') - locales.append('en-US') + if "en-US" not in locales: + self.log( + logging.WARN, + "package-multi-locale", + {"locales": locales}, + 'List of locales does not include default locale "en-US": ' + '{locales}; adding "en-US"', + ) + locales.append("en-US") locales = list(sorted(locales)) append_env = { # We are only (re-)packaging, we don't want to (re-)build # anything inside Gradle. 
- 'GRADLE_INVOKED_WITHIN_MACH_BUILD': '1', - 'MOZ_CHROME_MULTILOCALE': ' '.join(locales), + "GRADLE_INVOKED_WITHIN_MACH_BUILD": "1", + "MOZ_CHROME_MULTILOCALE": " ".join(locales), } for locale in locales: - if locale == 'en-US': - self.log(logging.INFO, 'package-multi-locale', {'locale': locale}, - 'Skipping default locale {locale}') + if locale == "en-US": + self.log( + logging.INFO, + "package-multi-locale", + {"locale": locale}, + "Skipping default locale {locale}", + ) continue - self.log(logging.INFO, 'package-multi-locale', {'locale': locale}, - 'Processing chrome Gecko resources for locale {locale}') + self.log( + logging.INFO, + "package-multi-locale", + {"locale": locale}, + "Processing chrome Gecko resources for locale {locale}", + ) self.run_process( - [mozpath.join(self.topsrcdir, 'mach'), 'build', 'chrome-{}'.format(locale)], + [ + mozpath.join(self.topsrcdir, "mach"), + "build", + "chrome-{}".format(locale), + ], append_env=append_env, pass_thru=True, ensure_exit_code=True, - cwd=mozpath.join(self.topsrcdir)) + cwd=mozpath.join(self.topsrcdir), + ) - if self.substs['MOZ_BUILD_APP'] == 'mobile/android': - self.log(logging.INFO, 'package-multi-locale', {}, - 'Invoking `mach android assemble-app`') + if self.substs["MOZ_BUILD_APP"] == "mobile/android": + self.log( + logging.INFO, + "package-multi-locale", + {}, + "Invoking `mach android assemble-app`", + ) self.run_process( - [mozpath.join(self.topsrcdir, 'mach'), 'android', 'assemble-app'], + [mozpath.join(self.topsrcdir, "mach"), "android", "assemble-app"], append_env=append_env, pass_thru=True, ensure_exit_code=True, - cwd=mozpath.join(self.topsrcdir)) + cwd=mozpath.join(self.topsrcdir), + ) - self.log(logging.INFO, 'package-multi-locale', {}, - 'Invoking multi-locale `mach package`') + self.log( + logging.INFO, + "package-multi-locale", + {}, + "Invoking multi-locale `mach package`", + ) self._run_make( directory=self.topobjdir, - target=['package', 'AB_CD=multi'], + target=["package", "AB_CD=multi"], append_env=append_env, pass_thru=True, - ensure_exit_code=True) + ensure_exit_code=True, + ) - if self.substs['MOZ_BUILD_APP'] == 'mobile/android': - self.log(logging.INFO, 'package-multi-locale', {}, - 'Invoking `mach android archive-geckoview`') + if self.substs["MOZ_BUILD_APP"] == "mobile/android": + self.log( + logging.INFO, + "package-multi-locale", + {}, + "Invoking `mach android archive-geckoview`", + ) self.run_process( - [mozpath.join(self.topsrcdir, 'mach'), 'android', - 'archive-geckoview'], + [mozpath.join(self.topsrcdir, "mach"), "android", "archive-geckoview"], append_env=append_env, pass_thru=True, ensure_exit_code=True, - cwd=mozpath.join(self.topsrcdir)) + cwd=mozpath.join(self.topsrcdir), + ) return 0 @@ -1518,16 +2048,22 @@ class L10NCommands(MachCommandBase): class CreateMachEnvironment(MachCommandBase): """Create the mach virtualenvs.""" - @Command('create-mach-environment', category='devenv', - description=( - 'Create the `mach` virtualenvs. If executed with python3 (the ' - 'default when entering from `mach`), create both a python3 ' - 'and python2.7 virtualenv. If executed with python2, only ' - 'create the python2.7 virtualenv.')) + @Command( + "create-mach-environment", + category="devenv", + description=( + "Create the `mach` virtualenvs. If executed with python3 (the " + "default when entering from `mach`), create both a python3 " + "and python2.7 virtualenv. If executed with python2, only " + "create the python2.7 virtualenv." 
+ ), + ) @CommandArgument( - '-f', '--force', action='store_true', - help=('Force re-creating the virtualenv even if it is already ' - 'up-to-date.')) + "-f", + "--force", + action="store_true", + help=("Force re-creating the virtualenv even if it is already " "up-to-date."), + ) def create_mach_environment(self, force=False): from mozboot.util import get_mach_virtualenv_root from mozbuild.pythonutil import find_python2_executable @@ -1536,47 +2072,59 @@ class CreateMachEnvironment(MachCommandBase): virtualenv_path = get_mach_virtualenv_root(py2=PY2) if sys.executable.startswith(virtualenv_path): - print('You can only create a mach environment with the system ' - 'Python. Re-run this `mach` command with the system Python.', - file=sys.stderr) + print( + "You can only create a mach environment with the system " + "Python. Re-run this `mach` command with the system Python.", + file=sys.stderr, + ) return 1 manager = VirtualenvManager( - self.topsrcdir, virtualenv_path, sys.stdout, - os.path.join(self.topsrcdir, 'build', - 'mach_virtualenv_packages.txt'), - populate_local_paths=False) + self.topsrcdir, + virtualenv_path, + sys.stdout, + os.path.join(self.topsrcdir, "build", "mach_virtualenv_packages.txt"), + populate_local_paths=False, + ) if manager.up_to_date(sys.executable) and not force: - print('virtualenv at %s is already up to date.' % virtualenv_path) + print("virtualenv at %s is already up to date." % virtualenv_path) else: manager.build(sys.executable) - manager.install_pip_package('zstandard>=0.9.0,<=0.13.0') + manager.install_pip_package("zstandard>=0.9.0,<=0.13.0") if not PY2: # This can fail on some platforms. See # https://bugzilla.mozilla.org/show_bug.cgi?id=1660120 try: - manager.install_pip_package('glean_sdk~=32.3.1') + manager.install_pip_package("glean_sdk~=32.3.1") except subprocess.CalledProcessError: - print('Could not install glean_sdk, so telemetry will not be ' - 'collected. Continuing.') - print('Python 3 mach environment created.') + print( + "Could not install glean_sdk, so telemetry will not be " + "collected. Continuing." + ) + print("Python 3 mach environment created.") python2, _ = find_python2_executable() if not python2: - print('WARNING! Could not find a Python 2 executable to create ' - 'a Python 2 virtualenv', file=sys.stderr) + print( + "WARNING! Could not find a Python 2 executable to create " + "a Python 2 virtualenv", + file=sys.stderr, + ) return 0 args = [ - python2, os.path.join(self.topsrcdir, 'mach'), - 'create-mach-environment' + python2, + os.path.join(self.topsrcdir, "mach"), + "create-mach-environment", ] if force: - args.append('-f') + args.append("-f") ret = subprocess.call(args) if ret: - print('WARNING! Failed to create a Python 2 mach environment.', - file=sys.stderr) + print( + "WARNING! Failed to create a Python 2 mach environment.", + file=sys.stderr, + ) else: - print('Python 2 mach environment created.') + print("Python 2 mach environment created.") diff --git a/python/mozbuild/mozbuild/telemetry.py b/python/mozbuild/mozbuild/telemetry.py index 1046836fee66..fb8a82bbac1c 100644 --- a/python/mozbuild/mozbuild/telemetry.py +++ b/python/mozbuild/mozbuild/telemetry.py @@ -4,10 +4,10 @@ from __future__ import division, absolute_import, print_function, unicode_literals -''' +""" This file contains a voluptuous schema definition for build system telemetry, and functions to fill an instance of that schema for a single mach invocation. 
-''' +""" import json import os @@ -31,104 +31,150 @@ import mozpack.path as mozpath from .base import BuildEnvironmentNotFoundException from .configure.constants import CompilerType -schema = Schema({ - Required('client_id', description='A UUID to uniquely identify a client'): Any(*string_types), - Required('time', description='Time at which this event happened'): Datetime(), - Required('command', description='The mach command that was invoked'): Any(*string_types), - Required('argv', description=( - 'Full mach commandline. ' + - 'If the commandline contains ' + - 'absolute paths they will be sanitized.')): [Any(*string_types)], - Required('success', description='true if the command succeeded'): bool, - Optional('exception', description=( - 'If a Python exception was encountered during the execution ' + - 'of the command, this value contains the result of calling `repr` ' + - 'on the exception object.')): Any(*string_types), - Optional('file_types_changed', description=( - 'This array contains a list of objects with {ext, count} properties giving the count ' + - 'of files changed since the last invocation grouped by file type')): [ +schema = Schema( + { + Required("client_id", description="A UUID to uniquely identify a client"): Any( + *string_types + ), + Required("time", description="Time at which this event happened"): Datetime(), + Required("command", description="The mach command that was invoked"): Any( + *string_types + ), + Required( + "argv", + description=( + "Full mach commandline. " + + "If the commandline contains " + + "absolute paths they will be sanitized." + ), + ): [Any(*string_types)], + Required("success", description="true if the command succeeded"): bool, + Optional( + "exception", + description=( + "If a Python exception was encountered during the execution " + + "of the command, this value contains the result of calling `repr` " + + "on the exception object." + ), + ): Any(*string_types), + Optional( + "file_types_changed", + description=( + "This array contains a list of objects with {ext, count} properties giving the " + + "count of files changed since the last invocation grouped by file type" + ), + ): [ { - Required('ext', description='File extension'): Any(*string_types), - Required('count', description='Count of changed files with this extension'): int, + Required("ext", description="File extension"): Any(*string_types), + Required( + "count", description="Count of changed files with this extension" + ): int, } ], - Required('duration_ms', description='Command duration in milliseconds'): int, - Required('build_opts', description='Selected build options'): { - Optional('compiler', description='The compiler type in use (CC_TYPE)'): - Any(*CompilerType.POSSIBLE_VALUES), - Optional('artifact', description='true if --enable-artifact-builds'): bool, - Optional('debug', description='true if build is debug (--enable-debug)'): bool, - Optional('opt', description='true if build is optimized (--enable-optimize)'): bool, - Optional('ccache', description='true if ccache is in use (--with-ccache)'): bool, - Optional('sccache', description='true if ccache in use is sccache'): bool, - Optional('icecream', description='true if icecream in use'): bool, - }, - Optional('build_attrs', description='Attributes characterizing a build'): { - Optional('cpu_percent', description='cpu utilization observed during a build'): int, - Optional('clobber', description='true if the build was a clobber/full build'): bool, - }, - Required('system'): { - # We don't need perfect granularity here. 
- Required('os', description='Operating system'): Any('windows', 'macos', 'linux', 'other'), - Optional('cpu_brand', description='CPU brand string from CPUID'): Any(*string_types), - Optional('logical_cores', description='Number of logical CPU cores present'): int, - Optional('physical_cores', description='Number of physical CPU cores present'): int, - Optional('memory_gb', description='System memory in GB'): int, - Optional('drive_is_ssd', - description='true if the source directory is on a solid-state disk'): bool, - Optional('virtual_machine', - description='true if the OS appears to be running in a virtual machine'): bool, - }, -}) + Required("duration_ms", description="Command duration in milliseconds"): int, + Required("build_opts", description="Selected build options"): { + Optional("compiler", description="The compiler type in use (CC_TYPE)"): Any( + *CompilerType.POSSIBLE_VALUES + ), + Optional("artifact", description="true if --enable-artifact-builds"): bool, + Optional( + "debug", description="true if build is debug (--enable-debug)" + ): bool, + Optional( + "opt", description="true if build is optimized (--enable-optimize)" + ): bool, + Optional( + "ccache", description="true if ccache is in use (--with-ccache)" + ): bool, + Optional("sccache", description="true if ccache in use is sccache"): bool, + Optional("icecream", description="true if icecream in use"): bool, + }, + Optional("build_attrs", description="Attributes characterizing a build"): { + Optional( + "cpu_percent", description="cpu utilization observed during a build" + ): int, + Optional( + "clobber", description="true if the build was a clobber/full build" + ): bool, + }, + Required("system"): { + # We don't need perfect granularity here. + Required("os", description="Operating system"): Any( + "windows", "macos", "linux", "other" + ), + Optional("cpu_brand", description="CPU brand string from CPUID"): Any( + *string_types + ), + Optional( + "logical_cores", description="Number of logical CPU cores present" + ): int, + Optional( + "physical_cores", description="Number of physical CPU cores present" + ): int, + Optional("memory_gb", description="System memory in GB"): int, + Optional( + "drive_is_ssd", + description="true if the source directory is on a solid-state disk", + ): bool, + Optional( + "virtual_machine", + description="true if the OS appears to be running in a virtual machine", + ): bool, + }, + } +) def get_client_id(state_dir): - ''' + """ Get a client id, which is a UUID, from a file in the state directory. If the file doesn't exist, generate a UUID and save it to a file. - ''' - path = os.path.join(state_dir, 'telemetry_client_id.json') + """ + path = os.path.join(state_dir, "telemetry_client_id.json") if os.path.exists(path): - with open(path, 'r') as f: - return json.load(f)['client_id'] + with open(path, "r") as f: + return json.load(f)["client_id"] import uuid + # uuid4 is random, other uuid types may include identifiers from the local system. client_id = str(uuid.uuid4()) if PY3: - file_mode = 'w' + file_mode = "w" else: - file_mode = 'wb' + file_mode = "wb" with open(path, file_mode) as f: - json.dump({'client_id': client_id}, f) + json.dump({"client_id": client_id}, f) return client_id def cpu_brand_linux(): - ''' + """ Read the CPU brand string out of /proc/cpuinfo on Linux. 
- ''' - with open('/proc/cpuinfo', 'r') as f: + """ + with open("/proc/cpuinfo", "r") as f: for line in f: - if line.startswith('model name'): - _, brand = line.split(': ', 1) + if line.startswith("model name"): + _, brand = line.split(": ", 1) return brand.rstrip() # not found? return None def cpu_brand_windows(): - ''' + """ Read the CPU brand string from the registry on Windows. - ''' + """ try: import _winreg except ImportError: import winreg as _winreg try: - h = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, - r'HARDWARE\DESCRIPTION\System\CentralProcessor\0') - (brand, ty) = _winreg.QueryValueEx(h, 'ProcessorNameString') + h = _winreg.OpenKey( + _winreg.HKEY_LOCAL_MACHINE, + r"HARDWARE\DESCRIPTION\System\CentralProcessor\0", + ) + (brand, ty) = _winreg.QueryValueEx(h, "ProcessorNameString") if ty == _winreg.REG_SZ: return brand except WindowsError: @@ -137,23 +183,26 @@ def cpu_brand_windows(): def cpu_brand_mac(): - ''' + """ Get the CPU brand string via sysctl on macos. - ''' + """ import ctypes import ctypes.util libc = ctypes.cdll.LoadLibrary(ctypes.util.find_library("c")) # First, find the required buffer size. bufsize = ctypes.c_size_t(0) - result = libc.sysctlbyname(b'machdep.cpu.brand_string', None, ctypes.byref(bufsize), - None, 0) + result = libc.sysctlbyname( + b"machdep.cpu.brand_string", None, ctypes.byref(bufsize), None, 0 + ) if result != 0: return None bufsize.value += 1 buf = ctypes.create_string_buffer(bufsize.value) # Now actually get the value. - result = libc.sysctlbyname(b'machdep.cpu.brand_string', buf, ctypes.byref(bufsize), None, 0) + result = libc.sysctlbyname( + b"machdep.cpu.brand_string", buf, ctypes.byref(bufsize), None, 0 + ) if result != 0: return None @@ -161,30 +210,30 @@ def cpu_brand_mac(): def get_cpu_brand(): - ''' + """ Get the CPU brand string as returned by CPUID. - ''' + """ return { - 'Linux': cpu_brand_linux, - 'Windows': cpu_brand_windows, - 'Darwin': cpu_brand_mac, + "Linux": cpu_brand_linux, + "Windows": cpu_brand_windows, + "Darwin": cpu_brand_mac, }.get(platform.system(), lambda: None)() def get_os_name(): return { - 'Linux': 'linux', - 'Windows': 'windows', - 'Darwin': 'macos', - }.get(platform.system(), 'other') + "Linux": "linux", + "Windows": "windows", + "Darwin": "macos", + }.get(platform.system(), "other") def get_psutil_stats(): - '''Return whether psutil exists and its associated stats. + """Return whether psutil exists and its associated stats. @returns (bool, int, int, int) whether psutil exists, the logical CPU count, physical CPU count, and total number of bytes of memory. - ''' + """ try: import psutil @@ -192,91 +241,93 @@ def get_psutil_stats(): True, psutil.cpu_count(), psutil.cpu_count(logical=False), - psutil.virtual_memory().total) + psutil.virtual_memory().total, + ) except ImportError: return False, None, None, None def get_system_info(): - ''' + """ Gather info to fill the `system` keys in the schema. - ''' + """ # Normalize OS names a bit, and bucket non-tier-1 platforms into "other". has_psutil, logical_cores, physical_cores, memory_total = get_psutil_stats() info = { - 'os': get_os_name(), + "os": get_os_name(), } if has_psutil: # `total` on Linux is gathered from /proc/meminfo's `MemTotal`, which is the # total amount of physical memory minus some kernel usage, so round up to the # nearest GB to get a sensible answer. 
- info['memory_gb'] = int(math.ceil(float(memory_total) / (1024 * 1024 * 1024))) - info['logical_cores'] = logical_cores + info["memory_gb"] = int(math.ceil(float(memory_total) / (1024 * 1024 * 1024))) + info["logical_cores"] = logical_cores if physical_cores is not None: - info['physical_cores'] = physical_cores + info["physical_cores"] = physical_cores cpu_brand = get_cpu_brand() if cpu_brand is not None: - info['cpu_brand'] = cpu_brand + info["cpu_brand"] = cpu_brand # TODO: drive_is_ssd, virtual_machine: https://bugzilla.mozilla.org/show_bug.cgi?id=1481613 return info def get_build_opts(substs): - ''' + """ Translate selected items from `substs` into `build_opts` keys in the schema. - ''' + """ try: opts = { - k: ty(substs.get(s, None)) for (k, s, ty) in ( + k: ty(substs.get(s, None)) + for (k, s, ty) in ( # Selected substitutions. - ('artifact', 'MOZ_ARTIFACT_BUILDS', bool), - ('debug', 'MOZ_DEBUG', bool), - ('opt', 'MOZ_OPTIMIZE', bool), - ('ccache', 'CCACHE', bool), - ('sccache', 'MOZ_USING_SCCACHE', bool), + ("artifact", "MOZ_ARTIFACT_BUILDS", bool), + ("debug", "MOZ_DEBUG", bool), + ("opt", "MOZ_OPTIMIZE", bool), + ("ccache", "CCACHE", bool), + ("sccache", "MOZ_USING_SCCACHE", bool), ) } - compiler = substs.get('CC_TYPE', None) + compiler = substs.get("CC_TYPE", None) if compiler: - opts['compiler'] = str(compiler) - if substs.get('CXX_IS_ICECREAM', None): - opts['icecream'] = True + opts["compiler"] = str(compiler) + if substs.get("CXX_IS_ICECREAM", None): + opts["icecream"] = True return opts except BuildEnvironmentNotFoundException: return {} def get_build_attrs(attrs): - ''' + """ Extracts clobber and cpu usage info from command attributes. - ''' + """ res = {} - clobber = attrs.get('clobber') + clobber = attrs.get("clobber") if clobber: - res['clobber'] = clobber - usage = attrs.get('usage') + res["clobber"] = clobber + usage = attrs.get("usage") if usage: - cpu_percent = usage.get('cpu_percent') + cpu_percent = usage.get("cpu_percent") if cpu_percent: - res['cpu_percent'] = int(round(cpu_percent)) + res["cpu_percent"] = int(round(cpu_percent)) return res def filter_args(command, argv, instance): - ''' + """ Given the full list of command-line arguments, remove anything up to and including `command`, and attempt to filter absolute pathnames out of any arguments after that. - ''' + """ # Each key is a pathname and the values are replacement sigils paths = { - instance.topsrcdir: '$topsrcdir/', - instance.topobjdir: '$topobjdir/', - mozpath.normpath(os.path.expanduser('~')): '$HOME/', + instance.topsrcdir: "$topsrcdir/", + instance.topobjdir: "$topobjdir/", + mozpath.normpath(os.path.expanduser("~")): "$HOME/", # This might override one of the existing entries, that's OK. # We don't use a sigil here because we treat all arguments as potentially relative # paths, so we'd like to get them back as they were specified. - mozpath.normpath(os.getcwd()): '', + mozpath.normpath(os.getcwd()): "", } args = list(argv) @@ -291,36 +342,38 @@ def filter_args(command, argv, instance): if base: return paths[base] + mozpath.relpath(p, base) # Best-effort. - return '<path omitted>' + return "<path omitted>" + return [filter_path(arg) for arg in args] -def gather_telemetry(command, success, start_time, end_time, mach_context, - instance, command_attrs): - ''' +def gather_telemetry( + command, success, start_time, end_time, mach_context, instance, command_attrs +): + """ Gather telemetry about the build and the user's system and pass it to the telemetry handler to be stored for later submission. 
Any absolute paths on the command line will be made relative to a relevant base path or replaced with a placeholder to avoid including paths from developer's machines. - ''' + """ try: substs = instance.substs except BuildEnvironmentNotFoundException: substs = {} data = { - 'client_id': get_client_id(mach_context.state_dir), + "client_id": get_client_id(mach_context.state_dir), # Get an rfc3339 datetime string. - 'time': datetime.utcfromtimestamp(start_time).strftime('%Y-%m-%dT%H:%M:%S.%fZ'), - 'command': command, - 'argv': filter_args(command, sys.argv, instance), - 'success': success, + "time": datetime.utcfromtimestamp(start_time).strftime("%Y-%m-%dT%H:%M:%S.%fZ"), + "command": command, + "argv": filter_args(command, sys.argv, instance), + "success": success, # TODO: use a monotonic clock: https://bugzilla.mozilla.org/show_bug.cgi?id=1481624 - 'duration_ms': int((end_time - start_time) * 1000), - 'build_opts': get_build_opts(substs), - 'build_attrs': get_build_attrs(command_attrs), - 'system': get_system_info(), + "duration_ms": int((end_time - start_time) * 1000), + "build_opts": get_build_opts(substs), + "build_attrs": get_build_attrs(command_attrs), + "system": get_system_info(), # TODO: exception: https://bugzilla.mozilla.org/show_bug.cgi?id=1481617 # TODO: file_types_changed: https://bugzilla.mozilla.org/show_bug.cgi?id=1481774 } @@ -329,15 +382,15 @@ def gather_telemetry(command, success, start_time, end_time, mach_context, schema(data) return data except MultipleInvalid as exc: - msg = ['Build telemetry is invalid:'] + msg = ["Build telemetry is invalid:"] for error in exc.errors: msg.append(str(error)) - print('\n'.join(msg) + '\n' + pprint.pformat(data)) + print("\n".join(msg) + "\n" + pprint.pformat(data)) return None def verify_statedir(statedir): - ''' + """ Verifies the statedir is structured correctly. Returns the outgoing, submitted and log paths. 
@@ -347,18 +400,18 @@ def verify_statedir(statedir): Creates the following directories and files if absent (first submission): - statedir/telemetry/submitted - ''' + """ - telemetry_dir = os.path.join(statedir, 'telemetry') - outgoing = os.path.join(telemetry_dir, 'outgoing') - submitted = os.path.join(telemetry_dir, 'submitted') - telemetry_log = os.path.join(telemetry_dir, 'telemetry.log') + telemetry_dir = os.path.join(statedir, "telemetry") + outgoing = os.path.join(telemetry_dir, "outgoing") + submitted = os.path.join(telemetry_dir, "submitted") + telemetry_log = os.path.join(telemetry_dir, "telemetry.log") if not os.path.isdir(telemetry_dir): - raise Exception('{} does not exist'.format(telemetry_dir)) + raise Exception("{} does not exist".format(telemetry_dir)) if not os.path.isdir(outgoing): - raise Exception('{} does not exist'.format(outgoing)) + raise Exception("{} does not exist".format(outgoing)) if not os.path.isdir(submitted): os.mkdir(submitted) diff --git a/python/mozbuild/mozbuild/test/backend/test_build.py b/python/mozbuild/mozbuild/test/backend/test_build.py index ecf4c46846d3..8ae6246ab9f6 100644 --- a/python/mozbuild/mozbuild/test/backend/test_build.py +++ b/python/mozbuild/mozbuild/test/backend/test_build.py @@ -26,18 +26,18 @@ from tempfile import mkdtemp BASE_SUBSTS = [ - ('PYTHON', mozpath.normsep(sys.executable)), - ('PYTHON3', mozpath.normsep(sys.executable)), - ('MOZ_UI_LOCALE', 'en-US'), + ("PYTHON", mozpath.normsep(sys.executable)), + ("PYTHON3", mozpath.normsep(sys.executable)), + ("MOZ_UI_LOCALE", "en-US"), ] class TestBuild(unittest.TestCase): def setUp(self): self._old_env = dict(os.environ) - os.environ.pop('MOZCONFIG', None) - os.environ.pop('MOZ_OBJDIR', None) - os.environ.pop('MOZ_PGO', None) + os.environ.pop("MOZCONFIG", None) + os.environ.pop("MOZ_OBJDIR", None) + os.environ.pop("MOZ_PGO", None) def tearDown(self): os.environ.clear() @@ -49,13 +49,11 @@ class TestBuild(unittest.TestCase): # the same drive on Windows. 
topobjdir = mkdtemp(dir=buildconfig.topsrcdir) try: - config = ConfigEnvironment(buildconfig.topsrcdir, topobjdir, - **kwargs) + config = ConfigEnvironment(buildconfig.topsrcdir, topobjdir, **kwargs) reader = BuildReader(config) emitter = TreeMetadataEmitter(config) - moz_build = mozpath.join(config.topsrcdir, 'test.mozbuild') - definitions = list(emitter.emit( - reader.read_mozbuild(moz_build, config))) + moz_build = mozpath.join(config.topsrcdir, "test.mozbuild") + definitions = list(emitter.emit(reader.read_mozbuild(moz_build, config))) for backend in backends: backend(config).consume(definitions) @@ -63,7 +61,7 @@ class TestBuild(unittest.TestCase): except Exception: raise finally: - if not os.environ.get('MOZ_NO_CLEANUP'): + if not os.environ.get("MOZ_NO_CLEANUP"): shutil.rmtree(topobjdir) @contextmanager @@ -76,162 +74,172 @@ class TestBuild(unittest.TestCase): try: yield handle_make_line except Exception: - print('\n'.join(lines)) + print("\n".join(lines)) raise - if os.environ.get('MOZ_VERBOSE_MAKE'): - print('\n'.join(lines)) + if os.environ.get("MOZ_VERBOSE_MAKE"): + print("\n".join(lines)) def test_recursive_make(self): substs = list(BASE_SUBSTS) - with self.do_test_backend(RecursiveMakeBackend, - substs=substs) as config: - build = MozbuildObject(config.topsrcdir, None, None, - config.topobjdir) + with self.do_test_backend(RecursiveMakeBackend, substs=substs) as config: + build = MozbuildObject(config.topsrcdir, None, None, config.topobjdir) overrides = [ - 'install_manifest_depends=', - 'MOZ_JAR_MAKER_FILE_FORMAT=flat', - 'TEST_MOZBUILD=1', + "install_manifest_depends=", + "MOZ_JAR_MAKER_FILE_FORMAT=flat", + "TEST_MOZBUILD=1", ] with self.line_handler() as handle_make_line: - build._run_make(directory=config.topobjdir, target=overrides, - silent=False, line_handler=handle_make_line) + build._run_make( + directory=config.topobjdir, + target=overrides, + silent=False, + line_handler=handle_make_line, + ) self.validate(config) def test_faster_recursive_make(self): substs = list(BASE_SUBSTS) + [ - ('BUILD_BACKENDS', 'FasterMake+RecursiveMake'), + ("BUILD_BACKENDS", "FasterMake+RecursiveMake"), ] - with self.do_test_backend(get_backend_class( - 'FasterMake+RecursiveMake'), substs=substs) as config: - buildid = mozpath.join(config.topobjdir, 'config', 'buildid') + with self.do_test_backend( + get_backend_class("FasterMake+RecursiveMake"), substs=substs + ) as config: + buildid = mozpath.join(config.topobjdir, "config", "buildid") ensureParentDir(buildid) - with open(buildid, 'w') as fh: - fh.write('20100101012345\n') + with open(buildid, "w") as fh: + fh.write("20100101012345\n") - build = MozbuildObject(config.topsrcdir, None, None, - config.topobjdir) + build = MozbuildObject(config.topsrcdir, None, None, config.topobjdir) overrides = [ - 'install_manifest_depends=', - 'MOZ_JAR_MAKER_FILE_FORMAT=flat', - 'TEST_MOZBUILD=1', + "install_manifest_depends=", + "MOZ_JAR_MAKER_FILE_FORMAT=flat", + "TEST_MOZBUILD=1", ] with self.line_handler() as handle_make_line: - build._run_make(directory=config.topobjdir, target=overrides, - silent=False, line_handler=handle_make_line) + build._run_make( + directory=config.topobjdir, + target=overrides, + silent=False, + line_handler=handle_make_line, + ) self.validate(config) def test_faster_make(self): substs = list(BASE_SUBSTS) + [ - ('MOZ_BUILD_APP', 'dummy_app'), - ('MOZ_WIDGET_TOOLKIT', 'dummy_widget'), + ("MOZ_BUILD_APP", "dummy_app"), + ("MOZ_WIDGET_TOOLKIT", "dummy_widget"), ] - with self.do_test_backend(RecursiveMakeBackend, 
FasterMakeBackend, - substs=substs) as config: - buildid = mozpath.join(config.topobjdir, 'config', 'buildid') + with self.do_test_backend( + RecursiveMakeBackend, FasterMakeBackend, substs=substs + ) as config: + buildid = mozpath.join(config.topobjdir, "config", "buildid") ensureParentDir(buildid) - with open(buildid, 'w') as fh: - fh.write('20100101012345\n') + with open(buildid, "w") as fh: + fh.write("20100101012345\n") - build = MozbuildObject(config.topsrcdir, None, None, - config.topobjdir) + build = MozbuildObject(config.topsrcdir, None, None, config.topobjdir) overrides = [ - 'TEST_MOZBUILD=1', + "TEST_MOZBUILD=1", ] with self.line_handler() as handle_make_line: - build._run_make(directory=mozpath.join(config.topobjdir, - 'faster'), - target=overrides, silent=False, - line_handler=handle_make_line) + build._run_make( + directory=mozpath.join(config.topobjdir, "faster"), + target=overrides, + silent=False, + line_handler=handle_make_line, + ) self.validate(config) def validate(self, config): self.maxDiff = None - test_path = mozpath.join('$SRCDIR', 'python', 'mozbuild', 'mozbuild', - 'test', 'backend', 'data', 'build') + test_path = mozpath.join( + "$SRCDIR", + "python", + "mozbuild", + "mozbuild", + "test", + "backend", + "data", + "build", + ) result = { p: six.ensure_text(f.open().read()) - for p, f in FileFinder(mozpath.join(config.topobjdir, 'dist')) + for p, f in FileFinder(mozpath.join(config.topobjdir, "dist")) } self.assertTrue(len(result)) - self.assertEqual(result, { - 'bin/baz.ini': 'baz.ini: FOO is foo\n', - 'bin/child/bar.ini': 'bar.ini\n', - 'bin/child2/foo.css': 'foo.css: FOO is foo\n', - 'bin/child2/qux.ini': 'qux.ini: BAR is not defined\n', - 'bin/chrome.manifest': - 'manifest chrome/foo.manifest\n' - 'manifest components/components.manifest\n', - 'bin/chrome/foo.manifest': - 'content bar foo/child/\n' - 'content foo foo/\n' - 'override chrome://foo/bar.svg#hello ' - 'chrome://bar/bar.svg#hello\n', - 'bin/chrome/foo/bar.js': 'bar.js\n', - 'bin/chrome/foo/child/baz.jsm': - '//@line 2 "%s/baz.jsm"\nbaz.jsm: FOO is foo\n' % (test_path), - 'bin/chrome/foo/child/hoge.js': - '//@line 2 "%s/bar.js"\nbar.js: FOO is foo\n' % (test_path), - 'bin/chrome/foo/foo.css': 'foo.css: FOO is foo\n', - 'bin/chrome/foo/foo.js': 'foo.js\n', - 'bin/chrome/foo/qux.js': 'bar.js\n', - 'bin/components/bar.js': - '//@line 2 "%s/bar.js"\nbar.js: FOO is foo\n' % (test_path), - 'bin/components/components.manifest': - 'component {foo} foo.js\ncomponent {bar} bar.js\n', - 'bin/components/foo.js': 'foo.js\n', - 'bin/defaults/pref/prefs.js': 'prefs.js\n', - 'bin/foo.ini': 'foo.ini\n', - 'bin/modules/baz.jsm': - '//@line 2 "%s/baz.jsm"\nbaz.jsm: FOO is foo\n' % (test_path), - 'bin/modules/child/bar.jsm': 'bar.jsm\n', - 'bin/modules/child2/qux.jsm': - '//@line 4 "%s/qux.jsm"\nqux.jsm: BAR is not defined\n' + self.assertEqual( + result, + { + "bin/baz.ini": "baz.ini: FOO is foo\n", + "bin/child/bar.ini": "bar.ini\n", + "bin/child2/foo.css": "foo.css: FOO is foo\n", + "bin/child2/qux.ini": "qux.ini: BAR is not defined\n", + "bin/chrome.manifest": "manifest chrome/foo.manifest\n" + "manifest components/components.manifest\n", + "bin/chrome/foo.manifest": "content bar foo/child/\n" + "content foo foo/\n" + "override chrome://foo/bar.svg#hello " + "chrome://bar/bar.svg#hello\n", + "bin/chrome/foo/bar.js": "bar.js\n", + "bin/chrome/foo/child/baz.jsm": '//@line 2 "%s/baz.jsm"\nbaz.jsm: FOO is foo\n' % (test_path), - 'bin/modules/foo.jsm': 'foo.jsm\n', - 'bin/res/resource': 'resource\n', - 
'bin/res/child/resource2': 'resource2\n', - - 'bin/app/baz.ini': 'baz.ini: FOO is bar\n', - 'bin/app/child/bar.ini': 'bar.ini\n', - 'bin/app/child2/qux.ini': 'qux.ini: BAR is defined\n', - 'bin/app/chrome.manifest': - 'manifest chrome/foo.manifest\n' - 'manifest components/components.manifest\n', - 'bin/app/chrome/foo.manifest': - 'content bar foo/child/\n' - 'content foo foo/\n' - 'override chrome://foo/bar.svg#hello ' - 'chrome://bar/bar.svg#hello\n', - 'bin/app/chrome/foo/bar.js': 'bar.js\n', - 'bin/app/chrome/foo/child/baz.jsm': - '//@line 2 "%s/baz.jsm"\nbaz.jsm: FOO is bar\n' % (test_path), - 'bin/app/chrome/foo/child/hoge.js': - '//@line 2 "%s/bar.js"\nbar.js: FOO is bar\n' % (test_path), - 'bin/app/chrome/foo/foo.css': 'foo.css: FOO is bar\n', - 'bin/app/chrome/foo/foo.js': 'foo.js\n', - 'bin/app/chrome/foo/qux.js': 'bar.js\n', - 'bin/app/components/bar.js': - '//@line 2 "%s/bar.js"\nbar.js: FOO is bar\n' % (test_path), - 'bin/app/components/components.manifest': - 'component {foo} foo.js\ncomponent {bar} bar.js\n', - 'bin/app/components/foo.js': 'foo.js\n', - 'bin/app/defaults/preferences/prefs.js': 'prefs.js\n', - 'bin/app/foo.css': 'foo.css: FOO is bar\n', - 'bin/app/foo.ini': 'foo.ini\n', - 'bin/app/modules/baz.jsm': - '//@line 2 "%s/baz.jsm"\nbaz.jsm: FOO is bar\n' % (test_path), - 'bin/app/modules/child/bar.jsm': 'bar.jsm\n', - 'bin/app/modules/child2/qux.jsm': - '//@line 2 "%s/qux.jsm"\nqux.jsm: BAR is defined\n' + "bin/chrome/foo/child/hoge.js": '//@line 2 "%s/bar.js"\nbar.js: FOO is foo\n' % (test_path), - 'bin/app/modules/foo.jsm': 'foo.jsm\n', - }) + "bin/chrome/foo/foo.css": "foo.css: FOO is foo\n", + "bin/chrome/foo/foo.js": "foo.js\n", + "bin/chrome/foo/qux.js": "bar.js\n", + "bin/components/bar.js": '//@line 2 "%s/bar.js"\nbar.js: FOO is foo\n' + % (test_path), + "bin/components/components.manifest": "component {foo} foo.js\ncomponent {bar} bar.js\n", # NOQA: E501 + "bin/components/foo.js": "foo.js\n", + "bin/defaults/pref/prefs.js": "prefs.js\n", + "bin/foo.ini": "foo.ini\n", + "bin/modules/baz.jsm": '//@line 2 "%s/baz.jsm"\nbaz.jsm: FOO is foo\n' + % (test_path), + "bin/modules/child/bar.jsm": "bar.jsm\n", + "bin/modules/child2/qux.jsm": '//@line 4 "%s/qux.jsm"\nqux.jsm: BAR is not defined\n' # NOQA: E501 + % (test_path), + "bin/modules/foo.jsm": "foo.jsm\n", + "bin/res/resource": "resource\n", + "bin/res/child/resource2": "resource2\n", + "bin/app/baz.ini": "baz.ini: FOO is bar\n", + "bin/app/child/bar.ini": "bar.ini\n", + "bin/app/child2/qux.ini": "qux.ini: BAR is defined\n", + "bin/app/chrome.manifest": "manifest chrome/foo.manifest\n" + "manifest components/components.manifest\n", + "bin/app/chrome/foo.manifest": "content bar foo/child/\n" + "content foo foo/\n" + "override chrome://foo/bar.svg#hello " + "chrome://bar/bar.svg#hello\n", + "bin/app/chrome/foo/bar.js": "bar.js\n", + "bin/app/chrome/foo/child/baz.jsm": '//@line 2 "%s/baz.jsm"\nbaz.jsm: FOO is bar\n' + % (test_path), + "bin/app/chrome/foo/child/hoge.js": '//@line 2 "%s/bar.js"\nbar.js: FOO is bar\n' + % (test_path), + "bin/app/chrome/foo/foo.css": "foo.css: FOO is bar\n", + "bin/app/chrome/foo/foo.js": "foo.js\n", + "bin/app/chrome/foo/qux.js": "bar.js\n", + "bin/app/components/bar.js": '//@line 2 "%s/bar.js"\nbar.js: FOO is bar\n' + % (test_path), + "bin/app/components/components.manifest": "component {foo} foo.js\ncomponent {bar} bar.js\n", # NOQA: E501 + "bin/app/components/foo.js": "foo.js\n", + "bin/app/defaults/preferences/prefs.js": "prefs.js\n", + "bin/app/foo.css": "foo.css: FOO is bar\n", 
+ "bin/app/foo.ini": "foo.ini\n", + "bin/app/modules/baz.jsm": '//@line 2 "%s/baz.jsm"\nbaz.jsm: FOO is bar\n' + % (test_path), + "bin/app/modules/child/bar.jsm": "bar.jsm\n", + "bin/app/modules/child2/qux.jsm": '//@line 2 "%s/qux.jsm"\nqux.jsm: BAR is defined\n' # NOQA: E501 + % (test_path), + "bin/app/modules/foo.jsm": "foo.jsm\n", + }, + ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/python/mozbuild/mozbuild/test/backend/test_recursivemake.py b/python/mozbuild/mozbuild/test/backend/test_recursivemake.py index 04fb3f513ac8..d71c668dc2a9 100644 --- a/python/mozbuild/mozbuild/test/backend/test_recursivemake.py +++ b/python/mozbuild/mozbuild/test/backend/test_recursivemake.py @@ -31,253 +31,308 @@ import mozpack.path as mozpath class TestRecursiveMakeTraversal(unittest.TestCase): def test_traversal(self): traversal = RecursiveMakeTraversal() - traversal.add('', dirs=['A', 'B', 'C']) - traversal.add('', dirs=['D']) - traversal.add('A') - traversal.add('B', dirs=['E', 'F']) - traversal.add('C', dirs=['G', 'H']) - traversal.add('D', dirs=['I', 'K']) - traversal.add('D', dirs=['J', 'L']) - traversal.add('E') - traversal.add('F') - traversal.add('G') - traversal.add('H') - traversal.add('I', dirs=['M', 'N']) - traversal.add('J', dirs=['O', 'P']) - traversal.add('K', dirs=['Q', 'R']) - traversal.add('L', dirs=['S']) - traversal.add('M') - traversal.add('N', dirs=['T']) - traversal.add('O') - traversal.add('P', dirs=['U']) - traversal.add('Q') - traversal.add('R', dirs=['V']) - traversal.add('S', dirs=['W']) - traversal.add('T') - traversal.add('U') - traversal.add('V') - traversal.add('W', dirs=['X']) - traversal.add('X') + traversal.add("", dirs=["A", "B", "C"]) + traversal.add("", dirs=["D"]) + traversal.add("A") + traversal.add("B", dirs=["E", "F"]) + traversal.add("C", dirs=["G", "H"]) + traversal.add("D", dirs=["I", "K"]) + traversal.add("D", dirs=["J", "L"]) + traversal.add("E") + traversal.add("F") + traversal.add("G") + traversal.add("H") + traversal.add("I", dirs=["M", "N"]) + traversal.add("J", dirs=["O", "P"]) + traversal.add("K", dirs=["Q", "R"]) + traversal.add("L", dirs=["S"]) + traversal.add("M") + traversal.add("N", dirs=["T"]) + traversal.add("O") + traversal.add("P", dirs=["U"]) + traversal.add("Q") + traversal.add("R", dirs=["V"]) + traversal.add("S", dirs=["W"]) + traversal.add("T") + traversal.add("U") + traversal.add("V") + traversal.add("W", dirs=["X"]) + traversal.add("X") - parallels = set(('G', 'H', 'I', 'J', 'O', 'P', 'Q', 'R', 'U')) + parallels = set(("G", "H", "I", "J", "O", "P", "Q", "R", "U")) def filter(current, subdirs): - return (current, [d for d in subdirs.dirs if d in parallels], - [d for d in subdirs.dirs if d not in parallels]) + return ( + current, + [d for d in subdirs.dirs if d in parallels], + [d for d in subdirs.dirs if d not in parallels], + ) start, deps = traversal.compute_dependencies(filter) - self.assertEqual(start, ('X',)) + self.assertEqual(start, ("X",)) self.maxDiff = None - self.assertEqual(deps, { - 'A': ('',), - 'B': ('A',), - 'C': ('F',), - 'D': ('G', 'H'), - 'E': ('B',), - 'F': ('E',), - 'G': ('C',), - 'H': ('C',), - 'I': ('D',), - 'J': ('D',), - 'K': ('T', 'O', 'U'), - 'L': ('Q', 'V'), - 'M': ('I',), - 'N': ('M',), - 'O': ('J',), - 'P': ('J',), - 'Q': ('K',), - 'R': ('K',), - 'S': ('L',), - 'T': ('N',), - 'U': ('P',), - 'V': ('R',), - 'W': ('S',), - 'X': ('W',), - }) + self.assertEqual( + deps, + { + "A": ("",), + "B": ("A",), + "C": ("F",), + "D": ("G", "H"), + "E": ("B",), + "F": ("E",), + "G": ("C",), + 
"H": ("C",), + "I": ("D",), + "J": ("D",), + "K": ("T", "O", "U"), + "L": ("Q", "V"), + "M": ("I",), + "N": ("M",), + "O": ("J",), + "P": ("J",), + "Q": ("K",), + "R": ("K",), + "S": ("L",), + "T": ("N",), + "U": ("P",), + "V": ("R",), + "W": ("S",), + "X": ("W",), + }, + ) - self.assertEqual(list(traversal.traverse('', filter)), - ['', 'A', 'B', 'E', 'F', 'C', 'G', 'H', 'D', 'I', - 'M', 'N', 'T', 'J', 'O', 'P', 'U', 'K', 'Q', 'R', - 'V', 'L', 'S', 'W', 'X']) + self.assertEqual( + list(traversal.traverse("", filter)), + [ + "", + "A", + "B", + "E", + "F", + "C", + "G", + "H", + "D", + "I", + "M", + "N", + "T", + "J", + "O", + "P", + "U", + "K", + "Q", + "R", + "V", + "L", + "S", + "W", + "X", + ], + ) - self.assertEqual(list(traversal.traverse('C', filter)), - ['C', 'G', 'H']) + self.assertEqual(list(traversal.traverse("C", filter)), ["C", "G", "H"]) def test_traversal_2(self): traversal = RecursiveMakeTraversal() - traversal.add('', dirs=['A', 'B', 'C']) - traversal.add('A') - traversal.add('B', dirs=['D', 'E', 'F']) - traversal.add('C', dirs=['G', 'H', 'I']) - traversal.add('D') - traversal.add('E') - traversal.add('F') - traversal.add('G') - traversal.add('H') - traversal.add('I') + traversal.add("", dirs=["A", "B", "C"]) + traversal.add("A") + traversal.add("B", dirs=["D", "E", "F"]) + traversal.add("C", dirs=["G", "H", "I"]) + traversal.add("D") + traversal.add("E") + traversal.add("F") + traversal.add("G") + traversal.add("H") + traversal.add("I") start, deps = traversal.compute_dependencies() - self.assertEqual(start, ('I',)) - self.assertEqual(deps, { - 'A': ('',), - 'B': ('A',), - 'C': ('F',), - 'D': ('B',), - 'E': ('D',), - 'F': ('E',), - 'G': ('C',), - 'H': ('G',), - 'I': ('H',), - }) + self.assertEqual(start, ("I",)) + self.assertEqual( + deps, + { + "A": ("",), + "B": ("A",), + "C": ("F",), + "D": ("B",), + "E": ("D",), + "F": ("E",), + "G": ("C",), + "H": ("G",), + "I": ("H",), + }, + ) def test_traversal_filter(self): traversal = RecursiveMakeTraversal() - traversal.add('', dirs=['A', 'B', 'C']) - traversal.add('A') - traversal.add('B', dirs=['D', 'E', 'F']) - traversal.add('C', dirs=['G', 'H', 'I']) - traversal.add('D') - traversal.add('E') - traversal.add('F') - traversal.add('G') - traversal.add('H') - traversal.add('I') + traversal.add("", dirs=["A", "B", "C"]) + traversal.add("A") + traversal.add("B", dirs=["D", "E", "F"]) + traversal.add("C", dirs=["G", "H", "I"]) + traversal.add("D") + traversal.add("E") + traversal.add("F") + traversal.add("G") + traversal.add("H") + traversal.add("I") def filter(current, subdirs): - if current == 'B': + if current == "B": current = None return current, [], subdirs.dirs start, deps = traversal.compute_dependencies(filter) - self.assertEqual(start, ('I',)) - self.assertEqual(deps, { - 'A': ('',), - 'C': ('F',), - 'D': ('A',), - 'E': ('D',), - 'F': ('E',), - 'G': ('C',), - 'H': ('G',), - 'I': ('H',), - }) + self.assertEqual(start, ("I",)) + self.assertEqual( + deps, + { + "A": ("",), + "C": ("F",), + "D": ("A",), + "E": ("D",), + "F": ("E",), + "G": ("C",), + "H": ("G",), + "I": ("H",), + }, + ) def test_traversal_parallel(self): traversal = RecursiveMakeTraversal() - traversal.add('', dirs=['A', 'B', 'C']) - traversal.add('A') - traversal.add('B', dirs=['D', 'E', 'F']) - traversal.add('C', dirs=['G', 'H', 'I']) - traversal.add('D') - traversal.add('E') - traversal.add('F') - traversal.add('G') - traversal.add('H') - traversal.add('I') - traversal.add('J') + traversal.add("", dirs=["A", "B", "C"]) + traversal.add("A") + 
traversal.add("B", dirs=["D", "E", "F"]) + traversal.add("C", dirs=["G", "H", "I"]) + traversal.add("D") + traversal.add("E") + traversal.add("F") + traversal.add("G") + traversal.add("H") + traversal.add("I") + traversal.add("J") def filter(current, subdirs): return current, subdirs.dirs, [] start, deps = traversal.compute_dependencies(filter) - self.assertEqual(start, ('A', 'D', 'E', 'F', 'G', 'H', 'I', 'J')) - self.assertEqual(deps, { - 'A': ('',), - 'B': ('',), - 'C': ('',), - 'D': ('B',), - 'E': ('B',), - 'F': ('B',), - 'G': ('C',), - 'H': ('C',), - 'I': ('C',), - 'J': ('',), - }) + self.assertEqual(start, ("A", "D", "E", "F", "G", "H", "I", "J")) + self.assertEqual( + deps, + { + "A": ("",), + "B": ("",), + "C": ("",), + "D": ("B",), + "E": ("B",), + "F": ("B",), + "G": ("C",), + "H": ("C",), + "I": ("C",), + "J": ("",), + }, + ) class TestRecursiveMakeBackend(BackendTester): def test_basic(self): """Ensure the RecursiveMakeBackend works without error.""" - env = self._consume('stub0', RecursiveMakeBackend) - self.assertTrue(os.path.exists(mozpath.join(env.topobjdir, - 'backend.RecursiveMakeBackend'))) - self.assertTrue(os.path.exists(mozpath.join(env.topobjdir, - 'backend.RecursiveMakeBackend.in'))) + env = self._consume("stub0", RecursiveMakeBackend) + self.assertTrue( + os.path.exists(mozpath.join(env.topobjdir, "backend.RecursiveMakeBackend")) + ) + self.assertTrue( + os.path.exists( + mozpath.join(env.topobjdir, "backend.RecursiveMakeBackend.in") + ) + ) def test_output_files(self): """Ensure proper files are generated.""" - env = self._consume('stub0', RecursiveMakeBackend) + env = self._consume("stub0", RecursiveMakeBackend) - expected = ['', 'dir1', 'dir2'] + expected = ["", "dir1", "dir2"] for d in expected: - out_makefile = mozpath.join(env.topobjdir, d, 'Makefile') - out_backend = mozpath.join(env.topobjdir, d, 'backend.mk') + out_makefile = mozpath.join(env.topobjdir, d, "Makefile") + out_backend = mozpath.join(env.topobjdir, d, "backend.mk") self.assertTrue(os.path.exists(out_makefile)) self.assertTrue(os.path.exists(out_backend)) def test_makefile_conversion(self): """Ensure Makefile.in is converted properly.""" - env = self._consume('stub0', RecursiveMakeBackend) + env = self._consume("stub0", RecursiveMakeBackend) - p = mozpath.join(env.topobjdir, 'Makefile') + p = mozpath.join(env.topobjdir, "Makefile") - lines = [l.strip() for l in open(p, 'rt').readlines()[1:] if not l.startswith('#')] - self.assertEqual(lines, [ - 'DEPTH := .', - 'topobjdir := %s' % env.topobjdir, - 'topsrcdir := %s' % env.topsrcdir, - 'srcdir := %s' % env.topsrcdir, - 'srcdir_rel := %s' % mozpath.relpath(env.topsrcdir, env.topobjdir), - 'relativesrcdir := .', - 'include $(DEPTH)/config/autoconf.mk', - '', - 'FOO := foo', - '', - 'include $(topsrcdir)/config/recurse.mk', - ]) + lines = [ + l.strip() for l in open(p, "rt").readlines()[1:] if not l.startswith("#") + ] + self.assertEqual( + lines, + [ + "DEPTH := .", + "topobjdir := %s" % env.topobjdir, + "topsrcdir := %s" % env.topsrcdir, + "srcdir := %s" % env.topsrcdir, + "srcdir_rel := %s" % mozpath.relpath(env.topsrcdir, env.topobjdir), + "relativesrcdir := .", + "include $(DEPTH)/config/autoconf.mk", + "", + "FOO := foo", + "", + "include $(topsrcdir)/config/recurse.mk", + ], + ) def test_missing_makefile_in(self): """Ensure missing Makefile.in results in Makefile creation.""" - env = self._consume('stub0', RecursiveMakeBackend) + env = self._consume("stub0", RecursiveMakeBackend) - p = mozpath.join(env.topobjdir, 'dir2', 'Makefile') + p = 
mozpath.join(env.topobjdir, "dir2", "Makefile") self.assertTrue(os.path.exists(p)) - lines = [l.strip() for l in open(p, 'rt').readlines()] + lines = [l.strip() for l in open(p, "rt").readlines()] self.assertEqual(len(lines), 10) - self.assertTrue(lines[0].startswith('# THIS FILE WAS AUTOMATICALLY')) + self.assertTrue(lines[0].startswith("# THIS FILE WAS AUTOMATICALLY")) def test_backend_mk(self): """Ensure backend.mk file is written out properly.""" - env = self._consume('stub0', RecursiveMakeBackend) + env = self._consume("stub0", RecursiveMakeBackend) - p = mozpath.join(env.topobjdir, 'backend.mk') + p = mozpath.join(env.topobjdir, "backend.mk") - lines = [l.strip() for l in open(p, 'rt').readlines()[2:]] - self.assertEqual(lines, [ - 'DIRS := dir1 dir2', - ]) + lines = [l.strip() for l in open(p, "rt").readlines()[2:]] + self.assertEqual( + lines, + [ + "DIRS := dir1 dir2", + ], + ) # Make env.substs writable to add ENABLE_TESTS env.substs = dict(env.substs) - env.substs['ENABLE_TESTS'] = '1' - self._consume('stub0', RecursiveMakeBackend, env=env) - p = mozpath.join(env.topobjdir, 'backend.mk') + env.substs["ENABLE_TESTS"] = "1" + self._consume("stub0", RecursiveMakeBackend, env=env) + p = mozpath.join(env.topobjdir, "backend.mk") - lines = [l.strip() for l in open(p, 'rt').readlines()[2:]] - self.assertEqual(lines, [ - 'DIRS := dir1 dir2 dir3', - ]) + lines = [l.strip() for l in open(p, "rt").readlines()[2:]] + self.assertEqual( + lines, + [ + "DIRS := dir1 dir2 dir3", + ], + ) def test_mtime_no_change(self): """Ensure mtime is not updated if file content does not change.""" - env = self._consume('stub0', RecursiveMakeBackend) + env = self._consume("stub0", RecursiveMakeBackend) - makefile_path = mozpath.join(env.topobjdir, 'Makefile') - backend_path = mozpath.join(env.topobjdir, 'backend.mk') + makefile_path = mozpath.join(env.topobjdir, "Makefile") + backend_path = mozpath.join(env.topobjdir, "backend.mk") makefile_mtime = os.path.getmtime(makefile_path) backend_mtime = os.path.getmtime(backend_path) @@ -291,42 +346,45 @@ class TestRecursiveMakeBackend(BackendTester): def test_substitute_config_files(self): """Ensure substituted config files are produced.""" - env = self._consume('substitute_config_files', RecursiveMakeBackend) + env = self._consume("substitute_config_files", RecursiveMakeBackend) - p = mozpath.join(env.topobjdir, 'foo') + p = mozpath.join(env.topobjdir, "foo") self.assertTrue(os.path.exists(p)) - lines = [l.strip() for l in open(p, 'rt').readlines()] - self.assertEqual(lines, [ - 'TEST = foo', - ]) + lines = [l.strip() for l in open(p, "rt").readlines()] + self.assertEqual( + lines, + [ + "TEST = foo", + ], + ) def test_install_substitute_config_files(self): """Ensure we recurse into the dirs that install substituted config files.""" - env = self._consume('install_substitute_config_files', RecursiveMakeBackend) + env = self._consume("install_substitute_config_files", RecursiveMakeBackend) - root_deps_path = mozpath.join(env.topobjdir, 'root-deps.mk') - lines = [l.strip() for l in open(root_deps_path, 'rt').readlines()] + root_deps_path = mozpath.join(env.topobjdir, "root-deps.mk") + lines = [l.strip() for l in open(root_deps_path, "rt").readlines()] # Make sure we actually recurse into the sub directory during export to # install the subst file. 
- self.assertTrue(any(l == 'recurse_export: sub/export' for l in lines)) + self.assertTrue(any(l == "recurse_export: sub/export" for l in lines)) def test_variable_passthru(self): """Ensure variable passthru is written out correctly.""" - env = self._consume('variable_passthru', RecursiveMakeBackend) + env = self._consume("variable_passthru", RecursiveMakeBackend) - backend_path = mozpath.join(env.topobjdir, 'backend.mk') - lines = [l.strip() for l in open(backend_path, 'rt').readlines()[2:]] + backend_path = mozpath.join(env.topobjdir, "backend.mk") + lines = [l.strip() for l in open(backend_path, "rt").readlines()[2:]] expected = { - 'RCFILE': [ - 'RCFILE := $(srcdir)/foo.rc', + "RCFILE": [ + "RCFILE := $(srcdir)/foo.rc", ], - 'RCINCLUDE': [ - 'RCINCLUDE := $(srcdir)/bar.rc', + "RCINCLUDE": [ + "RCINCLUDE := $(srcdir)/bar.rc", ], - 'WIN32_EXE_LDFLAGS': [ - 'WIN32_EXE_LDFLAGS += -subsystem:console', + "WIN32_EXE_LDFLAGS": [ + "WIN32_EXE_LDFLAGS += -subsystem:console", ], } @@ -337,43 +395,45 @@ class TestRecursiveMakeBackend(BackendTester): def test_sources(self): """Ensure SOURCES, HOST_SOURCES and WASM_SOURCES are handled properly.""" - env = self._consume('sources', RecursiveMakeBackend) + env = self._consume("sources", RecursiveMakeBackend) - backend_path = mozpath.join(env.topobjdir, 'backend.mk') - lines = [l.strip() for l in open(backend_path, 'rt').readlines()[2:]] + backend_path = mozpath.join(env.topobjdir, "backend.mk") + lines = [l.strip() for l in open(backend_path, "rt").readlines()[2:]] expected = { - 'ASFILES': [ - 'ASFILES += $(srcdir)/bar.s', - 'ASFILES += $(srcdir)/foo.asm', + "ASFILES": [ + "ASFILES += $(srcdir)/bar.s", + "ASFILES += $(srcdir)/foo.asm", ], - 'CMMSRCS': [ - 'CMMSRCS += $(srcdir)/bar.mm', - 'CMMSRCS += $(srcdir)/foo.mm', + "CMMSRCS": [ + "CMMSRCS += $(srcdir)/bar.mm", + "CMMSRCS += $(srcdir)/foo.mm", ], - 'CSRCS': [ - 'CSRCS += $(srcdir)/bar.c', - 'CSRCS += $(srcdir)/foo.c', + "CSRCS": [ + "CSRCS += $(srcdir)/bar.c", + "CSRCS += $(srcdir)/foo.c", ], - 'HOST_CPPSRCS': [ - 'HOST_CPPSRCS += $(srcdir)/bar.cpp', - 'HOST_CPPSRCS += $(srcdir)/foo.cpp', + "HOST_CPPSRCS": [ + "HOST_CPPSRCS += $(srcdir)/bar.cpp", + "HOST_CPPSRCS += $(srcdir)/foo.cpp", ], - 'HOST_CSRCS': [ - 'HOST_CSRCS += $(srcdir)/bar.c', - 'HOST_CSRCS += $(srcdir)/foo.c', + "HOST_CSRCS": [ + "HOST_CSRCS += $(srcdir)/bar.c", + "HOST_CSRCS += $(srcdir)/foo.c", ], - 'SSRCS': [ - 'SSRCS += $(srcdir)/baz.S', - 'SSRCS += $(srcdir)/foo.S', + "SSRCS": [ + "SSRCS += $(srcdir)/baz.S", + "SSRCS += $(srcdir)/foo.S", ], - 'WASM_CSRCS': [ - 'WASM_CSRCS += $(srcdir)/bar.c', - ('WASM_CSRCS += $(srcdir)/third_party/rust/rlbox_lucet_sandbox/' - 'c_src/lucet_sandbox_wrapper.c'), + "WASM_CSRCS": [ + "WASM_CSRCS += $(srcdir)/bar.c", + ( + "WASM_CSRCS += $(srcdir)/third_party/rust/rlbox_lucet_sandbox/" + "c_src/lucet_sandbox_wrapper.c" + ), ], - 'WASM_CPPSRCS': [ - 'WASM_CPPSRCS += $(srcdir)/bar.cpp', + "WASM_CPPSRCS": [ + "WASM_CPPSRCS += $(srcdir)/bar.cpp", ], } @@ -383,41 +443,47 @@ class TestRecursiveMakeBackend(BackendTester): def test_exports(self): """Ensure EXPORTS is handled properly.""" - env = self._consume('exports', RecursiveMakeBackend) + env = self._consume("exports", RecursiveMakeBackend) # EXPORTS files should appear in the dist_include install manifest. 
- m = InstallManifest(path=mozpath.join(env.topobjdir, - '_build_manifests', 'install', 'dist_include')) + m = InstallManifest( + path=mozpath.join( + env.topobjdir, "_build_manifests", "install", "dist_include" + ) + ) self.assertEqual(len(m), 7) - self.assertIn('foo.h', m) - self.assertIn('mozilla/mozilla1.h', m) - self.assertIn('mozilla/dom/dom2.h', m) + self.assertIn("foo.h", m) + self.assertIn("mozilla/mozilla1.h", m) + self.assertIn("mozilla/dom/dom2.h", m) def test_generated_files(self): """Ensure GENERATED_FILES is handled properly.""" - env = self._consume('generated-files', RecursiveMakeBackend) + env = self._consume("generated-files", RecursiveMakeBackend) - backend_path = mozpath.join(env.topobjdir, 'backend.mk') - lines = [l.strip() for l in open(backend_path, 'rt').readlines()[2:]] + backend_path = mozpath.join(env.topobjdir, "backend.mk") + lines = [l.strip() for l in open(backend_path, "rt").readlines()[2:]] expected = [ - 'include $(topsrcdir)/config/AB_rCD.mk', - 'PRE_COMPILE_TARGETS += $(MDDEPDIR)/bar.c.stub', - 'bar.c: $(MDDEPDIR)/bar.c.stub ;', - 'EXTRA_MDDEPEND_FILES += $(MDDEPDIR)/bar.c.pp', - '$(MDDEPDIR)/bar.c.stub: %s/generate-bar.py' % env.topsrcdir, - '$(REPORT_BUILD)', - '$(call py_action,file_generate,%s/generate-bar.py baz bar.c $(MDDEPDIR)/bar.c.pp $(MDDEPDIR)/bar.c.stub)' % env.topsrcdir, # noqa - '@$(TOUCH) $@', - '', - 'EXPORT_TARGETS += $(MDDEPDIR)/foo.h.stub', - 'foo.h: $(MDDEPDIR)/foo.h.stub ;', - 'EXTRA_MDDEPEND_FILES += $(MDDEPDIR)/foo.h.pp', - '$(MDDEPDIR)/foo.h.stub: %s/generate-foo.py $(srcdir)/foo-data' % (env.topsrcdir), - '$(REPORT_BUILD)', - '$(call py_action,file_generate,%s/generate-foo.py main foo.h $(MDDEPDIR)/foo.h.pp $(MDDEPDIR)/foo.h.stub $(srcdir)/foo-data)' % (env.topsrcdir), # noqa - '@$(TOUCH) $@', - '', + "include $(topsrcdir)/config/AB_rCD.mk", + "PRE_COMPILE_TARGETS += $(MDDEPDIR)/bar.c.stub", + "bar.c: $(MDDEPDIR)/bar.c.stub ;", + "EXTRA_MDDEPEND_FILES += $(MDDEPDIR)/bar.c.pp", + "$(MDDEPDIR)/bar.c.stub: %s/generate-bar.py" % env.topsrcdir, + "$(REPORT_BUILD)", + "$(call py_action,file_generate,%s/generate-bar.py baz bar.c $(MDDEPDIR)/bar.c.pp $(MDDEPDIR)/bar.c.stub)" # noqa + % env.topsrcdir, + "@$(TOUCH) $@", + "", + "EXPORT_TARGETS += $(MDDEPDIR)/foo.h.stub", + "foo.h: $(MDDEPDIR)/foo.h.stub ;", + "EXTRA_MDDEPEND_FILES += $(MDDEPDIR)/foo.h.pp", + "$(MDDEPDIR)/foo.h.stub: %s/generate-foo.py $(srcdir)/foo-data" + % (env.topsrcdir), + "$(REPORT_BUILD)", + "$(call py_action,file_generate,%s/generate-foo.py main foo.h $(MDDEPDIR)/foo.h.pp $(MDDEPDIR)/foo.h.stub $(srcdir)/foo-data)" # noqa + % (env.topsrcdir), + "@$(TOUCH) $@", + "", ] self.maxDiff = None @@ -425,29 +491,32 @@ class TestRecursiveMakeBackend(BackendTester): def test_generated_files_force(self): """Ensure GENERATED_FILES with .force is handled properly.""" - env = self._consume('generated-files-force', RecursiveMakeBackend) + env = self._consume("generated-files-force", RecursiveMakeBackend) - backend_path = mozpath.join(env.topobjdir, 'backend.mk') - lines = [l.strip() for l in open(backend_path, 'rt').readlines()[2:]] + backend_path = mozpath.join(env.topobjdir, "backend.mk") + lines = [l.strip() for l in open(backend_path, "rt").readlines()[2:]] expected = [ - 'include $(topsrcdir)/config/AB_rCD.mk', - 'PRE_COMPILE_TARGETS += $(MDDEPDIR)/bar.c.stub', - 'bar.c: $(MDDEPDIR)/bar.c.stub ;', - 'EXTRA_MDDEPEND_FILES += $(MDDEPDIR)/bar.c.pp', - '$(MDDEPDIR)/bar.c.stub: %s/generate-bar.py FORCE' % env.topsrcdir, - '$(REPORT_BUILD)', - '$(call 
py_action,file_generate,%s/generate-bar.py baz bar.c $(MDDEPDIR)/bar.c.pp $(MDDEPDIR)/bar.c.stub)' % env.topsrcdir, # noqa - '@$(TOUCH) $@', - '', - 'PRE_COMPILE_TARGETS += $(MDDEPDIR)/foo.c.stub', - 'foo.c: $(MDDEPDIR)/foo.c.stub ;', - 'EXTRA_MDDEPEND_FILES += $(MDDEPDIR)/foo.c.pp', - '$(MDDEPDIR)/foo.c.stub: %s/generate-foo.py $(srcdir)/foo-data' % (env.topsrcdir), - '$(REPORT_BUILD)', - '$(call py_action,file_generate,%s/generate-foo.py main foo.c $(MDDEPDIR)/foo.c.pp $(MDDEPDIR)/foo.c.stub $(srcdir)/foo-data)' % (env.topsrcdir), # noqa - '@$(TOUCH) $@', - '', + "include $(topsrcdir)/config/AB_rCD.mk", + "PRE_COMPILE_TARGETS += $(MDDEPDIR)/bar.c.stub", + "bar.c: $(MDDEPDIR)/bar.c.stub ;", + "EXTRA_MDDEPEND_FILES += $(MDDEPDIR)/bar.c.pp", + "$(MDDEPDIR)/bar.c.stub: %s/generate-bar.py FORCE" % env.topsrcdir, + "$(REPORT_BUILD)", + "$(call py_action,file_generate,%s/generate-bar.py baz bar.c $(MDDEPDIR)/bar.c.pp $(MDDEPDIR)/bar.c.stub)" # noqa + % env.topsrcdir, + "@$(TOUCH) $@", + "", + "PRE_COMPILE_TARGETS += $(MDDEPDIR)/foo.c.stub", + "foo.c: $(MDDEPDIR)/foo.c.stub ;", + "EXTRA_MDDEPEND_FILES += $(MDDEPDIR)/foo.c.pp", + "$(MDDEPDIR)/foo.c.stub: %s/generate-foo.py $(srcdir)/foo-data" + % (env.topsrcdir), + "$(REPORT_BUILD)", + "$(call py_action,file_generate,%s/generate-foo.py main foo.c $(MDDEPDIR)/foo.c.pp $(MDDEPDIR)/foo.c.stub $(srcdir)/foo-data)" # noqa + % (env.topsrcdir), + "@$(TOUCH) $@", + "", ] self.maxDiff = None @@ -455,25 +524,27 @@ class TestRecursiveMakeBackend(BackendTester): def test_localized_generated_files(self): """Ensure LOCALIZED_GENERATED_FILES is handled properly.""" - env = self._consume('localized-generated-files', RecursiveMakeBackend) + env = self._consume("localized-generated-files", RecursiveMakeBackend) - backend_path = mozpath.join(env.topobjdir, 'backend.mk') - lines = [l.strip() for l in open(backend_path, 'rt').readlines()[2:]] + backend_path = mozpath.join(env.topobjdir, "backend.mk") + lines = [l.strip() for l in open(backend_path, "rt").readlines()[2:]] expected = [ - 'include $(topsrcdir)/config/AB_rCD.mk', - 'MISC_TARGETS += $(MDDEPDIR)/foo.xyz.stub', - 'foo.xyz: $(MDDEPDIR)/foo.xyz.stub ;', - 'EXTRA_MDDEPEND_FILES += $(MDDEPDIR)/foo.xyz.pp', - '$(MDDEPDIR)/foo.xyz.stub: %s/generate-foo.py $(call MERGE_FILE,localized-input) $(srcdir)/non-localized-input $(if $(IS_LANGUAGE_REPACK),FORCE)' % env.topsrcdir, # noqa - '$(REPORT_BUILD)', - '$(call py_action,file_generate,--locale=$(AB_CD) %s/generate-foo.py main foo.xyz $(MDDEPDIR)/foo.xyz.pp $(MDDEPDIR)/foo.xyz.stub $(call MERGE_FILE,localized-input) $(srcdir)/non-localized-input)' % env.topsrcdir, # noqa - '@$(TOUCH) $@', - '', - 'LOCALIZED_FILES_0_FILES += foo.xyz', - 'LOCALIZED_FILES_0_DEST = $(FINAL_TARGET)/', - 'LOCALIZED_FILES_0_TARGET := misc', - 'INSTALL_TARGETS += LOCALIZED_FILES_0', + "include $(topsrcdir)/config/AB_rCD.mk", + "MISC_TARGETS += $(MDDEPDIR)/foo.xyz.stub", + "foo.xyz: $(MDDEPDIR)/foo.xyz.stub ;", + "EXTRA_MDDEPEND_FILES += $(MDDEPDIR)/foo.xyz.pp", + "$(MDDEPDIR)/foo.xyz.stub: %s/generate-foo.py $(call MERGE_FILE,localized-input) $(srcdir)/non-localized-input $(if $(IS_LANGUAGE_REPACK),FORCE)" # noqa + % env.topsrcdir, + "$(REPORT_BUILD)", + "$(call py_action,file_generate,--locale=$(AB_CD) %s/generate-foo.py main foo.xyz $(MDDEPDIR)/foo.xyz.pp $(MDDEPDIR)/foo.xyz.stub $(call MERGE_FILE,localized-input) $(srcdir)/non-localized-input)" # noqa + % env.topsrcdir, + "@$(TOUCH) $@", + "", + "LOCALIZED_FILES_0_FILES += foo.xyz", + "LOCALIZED_FILES_0_DEST = $(FINAL_TARGET)/", + 
"LOCALIZED_FILES_0_TARGET := misc", + "INSTALL_TARGETS += LOCALIZED_FILES_0", ] self.maxDiff = None @@ -481,29 +552,33 @@ class TestRecursiveMakeBackend(BackendTester): def test_localized_generated_files_force(self): """Ensure LOCALIZED_GENERATED_FILES with .force is handled properly.""" - env = self._consume('localized-generated-files-force', RecursiveMakeBackend) + env = self._consume("localized-generated-files-force", RecursiveMakeBackend) - backend_path = mozpath.join(env.topobjdir, 'backend.mk') - lines = [l.strip() for l in open(backend_path, 'rt').readlines()[2:]] + backend_path = mozpath.join(env.topobjdir, "backend.mk") + lines = [l.strip() for l in open(backend_path, "rt").readlines()[2:]] expected = [ - 'include $(topsrcdir)/config/AB_rCD.mk', - 'MISC_TARGETS += $(MDDEPDIR)/foo.xyz.stub', - 'foo.xyz: $(MDDEPDIR)/foo.xyz.stub ;', - 'EXTRA_MDDEPEND_FILES += $(MDDEPDIR)/foo.xyz.pp', - '$(MDDEPDIR)/foo.xyz.stub: %s/generate-foo.py $(call MERGE_FILE,localized-input) $(srcdir)/non-localized-input $(if $(IS_LANGUAGE_REPACK),FORCE)' % env.topsrcdir, # noqa - '$(REPORT_BUILD)', - '$(call py_action,file_generate,--locale=$(AB_CD) %s/generate-foo.py main foo.xyz $(MDDEPDIR)/foo.xyz.pp $(MDDEPDIR)/foo.xyz.stub $(call MERGE_FILE,localized-input) $(srcdir)/non-localized-input)' % env.topsrcdir, # noqa - '@$(TOUCH) $@', - '', - 'MISC_TARGETS += $(MDDEPDIR)/abc.xyz.stub', - 'abc.xyz: $(MDDEPDIR)/abc.xyz.stub ;', - 'EXTRA_MDDEPEND_FILES += $(MDDEPDIR)/abc.xyz.pp', - '$(MDDEPDIR)/abc.xyz.stub: %s/generate-foo.py $(call MERGE_FILE,localized-input) $(srcdir)/non-localized-input FORCE' % env.topsrcdir, # noqa - '$(REPORT_BUILD)', - '$(call py_action,file_generate,--locale=$(AB_CD) %s/generate-foo.py main abc.xyz $(MDDEPDIR)/abc.xyz.pp $(MDDEPDIR)/abc.xyz.stub $(call MERGE_FILE,localized-input) $(srcdir)/non-localized-input)' % env.topsrcdir, # noqa - '@$(TOUCH) $@', - '', + "include $(topsrcdir)/config/AB_rCD.mk", + "MISC_TARGETS += $(MDDEPDIR)/foo.xyz.stub", + "foo.xyz: $(MDDEPDIR)/foo.xyz.stub ;", + "EXTRA_MDDEPEND_FILES += $(MDDEPDIR)/foo.xyz.pp", + "$(MDDEPDIR)/foo.xyz.stub: %s/generate-foo.py $(call MERGE_FILE,localized-input) $(srcdir)/non-localized-input $(if $(IS_LANGUAGE_REPACK),FORCE)" # noqa + % env.topsrcdir, + "$(REPORT_BUILD)", + "$(call py_action,file_generate,--locale=$(AB_CD) %s/generate-foo.py main foo.xyz $(MDDEPDIR)/foo.xyz.pp $(MDDEPDIR)/foo.xyz.stub $(call MERGE_FILE,localized-input) $(srcdir)/non-localized-input)" # noqa + % env.topsrcdir, + "@$(TOUCH) $@", + "", + "MISC_TARGETS += $(MDDEPDIR)/abc.xyz.stub", + "abc.xyz: $(MDDEPDIR)/abc.xyz.stub ;", + "EXTRA_MDDEPEND_FILES += $(MDDEPDIR)/abc.xyz.pp", + "$(MDDEPDIR)/abc.xyz.stub: %s/generate-foo.py $(call MERGE_FILE,localized-input) $(srcdir)/non-localized-input FORCE" # noqa + % env.topsrcdir, + "$(REPORT_BUILD)", + "$(call py_action,file_generate,--locale=$(AB_CD) %s/generate-foo.py main abc.xyz $(MDDEPDIR)/abc.xyz.pp $(MDDEPDIR)/abc.xyz.stub $(call MERGE_FILE,localized-input) $(srcdir)/non-localized-input)" # noqa + % env.topsrcdir, + "@$(TOUCH) $@", + "", ] self.maxDiff = None @@ -512,35 +587,41 @@ class TestRecursiveMakeBackend(BackendTester): def test_localized_generated_files_AB_CD(self): """Ensure LOCALIZED_GENERATED_FILES is handled properly when {AB_CD} and {AB_rCD} are used.""" - env = self._consume('localized-generated-files-AB_CD', RecursiveMakeBackend) + env = self._consume("localized-generated-files-AB_CD", RecursiveMakeBackend) - backend_path = mozpath.join(env.topobjdir, 'backend.mk') - lines = [l.strip() for l 
in open(backend_path, 'rt').readlines()[2:]] + backend_path = mozpath.join(env.topobjdir, "backend.mk") + lines = [l.strip() for l in open(backend_path, "rt").readlines()[2:]] expected = [ - 'include $(topsrcdir)/config/AB_rCD.mk', - 'MISC_TARGETS += $(MDDEPDIR)/foo$(AB_CD).xyz.stub', - 'foo$(AB_CD).xyz: $(MDDEPDIR)/foo$(AB_CD).xyz.stub ;', - 'EXTRA_MDDEPEND_FILES += $(MDDEPDIR)/foo$(AB_CD).xyz.pp', - '$(MDDEPDIR)/foo$(AB_CD).xyz.stub: %s/generate-foo.py $(call MERGE_FILE,localized-input) $(srcdir)/non-localized-input $(if $(IS_LANGUAGE_REPACK),FORCE)' % env.topsrcdir, # noqa - '$(REPORT_BUILD)', - '$(call py_action,file_generate,--locale=$(AB_CD) %s/generate-foo.py main foo$(AB_CD).xyz $(MDDEPDIR)/foo$(AB_CD).xyz.pp $(MDDEPDIR)/foo$(AB_CD).xyz.stub $(call MERGE_FILE,localized-input) $(srcdir)/non-localized-input)' % env.topsrcdir, # noqa - '@$(TOUCH) $@', - '', - 'bar$(AB_rCD).xyz: $(MDDEPDIR)/bar$(AB_rCD).xyz.stub ;', - 'EXTRA_MDDEPEND_FILES += $(MDDEPDIR)/bar$(AB_rCD).xyz.pp', - '$(MDDEPDIR)/bar$(AB_rCD).xyz.stub: %s/generate-foo.py $(call MERGE_RELATIVE_FILE,localized-input,inner/locales) $(srcdir)/non-localized-input $(if $(IS_LANGUAGE_REPACK),FORCE)' % env.topsrcdir, # noqa - '$(REPORT_BUILD)', - '$(call py_action,file_generate,--locale=$(AB_CD) %s/generate-foo.py main bar$(AB_rCD).xyz $(MDDEPDIR)/bar$(AB_rCD).xyz.pp $(MDDEPDIR)/bar$(AB_rCD).xyz.stub $(call MERGE_RELATIVE_FILE,localized-input,inner/locales) $(srcdir)/non-localized-input)' % env.topsrcdir, # noqa - '@$(TOUCH) $@', - '', - 'zot$(AB_rCD).xyz: $(MDDEPDIR)/zot$(AB_rCD).xyz.stub ;', - 'EXTRA_MDDEPEND_FILES += $(MDDEPDIR)/zot$(AB_rCD).xyz.pp', - '$(MDDEPDIR)/zot$(AB_rCD).xyz.stub: %s/generate-foo.py $(call MERGE_RELATIVE_FILE,localized-input,locales) $(srcdir)/non-localized-input $(if $(IS_LANGUAGE_REPACK),FORCE)' % env.topsrcdir, # noqa - '$(REPORT_BUILD)', - '$(call py_action,file_generate,--locale=$(AB_CD) %s/generate-foo.py main zot$(AB_rCD).xyz $(MDDEPDIR)/zot$(AB_rCD).xyz.pp $(MDDEPDIR)/zot$(AB_rCD).xyz.stub $(call MERGE_RELATIVE_FILE,localized-input,locales) $(srcdir)/non-localized-input)' % env.topsrcdir, # noqa - '@$(TOUCH) $@', - '', + "include $(topsrcdir)/config/AB_rCD.mk", + "MISC_TARGETS += $(MDDEPDIR)/foo$(AB_CD).xyz.stub", + "foo$(AB_CD).xyz: $(MDDEPDIR)/foo$(AB_CD).xyz.stub ;", + "EXTRA_MDDEPEND_FILES += $(MDDEPDIR)/foo$(AB_CD).xyz.pp", + "$(MDDEPDIR)/foo$(AB_CD).xyz.stub: %s/generate-foo.py $(call MERGE_FILE,localized-input) $(srcdir)/non-localized-input $(if $(IS_LANGUAGE_REPACK),FORCE)" # noqa + % env.topsrcdir, + "$(REPORT_BUILD)", + "$(call py_action,file_generate,--locale=$(AB_CD) %s/generate-foo.py main foo$(AB_CD).xyz $(MDDEPDIR)/foo$(AB_CD).xyz.pp $(MDDEPDIR)/foo$(AB_CD).xyz.stub $(call MERGE_FILE,localized-input) $(srcdir)/non-localized-input)" # noqa + % env.topsrcdir, + "@$(TOUCH) $@", + "", + "bar$(AB_rCD).xyz: $(MDDEPDIR)/bar$(AB_rCD).xyz.stub ;", + "EXTRA_MDDEPEND_FILES += $(MDDEPDIR)/bar$(AB_rCD).xyz.pp", + "$(MDDEPDIR)/bar$(AB_rCD).xyz.stub: %s/generate-foo.py $(call MERGE_RELATIVE_FILE,localized-input,inner/locales) $(srcdir)/non-localized-input $(if $(IS_LANGUAGE_REPACK),FORCE)" # noqa + % env.topsrcdir, + "$(REPORT_BUILD)", + "$(call py_action,file_generate,--locale=$(AB_CD) %s/generate-foo.py main bar$(AB_rCD).xyz $(MDDEPDIR)/bar$(AB_rCD).xyz.pp $(MDDEPDIR)/bar$(AB_rCD).xyz.stub $(call MERGE_RELATIVE_FILE,localized-input,inner/locales) $(srcdir)/non-localized-input)" # noqa + % env.topsrcdir, + "@$(TOUCH) $@", + "", + "zot$(AB_rCD).xyz: $(MDDEPDIR)/zot$(AB_rCD).xyz.stub ;", + 
"EXTRA_MDDEPEND_FILES += $(MDDEPDIR)/zot$(AB_rCD).xyz.pp", + "$(MDDEPDIR)/zot$(AB_rCD).xyz.stub: %s/generate-foo.py $(call MERGE_RELATIVE_FILE,localized-input,locales) $(srcdir)/non-localized-input $(if $(IS_LANGUAGE_REPACK),FORCE)" # noqa + % env.topsrcdir, + "$(REPORT_BUILD)", + "$(call py_action,file_generate,--locale=$(AB_CD) %s/generate-foo.py main zot$(AB_rCD).xyz $(MDDEPDIR)/zot$(AB_rCD).xyz.pp $(MDDEPDIR)/zot$(AB_rCD).xyz.stub $(call MERGE_RELATIVE_FILE,localized-input,locales) $(srcdir)/non-localized-input)" # noqa + % env.topsrcdir, + "@$(TOUCH) $@", + "", ] self.maxDiff = None @@ -549,112 +630,123 @@ class TestRecursiveMakeBackend(BackendTester): def test_exports_generated(self): """Ensure EXPORTS that are listed in GENERATED_FILES are handled properly.""" - env = self._consume('exports-generated', RecursiveMakeBackend) + env = self._consume("exports-generated", RecursiveMakeBackend) # EXPORTS files should appear in the dist_include install manifest. - m = InstallManifest(path=mozpath.join(env.topobjdir, - '_build_manifests', 'install', 'dist_include')) + m = InstallManifest( + path=mozpath.join( + env.topobjdir, "_build_manifests", "install", "dist_include" + ) + ) self.assertEqual(len(m), 8) - self.assertIn('foo.h', m) - self.assertIn('mozilla/mozilla1.h', m) - self.assertIn('mozilla/dom/dom1.h', m) - self.assertIn('gfx/gfx.h', m) - self.assertIn('bar.h', m) - self.assertIn('mozilla/mozilla2.h', m) - self.assertIn('mozilla/dom/dom2.h', m) - self.assertIn('mozilla/dom/dom3.h', m) + self.assertIn("foo.h", m) + self.assertIn("mozilla/mozilla1.h", m) + self.assertIn("mozilla/dom/dom1.h", m) + self.assertIn("gfx/gfx.h", m) + self.assertIn("bar.h", m) + self.assertIn("mozilla/mozilla2.h", m) + self.assertIn("mozilla/dom/dom2.h", m) + self.assertIn("mozilla/dom/dom3.h", m) # EXPORTS files that are also GENERATED_FILES should be handled as # INSTALL_TARGETS. 
- backend_path = mozpath.join(env.topobjdir, 'backend.mk') - lines = [l.strip() for l in open(backend_path, 'rt').readlines()[2:]] + backend_path = mozpath.join(env.topobjdir, "backend.mk") + lines = [l.strip() for l in open(backend_path, "rt").readlines()[2:]] expected = [ - 'include $(topsrcdir)/config/AB_rCD.mk', - 'dist_include_FILES += bar.h', - 'dist_include_DEST := $(DEPTH)/dist/include/', - 'dist_include_TARGET := export', - 'INSTALL_TARGETS += dist_include', - 'dist_include_mozilla_FILES += mozilla2.h', - 'dist_include_mozilla_DEST := $(DEPTH)/dist/include/mozilla', - 'dist_include_mozilla_TARGET := export', - 'INSTALL_TARGETS += dist_include_mozilla', - 'dist_include_mozilla_dom_FILES += dom2.h', - 'dist_include_mozilla_dom_FILES += dom3.h', - 'dist_include_mozilla_dom_DEST := $(DEPTH)/dist/include/mozilla/dom', - 'dist_include_mozilla_dom_TARGET := export', - 'INSTALL_TARGETS += dist_include_mozilla_dom', + "include $(topsrcdir)/config/AB_rCD.mk", + "dist_include_FILES += bar.h", + "dist_include_DEST := $(DEPTH)/dist/include/", + "dist_include_TARGET := export", + "INSTALL_TARGETS += dist_include", + "dist_include_mozilla_FILES += mozilla2.h", + "dist_include_mozilla_DEST := $(DEPTH)/dist/include/mozilla", + "dist_include_mozilla_TARGET := export", + "INSTALL_TARGETS += dist_include_mozilla", + "dist_include_mozilla_dom_FILES += dom2.h", + "dist_include_mozilla_dom_FILES += dom3.h", + "dist_include_mozilla_dom_DEST := $(DEPTH)/dist/include/mozilla/dom", + "dist_include_mozilla_dom_TARGET := export", + "INSTALL_TARGETS += dist_include_mozilla_dom", ] self.maxDiff = None self.assertEqual(lines, expected) def test_resources(self): """Ensure RESOURCE_FILES is handled properly.""" - env = self._consume('resources', RecursiveMakeBackend) + env = self._consume("resources", RecursiveMakeBackend) # RESOURCE_FILES should appear in the dist_bin install manifest. - m = InstallManifest(path=os.path.join(env.topobjdir, - '_build_manifests', 'install', 'dist_bin')) + m = InstallManifest( + path=os.path.join(env.topobjdir, "_build_manifests", "install", "dist_bin") + ) self.assertEqual(len(m), 10) - self.assertIn('res/foo.res', m) - self.assertIn('res/fonts/font1.ttf', m) - self.assertIn('res/fonts/desktop/desktop2.ttf', m) + self.assertIn("res/foo.res", m) + self.assertIn("res/fonts/font1.ttf", m) + self.assertIn("res/fonts/desktop/desktop2.ttf", m) - self.assertIn('res/bar.res.in', m) - self.assertIn('res/tests/test.manifest', m) - self.assertIn('res/tests/extra.manifest', m) + self.assertIn("res/bar.res.in", m) + self.assertIn("res/tests/test.manifest", m) + self.assertIn("res/tests/extra.manifest", m) def test_test_manifests_files_written(self): """Ensure test manifests get turned into files.""" - env = self._consume('test-manifests-written', RecursiveMakeBackend) + env = self._consume("test-manifests-written", RecursiveMakeBackend) - tests_dir = mozpath.join(env.topobjdir, '_tests') - m_master = mozpath.join(tests_dir, 'testing', 'mochitest', 'tests', 'mochitest.ini') - x_master = mozpath.join(tests_dir, 'xpcshell', 'xpcshell.ini') + tests_dir = mozpath.join(env.topobjdir, "_tests") + m_master = mozpath.join( + tests_dir, "testing", "mochitest", "tests", "mochitest.ini" + ) + x_master = mozpath.join(tests_dir, "xpcshell", "xpcshell.ini") self.assertTrue(os.path.exists(m_master)) self.assertTrue(os.path.exists(x_master)) - lines = [l.strip() for l in open(x_master, 'rt').readlines()] - self.assertEqual(lines, [ - '# THIS FILE WAS AUTOMATICALLY GENERATED. 
DO NOT MODIFY BY HAND.', - '', - '[include:dir1/xpcshell.ini]', - '[include:xpcshell.ini]', - ]) + lines = [l.strip() for l in open(x_master, "rt").readlines()] + self.assertEqual( + lines, + [ + "# THIS FILE WAS AUTOMATICALLY GENERATED. DO NOT MODIFY BY HAND.", + "", + "[include:dir1/xpcshell.ini]", + "[include:xpcshell.ini]", + ], + ) def test_test_manifest_pattern_matches_recorded(self): """Pattern matches in test manifests' support-files should be recorded.""" - env = self._consume('test-manifests-written', RecursiveMakeBackend) - m = InstallManifest(path=mozpath.join(env.topobjdir, - '_build_manifests', 'install', '_test_files')) + env = self._consume("test-manifests-written", RecursiveMakeBackend) + m = InstallManifest( + path=mozpath.join( + env.topobjdir, "_build_manifests", "install", "_test_files" + ) + ) # This is not the most robust test in the world, but it gets the job # done. - entries = [e for e in m._dests.keys() if '**' in e] + entries = [e for e in m._dests.keys() if "**" in e] self.assertEqual(len(entries), 1) - self.assertIn('support/**', entries[0]) + self.assertIn("support/**", entries[0]) def test_test_manifest_deffered_installs_written(self): """Shared support files are written to their own data file by the backend.""" - env = self._consume('test-manifest-shared-support', RecursiveMakeBackend) + env = self._consume("test-manifest-shared-support", RecursiveMakeBackend) # First, read the generated for ini manifest contents. - test_files_manifest = mozpath.join(env.topobjdir, - '_build_manifests', - 'install', - '_test_files') + test_files_manifest = mozpath.join( + env.topobjdir, "_build_manifests", "install", "_test_files" + ) m = InstallManifest(path=test_files_manifest) # Then, synthesize one from the test-installs.pkl file. This should # allow us to re-create a subset of the above. - env = self._consume('test-manifest-shared-support', TestManifestBackend) - test_installs_path = mozpath.join(env.topobjdir, 'test-installs.pkl') + env = self._consume("test-manifest-shared-support", TestManifestBackend) + test_installs_path = mozpath.join(env.topobjdir, "test-installs.pkl") - with open(test_installs_path, 'rb') as fh: + with open(test_installs_path, "rb") as fh: test_installs = pickle.load(fh) - self.assertEqual(set(test_installs.keys()), - set(['child/test_sub.js', - 'child/data/**', - 'child/another-file.sjs'])) + self.assertEqual( + set(test_installs.keys()), + set(["child/test_sub.js", "child/data/**", "child/another-file.sjs"]), + ) for key in test_installs.keys(): self.assertIn(key, test_installs) @@ -673,67 +765,68 @@ class TestRecursiveMakeBackend(BackendTester): def test_xpidl_generation(self): """Ensure xpidl files and directories are written out.""" - env = self._consume('xpidl', RecursiveMakeBackend) + env = self._consume("xpidl", RecursiveMakeBackend) # Install manifests should contain entries. 
- install_dir = mozpath.join(env.topobjdir, '_build_manifests', - 'install') - self.assertTrue(os.path.isfile(mozpath.join(install_dir, 'xpidl'))) + install_dir = mozpath.join(env.topobjdir, "_build_manifests", "install") + self.assertTrue(os.path.isfile(mozpath.join(install_dir, "xpidl"))) - m = InstallManifest(path=mozpath.join(install_dir, 'xpidl')) - self.assertIn('.deps/my_module.pp', m) + m = InstallManifest(path=mozpath.join(install_dir, "xpidl")) + self.assertIn(".deps/my_module.pp", m) - m = InstallManifest(path=mozpath.join(install_dir, 'xpidl')) - self.assertIn('my_module.xpt', m) + m = InstallManifest(path=mozpath.join(install_dir, "xpidl")) + self.assertIn("my_module.xpt", m) - m = InstallManifest(path=mozpath.join(install_dir, 'dist_include')) - self.assertIn('foo.h', m) + m = InstallManifest(path=mozpath.join(install_dir, "dist_include")) + self.assertIn("foo.h", m) - p = mozpath.join(env.topobjdir, 'config/makefiles/xpidl') + p = mozpath.join(env.topobjdir, "config/makefiles/xpidl") self.assertTrue(os.path.isdir(p)) - self.assertTrue(os.path.isfile(mozpath.join(p, 'Makefile'))) + self.assertTrue(os.path.isfile(mozpath.join(p, "Makefile"))) def test_test_support_files_tracked(self): - env = self._consume('test-support-binaries-tracked', RecursiveMakeBackend) - m = InstallManifest(path=mozpath.join(env.topobjdir, - '_build_manifests', 'install', '_tests')) + env = self._consume("test-support-binaries-tracked", RecursiveMakeBackend) + m = InstallManifest( + path=mozpath.join(env.topobjdir, "_build_manifests", "install", "_tests") + ) self.assertEqual(len(m), 4) - self.assertIn('xpcshell/tests/mozbuildtest/test-library.dll', m) - self.assertIn('xpcshell/tests/mozbuildtest/test-one.exe', m) - self.assertIn('xpcshell/tests/mozbuildtest/test-two.exe', m) - self.assertIn('xpcshell/tests/mozbuildtest/host-test-library.dll', m) + self.assertIn("xpcshell/tests/mozbuildtest/test-library.dll", m) + self.assertIn("xpcshell/tests/mozbuildtest/test-one.exe", m) + self.assertIn("xpcshell/tests/mozbuildtest/test-two.exe", m) + self.assertIn("xpcshell/tests/mozbuildtest/host-test-library.dll", m) def test_old_install_manifest_deleted(self): # Simulate an install manifest from a previous backend version. Ensure # it is deleted. 
- env = self._get_environment('stub0') - purge_dir = mozpath.join(env.topobjdir, '_build_manifests', 'install') - manifest_path = mozpath.join(purge_dir, 'old_manifest') + env = self._get_environment("stub0") + purge_dir = mozpath.join(env.topobjdir, "_build_manifests", "install") + manifest_path = mozpath.join(purge_dir, "old_manifest") os.makedirs(purge_dir) m = InstallManifest() m.write(path=manifest_path) - with open(mozpath.join( - env.topobjdir, 'backend.RecursiveMakeBackend'), 'w') as f: - f.write('%s\n' % manifest_path) + with open( + mozpath.join(env.topobjdir, "backend.RecursiveMakeBackend"), "w" + ) as f: + f.write("%s\n" % manifest_path) self.assertTrue(os.path.exists(manifest_path)) - self._consume('stub0', RecursiveMakeBackend, env) + self._consume("stub0", RecursiveMakeBackend, env) self.assertFalse(os.path.exists(manifest_path)) def test_install_manifests_written(self): - env, objs = self._emit('stub0') + env, objs = self._emit("stub0") backend = RecursiveMakeBackend(env) m = InstallManifest() - backend._install_manifests['testing'] = m - m.add_link(__file__, 'self') + backend._install_manifests["testing"] = m + m.add_link(__file__, "self") backend.consume(objs) - man_dir = mozpath.join(env.topobjdir, '_build_manifests', 'install') + man_dir = mozpath.join(env.topobjdir, "_build_manifests", "install") self.assertTrue(os.path.isdir(man_dir)) - expected = ['testing'] + expected = ["testing"] for e in expected: full = mozpath.join(man_dir, e) self.assertTrue(os.path.exists(full)) @@ -744,442 +837,479 @@ class TestRecursiveMakeBackend(BackendTester): def test_ipdl_sources(self): """Test that PREPROCESSED_IPDL_SOURCES and IPDL_SOURCES are written to ipdlsrcs.mk correctly.""" - env = self._get_environment('ipdl_sources') + env = self._get_environment("ipdl_sources") # Make substs writable so we can set the value of IPDL_ROOT to reflect # the correct objdir. 
env.substs = dict(env.substs) - env.substs['IPDL_ROOT'] = env.topobjdir + env.substs["IPDL_ROOT"] = env.topobjdir - self._consume('ipdl_sources', RecursiveMakeBackend, env) + self._consume("ipdl_sources", RecursiveMakeBackend, env) - manifest_path = mozpath.join(env.topobjdir, 'ipdlsrcs.mk') - lines = [l.strip() for l in open(manifest_path, 'rt').readlines()] + manifest_path = mozpath.join(env.topobjdir, "ipdlsrcs.mk") + lines = [l.strip() for l in open(manifest_path, "rt").readlines()] # Handle Windows paths correctly - topsrcdir = env.topsrcdir.replace(os.sep, '/') + topsrcdir = env.topsrcdir.replace(os.sep, "/") expected = [ - "ALL_IPDLSRCS := bar1.ipdl foo1.ipdl %s/bar/bar.ipdl %s/bar/bar2.ipdlh %s/foo/foo.ipdl %s/foo/foo2.ipdlh" % tuple([topsrcdir] * 4), # noqa + "ALL_IPDLSRCS := bar1.ipdl foo1.ipdl %s/bar/bar.ipdl %s/bar/bar2.ipdlh %s/foo/foo.ipdl %s/foo/foo2.ipdlh" # noqa + % tuple([topsrcdir] * 4), "CPPSRCS := UnifiedProtocols0.cpp", "IPDLDIRS := %s %s/bar %s/foo" % (env.topobjdir, topsrcdir, topsrcdir), ] - found = [str for str in lines if str.startswith(('ALL_IPDLSRCS', - 'CPPSRCS', - 'IPDLDIRS'))] + found = [ + str + for str in lines + if str.startswith(("ALL_IPDLSRCS", "CPPSRCS", "IPDLDIRS")) + ] self.assertEqual(found, expected) def test_defines(self): """Test that DEFINES are written to backend.mk correctly.""" - env = self._consume('defines', RecursiveMakeBackend) + env = self._consume("defines", RecursiveMakeBackend) - backend_path = mozpath.join(env.topobjdir, 'backend.mk') - lines = [l.strip() for l in open(backend_path, 'rt').readlines()[2:]] + backend_path = mozpath.join(env.topobjdir, "backend.mk") + lines = [l.strip() for l in open(backend_path, "rt").readlines()[2:]] - var = 'DEFINES' + var = "DEFINES" defines = [val for val in lines if val.startswith(var)] - expected = ['DEFINES += -DFOO \'-DBAZ="ab\'\\\'\'cd"\' -UQUX -DBAR=7 -DVALUE=xyz'] + expected = ["DEFINES += -DFOO '-DBAZ=\"ab'\\''cd\"' -UQUX -DBAR=7 -DVALUE=xyz"] self.assertEqual(defines, expected) def test_local_includes(self): """Test that LOCAL_INCLUDES are written to backend.mk correctly.""" - env = self._consume('local_includes', RecursiveMakeBackend) + env = self._consume("local_includes", RecursiveMakeBackend) - backend_path = mozpath.join(env.topobjdir, 'backend.mk') - lines = [l.strip() for l in open(backend_path, 'rt').readlines()[2:]] + backend_path = mozpath.join(env.topobjdir, "backend.mk") + lines = [l.strip() for l in open(backend_path, "rt").readlines()[2:]] expected = [ - 'LOCAL_INCLUDES += -I$(srcdir)/bar/baz', - 'LOCAL_INCLUDES += -I$(srcdir)/foo', + "LOCAL_INCLUDES += -I$(srcdir)/bar/baz", + "LOCAL_INCLUDES += -I$(srcdir)/foo", ] - found = [str for str in lines if str.startswith('LOCAL_INCLUDES')] + found = [str for str in lines if str.startswith("LOCAL_INCLUDES")] self.assertEqual(found, expected) def test_generated_includes(self): """Test that GENERATED_INCLUDES are written to backend.mk correctly.""" - env = self._consume('generated_includes', RecursiveMakeBackend) + env = self._consume("generated_includes", RecursiveMakeBackend) - backend_path = mozpath.join(env.topobjdir, 'backend.mk') - lines = [l.strip() for l in open(backend_path, 'rt').readlines()[2:]] + backend_path = mozpath.join(env.topobjdir, "backend.mk") + lines = [l.strip() for l in open(backend_path, "rt").readlines()[2:]] expected = [ - 'LOCAL_INCLUDES += -I$(CURDIR)/bar/baz', - 'LOCAL_INCLUDES += -I$(CURDIR)/foo', + "LOCAL_INCLUDES += -I$(CURDIR)/bar/baz", + "LOCAL_INCLUDES += -I$(CURDIR)/foo", ] - found = [str for str 
in lines if str.startswith('LOCAL_INCLUDES')] + found = [str for str in lines if str.startswith("LOCAL_INCLUDES")] self.assertEqual(found, expected) def test_rust_library(self): """Test that a Rust library is written to backend.mk correctly.""" - env = self._consume('rust-library', RecursiveMakeBackend) + env = self._consume("rust-library", RecursiveMakeBackend) - backend_path = mozpath.join(env.topobjdir, 'backend.mk') - lines = [l.strip() for l in open(backend_path, 'rt').readlines()[2:] - # Strip out computed flags, they're a PITA to test. - if not l.startswith('COMPUTED_')] + backend_path = mozpath.join(env.topobjdir, "backend.mk") + lines = [ + l.strip() + for l in open(backend_path, "rt").readlines()[2:] + # Strip out computed flags, they're a PITA to test. + if not l.startswith("COMPUTED_") + ] expected = [ - 'RUST_LIBRARY_FILE := %s/x86_64-unknown-linux-gnu/release/libtest_library.a' % env.topobjdir, # noqa - 'CARGO_FILE := $(srcdir)/Cargo.toml', - 'CARGO_TARGET_DIR := %s' % env.topobjdir, + "RUST_LIBRARY_FILE := %s/x86_64-unknown-linux-gnu/release/libtest_library.a" + % env.topobjdir, # noqa + "CARGO_FILE := $(srcdir)/Cargo.toml", + "CARGO_TARGET_DIR := %s" % env.topobjdir, ] self.assertEqual(lines, expected) def test_host_rust_library(self): """Test that a Rust library is written to backend.mk correctly.""" - env = self._consume('host-rust-library', RecursiveMakeBackend) + env = self._consume("host-rust-library", RecursiveMakeBackend) - backend_path = mozpath.join(env.topobjdir, 'backend.mk') - lines = [l.strip() for l in open(backend_path, 'rt').readlines()[2:] - # Strip out computed flags, they're a PITA to test. - if not l.startswith('COMPUTED_')] + backend_path = mozpath.join(env.topobjdir, "backend.mk") + lines = [ + l.strip() + for l in open(backend_path, "rt").readlines()[2:] + # Strip out computed flags, they're a PITA to test. + if not l.startswith("COMPUTED_") + ] expected = [ - 'HOST_RUST_LIBRARY_FILE := %s/x86_64-unknown-linux-gnu/release/libhostrusttool.a' % env.topobjdir, # noqa - 'CARGO_FILE := $(srcdir)/Cargo.toml', - 'CARGO_TARGET_DIR := %s' % env.topobjdir, + "HOST_RUST_LIBRARY_FILE := %s/x86_64-unknown-linux-gnu/release/libhostrusttool.a" + % env.topobjdir, # noqa + "CARGO_FILE := $(srcdir)/Cargo.toml", + "CARGO_TARGET_DIR := %s" % env.topobjdir, ] self.assertEqual(lines, expected) def test_host_rust_library_with_features(self): """Test that a host Rust library with features is written to backend.mk correctly.""" - env = self._consume('host-rust-library-features', RecursiveMakeBackend) + env = self._consume("host-rust-library-features", RecursiveMakeBackend) - backend_path = mozpath.join(env.topobjdir, 'backend.mk') - lines = [l.strip() for l in open(backend_path, 'rt').readlines()[2:] - # Strip out computed flags, they're a PITA to test. - if not l.startswith('COMPUTED_')] + backend_path = mozpath.join(env.topobjdir, "backend.mk") + lines = [ + l.strip() + for l in open(backend_path, "rt").readlines()[2:] + # Strip out computed flags, they're a PITA to test. 
+ if not l.startswith("COMPUTED_") + ] expected = [ - 'HOST_RUST_LIBRARY_FILE := %s/x86_64-unknown-linux-gnu/release/libhostrusttool.a' % env.topobjdir, # noqa - 'CARGO_FILE := $(srcdir)/Cargo.toml', - 'CARGO_TARGET_DIR := %s' % env.topobjdir, - 'HOST_RUST_LIBRARY_FEATURES := musthave cantlivewithout', + "HOST_RUST_LIBRARY_FILE := %s/x86_64-unknown-linux-gnu/release/libhostrusttool.a" + % env.topobjdir, # noqa + "CARGO_FILE := $(srcdir)/Cargo.toml", + "CARGO_TARGET_DIR := %s" % env.topobjdir, + "HOST_RUST_LIBRARY_FEATURES := musthave cantlivewithout", ] self.assertEqual(lines, expected) def test_rust_library_with_features(self): """Test that a Rust library with features is written to backend.mk correctly.""" - env = self._consume('rust-library-features', RecursiveMakeBackend) + env = self._consume("rust-library-features", RecursiveMakeBackend) - backend_path = mozpath.join(env.topobjdir, 'backend.mk') - lines = [l.strip() for l in open(backend_path, 'rt').readlines()[2:] - # Strip out computed flags, they're a PITA to test. - if not l.startswith('COMPUTED_')] + backend_path = mozpath.join(env.topobjdir, "backend.mk") + lines = [ + l.strip() + for l in open(backend_path, "rt").readlines()[2:] + # Strip out computed flags, they're a PITA to test. + if not l.startswith("COMPUTED_") + ] expected = [ - 'RUST_LIBRARY_FILE := %s/x86_64-unknown-linux-gnu/release/libfeature_library.a' % env.topobjdir, # noqa - 'CARGO_FILE := $(srcdir)/Cargo.toml', - 'CARGO_TARGET_DIR := %s' % env.topobjdir, - 'RUST_LIBRARY_FEATURES := musthave cantlivewithout', + "RUST_LIBRARY_FILE := %s/x86_64-unknown-linux-gnu/release/libfeature_library.a" + % env.topobjdir, # noqa + "CARGO_FILE := $(srcdir)/Cargo.toml", + "CARGO_TARGET_DIR := %s" % env.topobjdir, + "RUST_LIBRARY_FEATURES := musthave cantlivewithout", ] self.assertEqual(lines, expected) def test_rust_programs(self): """Test that {HOST_,}RUST_PROGRAMS are written to backend.mk correctly.""" - env = self._consume('rust-programs', RecursiveMakeBackend) + env = self._consume("rust-programs", RecursiveMakeBackend) - backend_path = mozpath.join(env.topobjdir, 'code/backend.mk') - lines = [l.strip() for l in open(backend_path, 'rt').readlines()[2:] - # Strip out computed flags, they're a PITA to test. - if not l.startswith('COMPUTED_')] + backend_path = mozpath.join(env.topobjdir, "code/backend.mk") + lines = [ + l.strip() + for l in open(backend_path, "rt").readlines()[2:] + # Strip out computed flags, they're a PITA to test. 
+ if not l.startswith("COMPUTED_") + ] expected = [ - 'CARGO_FILE := %s/code/Cargo.toml' % env.topsrcdir, - 'CARGO_TARGET_DIR := .', - 'RUST_PROGRAMS += i686-pc-windows-msvc/release/target.exe', - 'RUST_CARGO_PROGRAMS += target', - 'HOST_RUST_PROGRAMS += i686-pc-windows-msvc/release/host.exe', - 'HOST_RUST_CARGO_PROGRAMS += host', + "CARGO_FILE := %s/code/Cargo.toml" % env.topsrcdir, + "CARGO_TARGET_DIR := .", + "RUST_PROGRAMS += i686-pc-windows-msvc/release/target.exe", + "RUST_CARGO_PROGRAMS += target", + "HOST_RUST_PROGRAMS += i686-pc-windows-msvc/release/host.exe", + "HOST_RUST_CARGO_PROGRAMS += host", ] self.assertEqual(lines, expected) - root_deps_path = mozpath.join(env.topobjdir, 'root-deps.mk') - lines = [l.strip() for l in open(root_deps_path, 'rt').readlines()] + root_deps_path = mozpath.join(env.topobjdir, "root-deps.mk") + lines = [l.strip() for l in open(root_deps_path, "rt").readlines()] - self.assertTrue(any(l == 'recurse_compile: code/host code/target' for l in lines)) + self.assertTrue( + any(l == "recurse_compile: code/host code/target" for l in lines) + ) def test_final_target(self): """Test that FINAL_TARGET is written to backend.mk correctly.""" - env = self._consume('final_target', RecursiveMakeBackend) + env = self._consume("final_target", RecursiveMakeBackend) final_target_rule = "FINAL_TARGET = $(if $(XPI_NAME),$(DIST)/xpi-stage/$(XPI_NAME),$(DIST)/bin)$(DIST_SUBDIR:%=/%)" # noqa expected = dict() expected[env.topobjdir] = [] - expected[mozpath.join(env.topobjdir, 'both')] = [ - 'XPI_NAME = mycrazyxpi', - 'DIST_SUBDIR = asubdir', - final_target_rule + expected[mozpath.join(env.topobjdir, "both")] = [ + "XPI_NAME = mycrazyxpi", + "DIST_SUBDIR = asubdir", + final_target_rule, ] - expected[mozpath.join(env.topobjdir, 'dist-subdir')] = [ - 'DIST_SUBDIR = asubdir', - final_target_rule + expected[mozpath.join(env.topobjdir, "dist-subdir")] = [ + "DIST_SUBDIR = asubdir", + final_target_rule, ] - expected[mozpath.join(env.topobjdir, 'xpi-name')] = [ - 'XPI_NAME = mycrazyxpi', - final_target_rule + expected[mozpath.join(env.topobjdir, "xpi-name")] = [ + "XPI_NAME = mycrazyxpi", + final_target_rule, ] - expected[mozpath.join(env.topobjdir, 'final-target')] = [ - 'FINAL_TARGET = $(DEPTH)/random-final-target' + expected[mozpath.join(env.topobjdir, "final-target")] = [ + "FINAL_TARGET = $(DEPTH)/random-final-target" ] for key, expected_rules in six.iteritems(expected): - backend_path = mozpath.join(key, 'backend.mk') - lines = [l.strip() for l in open(backend_path, 'rt').readlines()[2:]] - found = [str for str in lines if - str.startswith('FINAL_TARGET') or str.startswith('XPI_NAME') or - str.startswith('DIST_SUBDIR')] + backend_path = mozpath.join(key, "backend.mk") + lines = [l.strip() for l in open(backend_path, "rt").readlines()[2:]] + found = [ + str + for str in lines + if str.startswith("FINAL_TARGET") + or str.startswith("XPI_NAME") + or str.startswith("DIST_SUBDIR") + ] self.assertEqual(found, expected_rules) def test_final_target_pp_files(self): """Test that FINAL_TARGET_PP_FILES is written to backend.mk correctly.""" - env = self._consume('dist-files', RecursiveMakeBackend) + env = self._consume("dist-files", RecursiveMakeBackend) - backend_path = mozpath.join(env.topobjdir, 'backend.mk') - lines = [l.strip() for l in open(backend_path, 'rt').readlines()[2:]] + backend_path = mozpath.join(env.topobjdir, "backend.mk") + lines = [l.strip() for l in open(backend_path, "rt").readlines()[2:]] expected = [ - 'DIST_FILES_0 += $(srcdir)/install.rdf', - 'DIST_FILES_0 += 
$(srcdir)/main.js', - 'DIST_FILES_0_PATH := $(DEPTH)/dist/bin/', - 'DIST_FILES_0_TARGET := misc', - 'PP_TARGETS += DIST_FILES_0', + "DIST_FILES_0 += $(srcdir)/install.rdf", + "DIST_FILES_0 += $(srcdir)/main.js", + "DIST_FILES_0_PATH := $(DEPTH)/dist/bin/", + "DIST_FILES_0_TARGET := misc", + "PP_TARGETS += DIST_FILES_0", ] - found = [str for str in lines if 'DIST_FILES' in str] + found = [str for str in lines if "DIST_FILES" in str] self.assertEqual(found, expected) def test_localized_files(self): """Test that LOCALIZED_FILES is written to backend.mk correctly.""" - env = self._consume('localized-files', RecursiveMakeBackend) + env = self._consume("localized-files", RecursiveMakeBackend) - backend_path = mozpath.join(env.topobjdir, 'backend.mk') - lines = [l.strip() for l in open(backend_path, 'rt').readlines()[2:]] + backend_path = mozpath.join(env.topobjdir, "backend.mk") + lines = [l.strip() for l in open(backend_path, "rt").readlines()[2:]] expected = [ - 'LOCALIZED_FILES_0_FILES += $(wildcard $(LOCALE_SRCDIR)/abc/*.abc)', - 'LOCALIZED_FILES_0_FILES += $(call MERGE_FILE,bar.ini)', - 'LOCALIZED_FILES_0_FILES += $(call MERGE_FILE,foo.js)', - 'LOCALIZED_FILES_0_DEST = $(FINAL_TARGET)/', - 'LOCALIZED_FILES_0_TARGET := misc', - 'INSTALL_TARGETS += LOCALIZED_FILES_0', + "LOCALIZED_FILES_0_FILES += $(wildcard $(LOCALE_SRCDIR)/abc/*.abc)", + "LOCALIZED_FILES_0_FILES += $(call MERGE_FILE,bar.ini)", + "LOCALIZED_FILES_0_FILES += $(call MERGE_FILE,foo.js)", + "LOCALIZED_FILES_0_DEST = $(FINAL_TARGET)/", + "LOCALIZED_FILES_0_TARGET := misc", + "INSTALL_TARGETS += LOCALIZED_FILES_0", ] - found = [str for str in lines if 'LOCALIZED_FILES' in str] + found = [str for str in lines if "LOCALIZED_FILES" in str] self.assertEqual(found, expected) def test_localized_pp_files(self): """Test that LOCALIZED_PP_FILES is written to backend.mk correctly.""" - env = self._consume('localized-pp-files', RecursiveMakeBackend) + env = self._consume("localized-pp-files", RecursiveMakeBackend) - backend_path = mozpath.join(env.topobjdir, 'backend.mk') - lines = [l.strip() for l in open(backend_path, 'rt').readlines()[2:]] + backend_path = mozpath.join(env.topobjdir, "backend.mk") + lines = [l.strip() for l in open(backend_path, "rt").readlines()[2:]] expected = [ - 'LOCALIZED_PP_FILES_0 += $(call MERGE_FILE,bar.ini)', - 'LOCALIZED_PP_FILES_0 += $(call MERGE_FILE,foo.js)', - 'LOCALIZED_PP_FILES_0_PATH = $(FINAL_TARGET)/', - 'LOCALIZED_PP_FILES_0_TARGET := misc', - 'LOCALIZED_PP_FILES_0_FLAGS := --silence-missing-directive-warnings', - 'PP_TARGETS += LOCALIZED_PP_FILES_0', + "LOCALIZED_PP_FILES_0 += $(call MERGE_FILE,bar.ini)", + "LOCALIZED_PP_FILES_0 += $(call MERGE_FILE,foo.js)", + "LOCALIZED_PP_FILES_0_PATH = $(FINAL_TARGET)/", + "LOCALIZED_PP_FILES_0_TARGET := misc", + "LOCALIZED_PP_FILES_0_FLAGS := --silence-missing-directive-warnings", + "PP_TARGETS += LOCALIZED_PP_FILES_0", ] - found = [str for str in lines if 'LOCALIZED_PP_FILES' in str] + found = [str for str in lines if "LOCALIZED_PP_FILES" in str] self.assertEqual(found, expected) def test_config(self): """Test that CONFIGURE_SUBST_FILES are properly handled.""" - env = self._consume('test_config', RecursiveMakeBackend) + env = self._consume("test_config", RecursiveMakeBackend) self.assertEqual( - open(os.path.join(env.topobjdir, 'file'), 'r').readlines(), [ - '#ifdef foo\n', - 'bar baz\n', - '@bar@\n', - ]) + open(os.path.join(env.topobjdir, "file"), "r").readlines(), + [ + "#ifdef foo\n", + "bar baz\n", + "@bar@\n", + ], + ) def test_prog_lib_c_only(self): 
"""Test that C-only binary artifacts are marked as such.""" - env = self._consume('prog-lib-c-only', RecursiveMakeBackend) + env = self._consume("prog-lib-c-only", RecursiveMakeBackend) # PROGRAM C-onlyness. - with open(os.path.join(env.topobjdir, 'c-program', 'backend.mk'), 'r') as fh: + with open(os.path.join(env.topobjdir, "c-program", "backend.mk"), "r") as fh: lines = fh.readlines() lines = [line.rstrip() for line in lines] - self.assertIn('PROG_IS_C_ONLY_c_test_program := 1', lines) + self.assertIn("PROG_IS_C_ONLY_c_test_program := 1", lines) - with open(os.path.join(env.topobjdir, 'cxx-program', 'backend.mk'), 'r') as fh: + with open(os.path.join(env.topobjdir, "cxx-program", "backend.mk"), "r") as fh: lines = fh.readlines() lines = [line.rstrip() for line in lines] # Test for only the absence of the variable, not the precise # form of the variable assignment. for line in lines: - self.assertNotIn('PROG_IS_C_ONLY_cxx_test_program', line) + self.assertNotIn("PROG_IS_C_ONLY_cxx_test_program", line) # SIMPLE_PROGRAMS C-onlyness. - with open(os.path.join(env.topobjdir, 'c-simple-programs', 'backend.mk'), 'r') as fh: + with open( + os.path.join(env.topobjdir, "c-simple-programs", "backend.mk"), "r" + ) as fh: lines = fh.readlines() lines = [line.rstrip() for line in lines] - self.assertIn('PROG_IS_C_ONLY_c_simple_program := 1', lines) + self.assertIn("PROG_IS_C_ONLY_c_simple_program := 1", lines) - with open(os.path.join(env.topobjdir, 'cxx-simple-programs', 'backend.mk'), 'r') as fh: + with open( + os.path.join(env.topobjdir, "cxx-simple-programs", "backend.mk"), "r" + ) as fh: lines = fh.readlines() lines = [line.rstrip() for line in lines] for line in lines: - self.assertNotIn('PROG_IS_C_ONLY_cxx_simple_program', line) + self.assertNotIn("PROG_IS_C_ONLY_cxx_simple_program", line) # Libraries C-onlyness. 
- with open(os.path.join(env.topobjdir, 'c-library', 'backend.mk'), 'r') as fh: + with open(os.path.join(env.topobjdir, "c-library", "backend.mk"), "r") as fh: lines = fh.readlines() lines = [line.rstrip() for line in lines] - self.assertIn('LIB_IS_C_ONLY := 1', lines) + self.assertIn("LIB_IS_C_ONLY := 1", lines) - with open(os.path.join(env.topobjdir, 'cxx-library', 'backend.mk'), 'r') as fh: + with open(os.path.join(env.topobjdir, "cxx-library", "backend.mk"), "r") as fh: lines = fh.readlines() lines = [line.rstrip() for line in lines] for line in lines: - self.assertNotIn('LIB_IS_C_ONLY', line) + self.assertNotIn("LIB_IS_C_ONLY", line) def test_linkage(self): - env = self._consume('linkage', RecursiveMakeBackend) + env = self._consume("linkage", RecursiveMakeBackend) expected_linkage = { - 'prog': { - 'SHARED_LIBS': ['qux/qux.so', - '../shared/baz.so'], - 'STATIC_LIBS': ['../real/foo.a'], - 'OS_LIBS': ['-lfoo', '-lbaz', '-lbar'], + "prog": { + "SHARED_LIBS": ["qux/qux.so", "../shared/baz.so"], + "STATIC_LIBS": ["../real/foo.a"], + "OS_LIBS": ["-lfoo", "-lbaz", "-lbar"], }, - 'shared': { - 'OS_LIBS': ['-lfoo'], - 'SHARED_LIBS': ['../prog/qux/qux.so'], - 'STATIC_LIBS': [], + "shared": { + "OS_LIBS": ["-lfoo"], + "SHARED_LIBS": ["../prog/qux/qux.so"], + "STATIC_LIBS": [], }, - 'static': { - 'STATIC_LIBS': ['../real/foo.a'], - 'OS_LIBS': ['-lbar'], - 'SHARED_LIBS': ['../prog/qux/qux.so'], + "static": { + "STATIC_LIBS": ["../real/foo.a"], + "OS_LIBS": ["-lbar"], + "SHARED_LIBS": ["../prog/qux/qux.so"], + }, + "real": { + "STATIC_LIBS": [], + "SHARED_LIBS": ["../prog/qux/qux.so"], + "OS_LIBS": ["-lbaz"], }, - 'real': { - 'STATIC_LIBS': [], - 'SHARED_LIBS': ['../prog/qux/qux.so'], - 'OS_LIBS': ['-lbaz'], - } } actual_linkage = {} for name in expected_linkage.keys(): - with open(os.path.join(env.topobjdir, name, 'backend.mk'), 'r') as fh: + with open(os.path.join(env.topobjdir, name, "backend.mk"), "r") as fh: actual_linkage[name] = [line.rstrip() for line in fh.readlines()] for name in expected_linkage: for var in expected_linkage[name]: for val in expected_linkage[name][var]: val = os.path.normpath(val) - line = '%s += %s' % (var, val) - self.assertIn(line, - actual_linkage[name]) + line = "%s += %s" % (var, val) + self.assertIn(line, actual_linkage[name]) actual_linkage[name].remove(line) for line in actual_linkage[name]: - self.assertNotIn('%s +=' % var, line) + self.assertNotIn("%s +=" % var, line) def test_list_files(self): - env = self._consume('linkage', RecursiveMakeBackend) + env = self._consume("linkage", RecursiveMakeBackend) expected_list_files = { - 'prog/MyProgram_exe.list': [ - '../static/bar/bar1.o', - '../static/bar/bar2.o', - '../static/bar/bar_helper/bar_helper1.o', + "prog/MyProgram_exe.list": [ + "../static/bar/bar1.o", + "../static/bar/bar2.o", + "../static/bar/bar_helper/bar_helper1.o", ], - 'shared/baz_so.list': [ - 'baz/baz1.o', + "shared/baz_so.list": [ + "baz/baz1.o", ], } actual_list_files = {} for name in expected_list_files.keys(): - with open(os.path.join(env.topobjdir, name), 'r') as fh: - actual_list_files[name] = [line.rstrip() - for line in fh.readlines()] + with open(os.path.join(env.topobjdir, name), "r") as fh: + actual_list_files[name] = [line.rstrip() for line in fh.readlines()] for name in expected_list_files: - self.assertEqual(actual_list_files[name], - [os.path.normpath(f) for f in expected_list_files[name]]) + self.assertEqual( + actual_list_files[name], + [os.path.normpath(f) for f in expected_list_files[name]], + ) # We don't produce a list file 
for a shared library composed only of # object files in its directory, but instead list them in a variable. - with open(os.path.join(env.topobjdir, 'prog', 'qux', 'backend.mk'), 'r') as fh: + with open(os.path.join(env.topobjdir, "prog", "qux", "backend.mk"), "r") as fh: lines = [line.rstrip() for line in fh.readlines()] - self.assertIn('qux.so_OBJS := qux1.o', lines) + self.assertIn("qux.so_OBJS := qux1.o", lines) def test_jar_manifests(self): - env = self._consume('jar-manifests', RecursiveMakeBackend) + env = self._consume("jar-manifests", RecursiveMakeBackend) - with open(os.path.join(env.topobjdir, 'backend.mk'), 'r') as fh: + with open(os.path.join(env.topobjdir, "backend.mk"), "r") as fh: lines = fh.readlines() lines = [line.rstrip() for line in lines] - self.assertIn('JAR_MANIFEST := %s/jar.mn' % env.topsrcdir, lines) + self.assertIn("JAR_MANIFEST := %s/jar.mn" % env.topsrcdir, lines) def test_test_manifests_duplicate_support_files(self): """Ensure duplicate support-files in test manifests work.""" - env = self._consume('test-manifests-duplicate-support-files', - RecursiveMakeBackend) + env = self._consume( + "test-manifests-duplicate-support-files", RecursiveMakeBackend + ) - p = os.path.join(env.topobjdir, '_build_manifests', 'install', '_test_files') + p = os.path.join(env.topobjdir, "_build_manifests", "install", "_test_files") m = InstallManifest(p) - self.assertIn('testing/mochitest/tests/support-file.txt', m) + self.assertIn("testing/mochitest/tests/support-file.txt", m) def test_install_manifests_package_tests(self): """Ensure test suites honor package_tests=False.""" - env = self._consume('test-manifests-package-tests', RecursiveMakeBackend) + env = self._consume("test-manifests-package-tests", RecursiveMakeBackend) - man_dir = mozpath.join(env.topobjdir, '_build_manifests', 'install') + man_dir = mozpath.join(env.topobjdir, "_build_manifests", "install") self.assertTrue(os.path.isdir(man_dir)) - full = mozpath.join(man_dir, '_test_files') + full = mozpath.join(man_dir, "_test_files") self.assertTrue(os.path.exists(full)) m = InstallManifest(path=full) # Only mochitest.js should be in the install manifest. - self.assertTrue('testing/mochitest/tests/mochitest.js' in m) + self.assertTrue("testing/mochitest/tests/mochitest.js" in m) # The path is odd here because we do not normalize at test manifest # processing time. This is a fragile test because there's currently no # way to iterate the manifest. 
- self.assertFalse('instrumentation/./not_packaged.java' in m) + self.assertFalse("instrumentation/./not_packaged.java" in m) def test_program_paths(self): """PROGRAMs with various moz.build settings that change the destination should produce the expected paths in backend.mk.""" - env = self._consume('program-paths', RecursiveMakeBackend) + env = self._consume("program-paths", RecursiveMakeBackend) expected = [ - ('dist-bin', '$(DEPTH)/dist/bin/dist-bin.prog'), - ('dist-subdir', '$(DEPTH)/dist/bin/foo/dist-subdir.prog'), - ('final-target', '$(DEPTH)/final/target/final-target.prog'), - ('not-installed', 'not-installed.prog'), + ("dist-bin", "$(DEPTH)/dist/bin/dist-bin.prog"), + ("dist-subdir", "$(DEPTH)/dist/bin/foo/dist-subdir.prog"), + ("final-target", "$(DEPTH)/final/target/final-target.prog"), + ("not-installed", "not-installed.prog"), ] - prefix = 'PROGRAM = ' + prefix = "PROGRAM = " for (subdir, expected_program) in expected: - with io.open(os.path.join(env.topobjdir, subdir, 'backend.mk'), 'r') as fh: + with io.open(os.path.join(env.topobjdir, subdir, "backend.mk"), "r") as fh: lines = fh.readlines() - program = [line.rstrip().split(prefix, 1)[1] for line in lines - if line.startswith(prefix)][0] + program = [ + line.rstrip().split(prefix, 1)[1] + for line in lines + if line.startswith(prefix) + ][0] self.assertEqual(program, expected_program) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/python/mozbuild/mozbuild/test/codecoverage/test_lcov_rewrite.py b/python/mozbuild/mozbuild/test/codecoverage/test_lcov_rewrite.py index c66c683322b8..fb88b0ca6938 100644 --- a/python/mozbuild/mozbuild/test/codecoverage/test_lcov_rewrite.py +++ b/python/mozbuild/mozbuild/test/codecoverage/test_lcov_rewrite.py @@ -19,10 +19,10 @@ import mozunit here = os.path.dirname(__file__) BUILDCONFIG = { - 'topobjdir': buildconfig.topobjdir, - 'MOZ_APP_NAME': buildconfig.substs.get('MOZ_APP_NAME', 'nightly'), - 'OMNIJAR_NAME': buildconfig.substs.get('OMNIJAR_NAME', 'omni.ja'), - 'MOZ_MACBUNDLE_NAME': buildconfig.substs.get('MOZ_MACBUNDLE_NAME', 'Nightly.app'), + "topobjdir": buildconfig.topobjdir, + "MOZ_APP_NAME": buildconfig.substs.get("MOZ_APP_NAME", "nightly"), + "OMNIJAR_NAME": buildconfig.substs.get("OMNIJAR_NAME", "omni.ja"), + "MOZ_MACBUNDLE_NAME": buildconfig.substs.get("MOZ_MACBUNDLE_NAME", "Nightly.app"), } basic_file = """TN:Compartment_5f7f5c30251800 @@ -110,9 +110,9 @@ end_of_record """ -class TempFile(): +class TempFile: def __init__(self, content): - self.file = NamedTemporaryFile(mode='w', delete=False, encoding='utf-8') + self.file = NamedTemporaryFile(mode="w", delete=False, encoding="utf-8") self.file.write(content) self.file.close() @@ -124,7 +124,6 @@ class TempFile(): class TestLcovParser(unittest.TestCase): - def parser_roundtrip(self, lcov_string): with TempFile(lcov_string) as fname: file_obj = lcov_rewriter.LcovFile([fname]) @@ -172,21 +171,24 @@ bazbarfoo class TestLineRemapping(unittest.TestCase): def setUp(self): - chrome_map_file = os.path.join(buildconfig.topobjdir, 'chrome-map.json') + chrome_map_file = os.path.join(buildconfig.topobjdir, "chrome-map.json") self._old_chrome_info_file = None if os.path.isfile(chrome_map_file): - backup_file = os.path.join(buildconfig.topobjdir, 'chrome-map-backup.json') + backup_file = os.path.join(buildconfig.topobjdir, "chrome-map-backup.json") self._old_chrome_info_file = backup_file self._chrome_map_file = chrome_map_file shutil.move(chrome_map_file, backup_file) empty_chrome_info = [ - {}, {}, {}, BUILDCONFIG, + 
{}, + {}, + {}, + BUILDCONFIG, ] - with open(chrome_map_file, 'w') as fh: + with open(chrome_map_file, "w") as fh: json.dump(empty_chrome_info, fh) - self.lcov_rewriter = lcov_rewriter.LcovFileRewriter(chrome_map_file, '', '', []) + self.lcov_rewriter = lcov_rewriter.LcovFileRewriter(chrome_map_file, "", "", []) self.pp_rewriter = self.lcov_rewriter.pp_rewriter def tearDown(self): @@ -195,52 +197,52 @@ class TestLineRemapping(unittest.TestCase): def test_map_multiple_included(self): with TempFile(multiple_included_files) as fname: - actual = chrome_map.generate_pp_info(fname, '/src/dir') + actual = chrome_map.generate_pp_info(fname, "/src/dir") expected = { - "2,3": ('foo.js', 1), - "4,5": ('path/bar.js', 2), - "6,7": ('foo.js', 3), - "8,9": ('path/bar.js', 2), - "10,11": ('path2/test.js', 3), - "12,13": ('path/baz.js', 1), - "14,15": ('f.js', 6), + "2,3": ("foo.js", 1), + "4,5": ("path/bar.js", 2), + "6,7": ("foo.js", 3), + "8,9": ("path/bar.js", 2), + "10,11": ("path2/test.js", 3), + "12,13": ("path/baz.js", 1), + "14,15": ("f.js", 6), } self.assertEqual(actual, expected) def test_map_srcdir_prefix(self): with TempFile(srcdir_prefix_files) as fname: - actual = chrome_map.generate_pp_info(fname, '/src/dir') + actual = chrome_map.generate_pp_info(fname, "/src/dir") expected = { - "2,3": ('foo.js', 1), - "4,5": ('path/file.js', 2), - "6,7": ('foo.js', 3), + "2,3": ("foo.js", 1), + "4,5": ("path/file.js", 2), + "6,7": ("foo.js", 3), } self.assertEqual(actual, expected) def test_remap_lcov(self): pp_remap = { - "1941,2158": ('dropPreview.js', 6), - "2159,2331": ('updater.js', 6), - "2584,2674": ('intro.js', 6), - "2332,2443": ('undo.js', 6), - "864,985": ('cells.js', 6), - "2444,2454": ('search.js', 6), - "1567,1712": ('drop.js', 6), - "2455,2583": ('customize.js', 6), - "1713,1940": ('dropTargetShim.js', 6), - "1402,1548": ('drag.js', 6), - "1549,1566": ('dragDataHelper.js', 6), - "453,602": ('page.js', 141), - "2675,2678": ('newTab.js', 70), - "56,321": ('transformations.js', 6), - "603,863": ('grid.js', 6), - "322,452": ('page.js', 6), - "986,1401": ('sites.js', 6) + "1941,2158": ("dropPreview.js", 6), + "2159,2331": ("updater.js", 6), + "2584,2674": ("intro.js", 6), + "2332,2443": ("undo.js", 6), + "864,985": ("cells.js", 6), + "2444,2454": ("search.js", 6), + "1567,1712": ("drop.js", 6), + "2455,2583": ("customize.js", 6), + "1713,1940": ("dropTargetShim.js", 6), + "1402,1548": ("drag.js", 6), + "1549,1566": ("dragDataHelper.js", 6), + "453,602": ("page.js", 141), + "2675,2678": ("newTab.js", 70), + "56,321": ("transformations.js", 6), + "603,863": ("grid.js", 6), + "322,452": ("page.js", 6), + "986,1401": ("sites.js", 6), } - fpath = os.path.join(here, 'sample_lcov.info') + fpath = os.path.join(here, "sample_lcov.info") # Read original records lcov_file = lcov_rewriter.LcovFile([fpath]) @@ -272,78 +274,76 @@ class TestLineRemapping(unittest.TestCase): # Read rewritten lcov. with TempFile(out.getvalue()) as fname: lcov_file = lcov_rewriter.LcovFile([fname]) - records = [lcov_file.parse_record(r) for _, _, r in lcov_file.iterate_records()] + records = [ + lcov_file.parse_record(r) for _, _, r in lcov_file.iterate_records() + ] self.assertEqual(len(records), 17) # Lines/functions are only "moved" between records, not duplicated or omited. 
- self.assertEqual(original_line_count, - sum(r.line_count for r in records)) - self.assertEqual(original_covered_line_count, - sum(r.covered_line_count for r in records)) - self.assertEqual(original_function_count, - sum(r.function_count for r in records)) - self.assertEqual(original_covered_function_count, - sum(r.covered_function_count for r in records)) + self.assertEqual(original_line_count, sum(r.line_count for r in records)) + self.assertEqual( + original_covered_line_count, sum(r.covered_line_count for r in records) + ) + self.assertEqual( + original_function_count, sum(r.function_count for r in records) + ) + self.assertEqual( + original_covered_function_count, + sum(r.covered_function_count for r in records), + ) class TestUrlFinder(unittest.TestCase): def setUp(self): - chrome_map_file = os.path.join(buildconfig.topobjdir, 'chrome-map.json') + chrome_map_file = os.path.join(buildconfig.topobjdir, "chrome-map.json") self._old_chrome_info_file = None if os.path.isfile(chrome_map_file): - backup_file = os.path.join(buildconfig.topobjdir, 'chrome-map-backup.json') + backup_file = os.path.join(buildconfig.topobjdir, "chrome-map-backup.json") self._old_chrome_info_file = backup_file self._chrome_map_file = chrome_map_file shutil.move(chrome_map_file, backup_file) dummy_chrome_info = [ { - 'resource://activity-stream/': [ - 'dist/bin/browser/chrome/browser/res/activity-stream', + "resource://activity-stream/": [ + "dist/bin/browser/chrome/browser/res/activity-stream", ], - 'chrome://browser/content/': [ - 'dist/bin/browser/chrome/browser/content/browser', + "chrome://browser/content/": [ + "dist/bin/browser/chrome/browser/content/browser", ], }, { - 'chrome://global/content/netError.xhtml': - 'chrome://browser/content/aboutNetError.xhtml', + "chrome://global/content/netError.xhtml": "chrome://browser/content/aboutNetError.xhtml", # NOQA: E501 }, { - 'dist/bin/components/MainProcessSingleton.js': [ - 'path1', - None + "dist/bin/components/MainProcessSingleton.js": ["path1", None], + "dist/bin/browser/features/firefox@getpocket.com/bootstrap.js": [ + "path4", + None, ], - 'dist/bin/browser/features/firefox@getpocket.com/bootstrap.js': [ - 'path4', - None + "dist/bin/modules/osfile/osfile_async_worker.js": [ + "toolkit/components/osfile/modules/osfile_async_worker.js", + None, ], - 'dist/bin/modules/osfile/osfile_async_worker.js': [ - 'toolkit/components/osfile/modules/osfile_async_worker.js', - None + "dist/bin/browser/chrome/browser/res/activity-stream/lib/": [ + "browser/components/newtab/lib/*", + None, ], - 'dist/bin/browser/chrome/browser/res/activity-stream/lib/': [ - 'browser/components/newtab/lib/*', - None + "dist/bin/browser/chrome/browser/content/browser/aboutNetError.xhtml": [ + "browser/base/content/aboutNetError.xhtml", + None, ], - 'dist/bin/browser/chrome/browser/content/browser/aboutNetError.xhtml': [ - 'browser/base/content/aboutNetError.xhtml', - None - ], - 'dist/bin/modules/AppConstants.jsm': [ - 'toolkit/modules/AppConstants.jsm', + "dist/bin/modules/AppConstants.jsm": [ + "toolkit/modules/AppConstants.jsm", { - '101,102': [ - 'toolkit/modules/AppConstants.jsm', - 135 - ], - } + "101,102": ["toolkit/modules/AppConstants.jsm", 135], + }, ], }, BUILDCONFIG, ] - with open(chrome_map_file, 'w') as fh: + with open(chrome_map_file, "w") as fh: json.dump(dummy_chrome_info, fh) def tearDown(self): @@ -351,67 +351,96 @@ class TestUrlFinder(unittest.TestCase): shutil.move(self._old_chrome_info_file, self._chrome_map_file) def test_jar_paths(self): - app_name = 
BUILDCONFIG['MOZ_APP_NAME'] - omnijar_name = BUILDCONFIG['OMNIJAR_NAME'] + app_name = BUILDCONFIG["MOZ_APP_NAME"] + omnijar_name = BUILDCONFIG["OMNIJAR_NAME"] paths = [ - ('jar:file:///home/worker/workspace/build/application/' + app_name + - '/' + omnijar_name + '!/components/MainProcessSingleton.js', 'path1'), - ('jar:file:///home/worker/workspace/build/application/' + app_name + - '/browser/features/firefox@getpocket.com.xpi!/bootstrap.js', 'path4'), + ( + "jar:file:///home/worker/workspace/build/application/" + + app_name + + "/" + + omnijar_name + + "!/components/MainProcessSingleton.js", + "path1", + ), + ( + "jar:file:///home/worker/workspace/build/application/" + + app_name + + "/browser/features/firefox@getpocket.com.xpi!/bootstrap.js", + "path4", + ), ] - url_finder = lcov_rewriter.UrlFinder(self._chrome_map_file, '', '', []) + url_finder = lcov_rewriter.UrlFinder(self._chrome_map_file, "", "", []) for path, expected in paths: self.assertEqual(url_finder.rewrite_url(path)[0], expected) def test_wrong_scheme_paths(self): paths = [ - 'http://www.mozilla.org/aFile.js', - 'https://www.mozilla.org/aFile.js', - 'data:something', - 'about:newtab', - 'javascript:something', + "http://www.mozilla.org/aFile.js", + "https://www.mozilla.org/aFile.js", + "data:something", + "about:newtab", + "javascript:something", ] - url_finder = lcov_rewriter.UrlFinder(self._chrome_map_file, '', '', []) + url_finder = lcov_rewriter.UrlFinder(self._chrome_map_file, "", "", []) for path in paths: self.assertIsNone(url_finder.rewrite_url(path)) def test_chrome_resource_paths(self): paths = [ # Path with default url prefix - ('resource://gre/modules/osfile/osfile_async_worker.js', - ('toolkit/components/osfile/modules/osfile_async_worker.js', None)), + ( + "resource://gre/modules/osfile/osfile_async_worker.js", + ("toolkit/components/osfile/modules/osfile_async_worker.js", None), + ), # Path with url prefix that is in chrome map - ('resource://activity-stream/lib/PrefsFeed.jsm', - ('browser/components/newtab/lib/PrefsFeed.jsm', None)), + ( + "resource://activity-stream/lib/PrefsFeed.jsm", + ("browser/components/newtab/lib/PrefsFeed.jsm", None), + ), # Path which is in url overrides - ('chrome://global/content/netError.xhtml', - ('browser/base/content/aboutNetError.xhtml', None)), + ( + "chrome://global/content/netError.xhtml", + ("browser/base/content/aboutNetError.xhtml", None), + ), # Path which ends with > eval - ('resource://gre/modules/osfile/osfile_async_worker.js line 3 > eval', None), + ( + "resource://gre/modules/osfile/osfile_async_worker.js line 3 > eval", + None, + ), # Path which ends with > Function - ('resource://gre/modules/osfile/osfile_async_worker.js line 3 > Function', None), + ( + "resource://gre/modules/osfile/osfile_async_worker.js line 3 > Function", + None, + ), # Path which contains "->" - ('resource://gre/modules/addons/XPIProvider.jsm -> resource://gre/modules/osfile/osfile_async_worker.js', # noqa - ('toolkit/components/osfile/modules/osfile_async_worker.js', None)), + ( + "resource://gre/modules/addons/XPIProvider.jsm -> resource://gre/modules/osfile/osfile_async_worker.js", # noqa + ("toolkit/components/osfile/modules/osfile_async_worker.js", None), + ), # Path with pp_info - ('resource://gre/modules/AppConstants.jsm', ('toolkit/modules/AppConstants.jsm', { - '101,102': [ - 'toolkit/modules/AppConstants.jsm', - 135 - ], - })), + ( + "resource://gre/modules/AppConstants.jsm", + ( + "toolkit/modules/AppConstants.jsm", + { + "101,102": ["toolkit/modules/AppConstants.jsm", 135], + }, 
+ ), + ), # Path with query - ('resource://activity-stream/lib/PrefsFeed.jsm?q=0.9098419174803978', - ('browser/components/newtab/lib/PrefsFeed.jsm', None)), + ( + "resource://activity-stream/lib/PrefsFeed.jsm?q=0.9098419174803978", + ("browser/components/newtab/lib/PrefsFeed.jsm", None), + ), ] - url_finder = lcov_rewriter.UrlFinder(self._chrome_map_file, '', 'dist/bin/', []) + url_finder = lcov_rewriter.UrlFinder(self._chrome_map_file, "", "dist/bin/", []) for path, expected in paths: self.assertEqual(url_finder.rewrite_url(path), expected) -if __name__ == '__main__': +if __name__ == "__main__": mozunit.main() diff --git a/taskcluster/taskgraph/transforms/bouncer_aliases.py b/taskcluster/taskgraph/transforms/bouncer_aliases.py index b14686977f12..776d8aec7d23 100644 --- a/taskcluster/taskgraph/transforms/bouncer_aliases.py +++ b/taskcluster/taskgraph/transforms/bouncer_aliases.py @@ -11,7 +11,9 @@ import logging from taskgraph.transforms.base import TransformSequence from taskgraph.transforms.bouncer_submission import craft_bouncer_product_name -from taskgraph.transforms.bouncer_submission_partners import craft_partner_bouncer_product_name +from taskgraph.transforms.bouncer_submission_partners import ( + craft_partner_bouncer_product_name, +) from taskgraph.util.partners import get_partners_to_be_published from taskgraph.util.schema import resolve_keyed_by from taskgraph.util.scriptworker import get_release_config @@ -25,59 +27,81 @@ transforms = TransformSequence() def make_task_worker(config, jobs): for job in jobs: resolve_keyed_by( - job, 'worker-type', item_name=job['name'], - **{'release-level': config.params.release_level()} + job, + "worker-type", + item_name=job["name"], + **{"release-level": config.params.release_level()} ) resolve_keyed_by( - job, 'scopes', item_name=job['name'], - **{'release-level': config.params.release_level()} + job, + "scopes", + item_name=job["name"], + **{"release-level": config.params.release_level()} ) resolve_keyed_by( - job, 'bouncer-products-per-alias', - item_name=job['name'], project=config.params['project'] + job, + "bouncer-products-per-alias", + item_name=job["name"], + project=config.params["project"], ) - if 'partner-bouncer-products-per-alias' in job: + if "partner-bouncer-products-per-alias" in job: resolve_keyed_by( - job, 'partner-bouncer-products-per-alias', - item_name=job['name'], project=config.params['project'] + job, + "partner-bouncer-products-per-alias", + item_name=job["name"], + project=config.params["project"], ) - job['worker']['entries'] = craft_bouncer_entries(config, job) + job["worker"]["entries"] = craft_bouncer_entries(config, job) - del job['bouncer-products-per-alias'] - if 'partner-bouncer-products-per-alias' in job: - del job['partner-bouncer-products-per-alias'] + del job["bouncer-products-per-alias"] + if "partner-bouncer-products-per-alias" in job: + del job["partner-bouncer-products-per-alias"] - if job['worker']['entries']: + if job["worker"]["entries"]: yield job else: - logger.warn('No bouncer entries defined in bouncer submission task for "{}". \ -Job deleted.'.format(job['name'])) + logger.warn( + 'No bouncer entries defined in bouncer submission task for "{}". 
\ +Job deleted.'.format( + job["name"] + ) + ) def craft_bouncer_entries(config, job): release_config = get_release_config(config) - product = job['shipping-product'] - current_version = release_config['version'] - bouncer_products_per_alias = job['bouncer-products-per-alias'] + product = job["shipping-product"] + current_version = release_config["version"] + bouncer_products_per_alias = job["bouncer-products-per-alias"] entries = { bouncer_alias: craft_bouncer_product_name( - product, bouncer_product, current_version, + product, + bouncer_product, + current_version, ) for bouncer_alias, bouncer_product in bouncer_products_per_alias.items() } - partner_bouncer_products_per_alias = job.get('partner-bouncer-products-per-alias') + partner_bouncer_products_per_alias = job.get("partner-bouncer-products-per-alias") if partner_bouncer_products_per_alias: partners = get_partners_to_be_published(config) for partner, sub_config_name, _ in partners: - entries.update({ - bouncer_alias.replace('PARTNER', '{}-{}'.format(partner, sub_config_name)): - craft_partner_bouncer_product_name( - product, bouncer_product, current_version, partner, sub_config_name) - for bouncer_alias, bouncer_product in partner_bouncer_products_per_alias.items() - }) + entries.update( + { + bouncer_alias.replace( + "PARTNER", "{}-{}".format(partner, sub_config_name) + ): craft_partner_bouncer_product_name( + product, + bouncer_product, + current_version, + partner, + sub_config_name, + ) + for bouncer_alias, bouncer_product in partner_bouncer_products_per_alias.items() # NOQA: E501 + } + ) return entries diff --git a/taskcluster/taskgraph/transforms/mar_signing.py b/taskcluster/taskgraph/transforms/mar_signing.py index 448e962313bf..0622d35b2a2d 100644 --- a/taskcluster/taskgraph/transforms/mar_signing.py +++ b/taskcluster/taskgraph/transforms/mar_signing.py @@ -9,7 +9,10 @@ from __future__ import absolute_import, print_function, unicode_literals import os from taskgraph.transforms.base import TransformSequence -from taskgraph.util.attributes import copy_attributes_from_dependent_job, sorted_unique_list +from taskgraph.util.attributes import ( + copy_attributes_from_dependent_job, + sorted_unique_list, +) from taskgraph.util.scriptworker import ( get_signing_cert_scope_per_platform, ) @@ -18,14 +21,15 @@ from taskgraph.util.taskcluster import get_artifact_prefix from taskgraph.util.treeherder import join_symbol, inherit_treeherder_from_dep import logging + logger = logging.getLogger(__name__) SIGNING_FORMATS = { - 'mar-signing-autograph-stage': { - 'target.complete.mar': ['autograph_stage_mar384'], + "mar-signing-autograph-stage": { + "target.complete.mar": ["autograph_stage_mar384"], }, - 'default': { - 'target.complete.mar': ['autograph_hash_only_mar384'], + "default": { + "target.complete.mar": ["autograph_hash_only_mar384"], }, } @@ -35,34 +39,36 @@ transforms = TransformSequence() def generate_partials_artifacts(job, release_history, platform, locale=None): artifact_prefix = get_artifact_prefix(job) if locale: - artifact_prefix = '{}/{}'.format(artifact_prefix, locale) + artifact_prefix = "{}/{}".format(artifact_prefix, locale) else: - locale = 'en-US' + locale = "en-US" artifacts = get_partials_artifacts_from_params(release_history, platform, locale) - upstream_artifacts = [{ - "taskId": {"task-reference": '<partials>'}, - "taskType": 'partials', - "paths": [ - "{}/{}".format(artifact_prefix, path) - for path, version in artifacts - # TODO Use mozilla-version to avoid comparing strings. 
Otherwise Firefox 100 will be - # considered smaller than Firefox 56 - if version is None or version >= '56' - ], - "formats": ["autograph_hash_only_mar384"], - }] + upstream_artifacts = [ + { + "taskId": {"task-reference": "<partials>"}, + "taskType": "partials", + "paths": [ + "{}/{}".format(artifact_prefix, path) + for path, version in artifacts + # TODO Use mozilla-version to avoid comparing strings. Otherwise Firefox 100 will + # be considered smaller than Firefox 56 + if version is None or version >= "56" + ], + "formats": ["autograph_hash_only_mar384"], + } + ] old_mar_upstream_artifacts = { - "taskId": {"task-reference": '<partials>'}, - "taskType": 'partials', + "taskId": {"task-reference": "<partials>"}, + "taskType": "partials", "paths": [ "{}/{}".format(artifact_prefix, path) for path, version in artifacts # TODO Use mozilla-version to avoid comparing strings. Otherwise Firefox 100 will be # considered smaller than Firefox 56 - if version is not None and version < '56' + if version is not None and version < "56" ], "formats": ["mar"], } @@ -76,16 +82,18 @@ def generate_partials_artifacts(job, release_history, platform, locale=None): def generate_complete_artifacts(job, kind): upstream_artifacts = [] if kind not in SIGNING_FORMATS: - kind = 'default' + kind = "default" for artifact in job.release_artifacts: basename = os.path.basename(artifact) if basename in SIGNING_FORMATS[kind]: - upstream_artifacts.append({ - "taskId": {"task-reference": '<{}>'.format(job.kind)}, - "taskType": 'build', - "paths": [artifact], - "formats": SIGNING_FORMATS[kind][basename], - }) + upstream_artifacts.append( + { + "taskId": {"task-reference": "<{}>".format(job.kind)}, + "taskType": "build", + "paths": [artifact], + "formats": SIGNING_FORMATS[kind][basename], + } + ) return upstream_artifacts @@ -93,15 +101,15 @@ def generate_complete_artifacts(job, kind): @transforms.add def make_task_description(config, jobs): for job in jobs: - dep_job = job['primary-dependency'] - locale = dep_job.attributes.get('locale') + dep_job = job["primary-dependency"] + locale = dep_job.attributes.get("locale") treeherder = inherit_treeherder_from_dep(job, dep_job) treeherder.setdefault( - 'symbol', join_symbol(job.get('treeherder-group', 'ms'), locale or 'N') + "symbol", join_symbol(job.get("treeherder-group", "ms"), locale or "N") ) - label = job.get('label', "{}-{}".format(config.kind, dep_job.label)) + label = job.get("label", "{}-{}".format(config.kind, dep_job.label)) dependencies = {dep_job.kind: dep_job.label} signing_dependencies = dep_job.dependencies @@ -110,25 +118,24 @@ def make_task_description(config, jobs): dependencies.update(signing_dependencies) attributes = copy_attributes_from_dependent_job(dep_job) - attributes['required_signoffs'] = sorted_unique_list( - attributes.get('required_signoffs', []), - job.pop('required_signoffs') + attributes["required_signoffs"] = sorted_unique_list( + attributes.get("required_signoffs", []), job.pop("required_signoffs") ) - attributes['shipping_phase'] = job['shipping-phase'] + attributes["shipping_phase"] = job["shipping-phase"] if locale: - attributes['locale'] = locale + attributes["locale"] = locale - build_platform = attributes.get('build_platform') - if config.kind == 'partials-signing': + build_platform = attributes.get("build_platform") + if config.kind == "partials-signing": upstream_artifacts = generate_partials_artifacts( - dep_job, config.params['release_history'], build_platform, locale) + dep_job, config.params["release_history"], build_platform, 
locale + ) else: upstream_artifacts = generate_complete_artifacts(dep_job, config.kind) is_shippable = job.get( - 'shippable', # First check current job - dep_job.attributes.get( - 'shippable')) # Then dep job for 'shippable' + "shippable", dep_job.attributes.get("shippable") # First check current job + ) # Then dep job for 'shippable' signing_cert_scope = get_signing_cert_scope_per_platform( build_platform, is_shippable, config ) @@ -136,19 +143,23 @@ def make_task_description(config, jobs): scopes = [signing_cert_scope] task = { - 'label': label, - 'description': "{} {}".format( - dep_job.description, job['description-suffix']), - 'worker-type': job.get('worker-type', 'linux-signing'), - 'worker': {'implementation': 'scriptworker-signing', - 'upstream-artifacts': upstream_artifacts, - 'max-run-time': 3600}, - 'dependencies': dependencies, - 'attributes': attributes, - 'scopes': scopes, - 'run-on-projects': job.get('run-on-projects', - dep_job.attributes.get('run_on_projects')), - 'treeherder': treeherder, + "label": label, + "description": "{} {}".format( + dep_job.description, job["description-suffix"] + ), + "worker-type": job.get("worker-type", "linux-signing"), + "worker": { + "implementation": "scriptworker-signing", + "upstream-artifacts": upstream_artifacts, + "max-run-time": 3600, + }, + "dependencies": dependencies, + "attributes": attributes, + "scopes": scopes, + "run-on-projects": job.get( + "run-on-projects", dep_job.attributes.get("run_on_projects") + ), + "treeherder": treeherder, } yield task diff --git a/taskcluster/taskgraph/transforms/repackage_signing_partner.py b/taskcluster/taskgraph/transforms/repackage_signing_partner.py index 875b537c4662..7f93216c4ce6 100644 --- a/taskcluster/taskgraph/transforms/repackage_signing_partner.py +++ b/taskcluster/taskgraph/transforms/repackage_signing_partner.py @@ -12,22 +12,22 @@ from taskgraph.loader.single_dep import schema from taskgraph.transforms.base import TransformSequence from taskgraph.util.attributes import copy_attributes_from_dependent_job from taskgraph.util.partners import get_partner_config_by_kind -from taskgraph.util.scriptworker import ( - get_signing_cert_scope_per_platform, -) +from taskgraph.util.scriptworker import get_signing_cert_scope_per_platform from taskgraph.util.taskcluster import get_artifact_path from taskgraph.transforms.task import task_description_schema from voluptuous import Optional transforms = TransformSequence() -repackage_signing_description_schema = schema.extend({ - Optional('label'): text_type, - Optional('extra'): object, - Optional('shipping-product'): task_description_schema['shipping-product'], - Optional('shipping-phase'): task_description_schema['shipping-phase'], - Optional('priority'): task_description_schema['priority'], -}) +repackage_signing_description_schema = schema.extend( + { + Optional("label"): text_type, + Optional("extra"): object, + Optional("shipping-product"): task_description_schema["shipping-product"], + Optional("shipping-phase"): task_description_schema["shipping-phase"], + Optional("priority"): task_description_schema["priority"], + } +) transforms.add_validate(repackage_signing_description_schema) @@ -35,29 +35,26 @@ transforms.add_validate(repackage_signing_description_schema) @transforms.add def make_repackage_signing_description(config, jobs): for job in jobs: - dep_job = job['primary-dependency'] - repack_id = dep_job.task['extra']['repack_id'] + dep_job = job["primary-dependency"] + repack_id = dep_job.task["extra"]["repack_id"] attributes = 
dep_job.attributes - build_platform = dep_job.attributes.get('build_platform') - is_shippable = dep_job.attributes.get('shippable') + build_platform = dep_job.attributes.get("build_platform") + is_shippable = dep_job.attributes.get("shippable") # Mac & windows label = dep_job.label.replace("repackage-", "repackage-signing-") # Linux label = label.replace("chunking-dummy-", "repackage-signing-") - description = ( - "Signing of repackaged artifacts for partner repack id '{repack_id}' for build '" - "{build_platform}/{build_type}'".format( - repack_id=repack_id, - build_platform=attributes.get('build_platform'), - build_type=attributes.get('build_type') - ) + description = "Signing of repackaged artifacts for partner repack id '{repack_id}' for build '" "{build_platform}/{build_type}'".format( # NOQA: E501 + repack_id=repack_id, + build_platform=attributes.get("build_platform"), + build_type=attributes.get("build_type"), ) - if 'linux' in build_platform: + if "linux" in build_platform: # we want the repack job, via the dependencies for the the chunking-dummy dep_job for dep in dep_job.dependencies.values(): - if dep.startswith('release-partner-repack'): + if dep.startswith("release-partner-repack"): dependencies = {"repack": dep} break else: @@ -65,73 +62,90 @@ def make_repackage_signing_description(config, jobs): dependencies = {"repackage": dep_job.label} attributes = copy_attributes_from_dependent_job(dep_job) - attributes['repackage_type'] = 'repackage-signing' + attributes["repackage_type"] = "repackage-signing" signing_cert_scope = get_signing_cert_scope_per_platform( build_platform, is_shippable, config ) scopes = [signing_cert_scope] - if 'win' in build_platform: - upstream_artifacts = [{ - "taskId": {"task-reference": "<repackage>"}, - "taskType": "repackage", - "paths": [ - get_artifact_path(dep_job, "{}/target.installer.exe".format(repack_id)), - ], - "formats": ["autograph_authenticode", "autograph_gpg"] - }] - - partner_config = get_partner_config_by_kind(config, config.kind) - partner, subpartner, _ = repack_id.split('/') - repack_stub_installer = partner_config[partner][subpartner].get( - 'repack_stub_installer') - if build_platform.startswith('win32') and repack_stub_installer: - upstream_artifacts.append({ + if "win" in build_platform: + upstream_artifacts = [ + { "taskId": {"task-reference": "<repackage>"}, "taskType": "repackage", "paths": [ - get_artifact_path(dep_job, "{}/target.stub-installer.exe".format( - repack_id)), + get_artifact_path( + dep_job, "{}/target.installer.exe".format(repack_id) + ), ], - "formats": ["autograph_authenticode", "autograph_gpg"] - }) - elif 'mac' in build_platform: - upstream_artifacts = [{ - "taskId": {"task-reference": "<repackage>"}, - "taskType": "repackage", - "paths": [ - get_artifact_path(dep_job, "{}/target.dmg".format(repack_id)), - ], - "formats": ["autograph_gpg"] - }] - elif 'linux' in build_platform: - upstream_artifacts = [{ - "taskId": {"task-reference": "<repack>"}, - "taskType": "repackage", - "paths": [ - get_artifact_path(dep_job, "{}/target.tar.bz2".format(repack_id)), - ], - "formats": ["autograph_gpg"] - }] + "formats": ["autograph_authenticode", "autograph_gpg"], + } + ] + + partner_config = get_partner_config_by_kind(config, config.kind) + partner, subpartner, _ = repack_id.split("/") + repack_stub_installer = partner_config[partner][subpartner].get( + "repack_stub_installer" + ) + if build_platform.startswith("win32") and repack_stub_installer: + upstream_artifacts.append( + { + "taskId": {"task-reference": 
"<repackage>"}, + "taskType": "repackage", + "paths": [ + get_artifact_path( + dep_job, + "{}/target.stub-installer.exe".format(repack_id), + ), + ], + "formats": ["autograph_authenticode", "autograph_gpg"], + } + ) + elif "mac" in build_platform: + upstream_artifacts = [ + { + "taskId": {"task-reference": "<repackage>"}, + "taskType": "repackage", + "paths": [ + get_artifact_path(dep_job, "{}/target.dmg".format(repack_id)), + ], + "formats": ["autograph_gpg"], + } + ] + elif "linux" in build_platform: + upstream_artifacts = [ + { + "taskId": {"task-reference": "<repack>"}, + "taskType": "repackage", + "paths": [ + get_artifact_path( + dep_job, "{}/target.tar.bz2".format(repack_id) + ), + ], + "formats": ["autograph_gpg"], + } + ] task = { - 'label': label, - 'description': description, - 'worker-type': 'linux-signing', - 'worker': {'implementation': 'scriptworker-signing', - 'upstream-artifacts': upstream_artifacts, - 'max-run-time': 3600}, - 'scopes': scopes, - 'dependencies': dependencies, - 'attributes': attributes, - 'run-on-projects': dep_job.attributes.get('run_on_projects'), - 'extra': { - 'repack_id': repack_id, - } + "label": label, + "description": description, + "worker-type": "linux-signing", + "worker": { + "implementation": "scriptworker-signing", + "upstream-artifacts": upstream_artifacts, + "max-run-time": 3600, + }, + "scopes": scopes, + "dependencies": dependencies, + "attributes": attributes, + "run-on-projects": dep_job.attributes.get("run_on_projects"), + "extra": { + "repack_id": repack_id, + }, } # we may have reduced the priority for partner jobs, otherwise task.py will set it - if job.get('priority'): - task['priority'] = job['priority'] + if job.get("priority"): + task["priority"] = job["priority"] yield task diff --git a/testing/addtest.py b/testing/addtest.py index 9a18fce86a17..36ea64628fb4 100644 --- a/testing/addtest.py +++ b/testing/addtest.py @@ -1,4 +1,3 @@ - from __future__ import absolute_import, unicode_literals, print_function import io @@ -50,7 +49,7 @@ add_task(async function test_TODO() { filename = os.path.basename(self.test) if not os.path.isfile(manifest_file): - print('Could not open manifest file {}'.format(manifest_file)) + print("Could not open manifest file {}".format(manifest_file)) return write_to_ini_file(manifest_file, filename) @@ -64,22 +63,29 @@ class MochitestCreator(Creator): def _get_template_contents(self): mochitest_templates = os.path.abspath( - os.path.join(os.path.dirname(__file__), 'mochitest', 'static') + os.path.join(os.path.dirname(__file__), "mochitest", "static") ) template_file_name = None template_file_name = self.templates.get(self.suite) if template_file_name is None: - print("Sorry, `addtest` doesn't currently know how to add {}".format(self.suite)) + print( + "Sorry, `addtest` doesn't currently know how to add {}".format( + self.suite + ) + ) return None template_file_name = template_file_name % {"doc": self.doc} template_file = os.path.join(mochitest_templates, template_file_name) if not os.path.isfile(template_file): - print("Sorry, `addtest` doesn't currently know how to add {} with document type {}" - .format(self.suite, self.doc)) + print( + "Sorry, `addtest` doesn't currently know how to add {} with document type {}".format( # NOQA: E501 + self.suite, self.doc + ) + ) return None with open(template_file) as f: @@ -90,13 +96,13 @@ class MochitestCreator(Creator): guessed_ini = { "mochitest-plain": "mochitest.ini", "mochitest-chrome": "chrome.ini", - "mochitest-browser-chrome": "browser.ini" + 
"mochitest-browser-chrome": "browser.ini", }[self.suite] manifest_file = os.path.join(os.path.dirname(self.test), guessed_ini) filename = os.path.basename(self.test) if not os.path.isfile(manifest_file): - print('Could not open manifest file {}'.format(manifest_file)) + print("Could not open manifest file {}".format(manifest_file)) return write_to_ini_file(manifest_file, filename) @@ -135,20 +141,32 @@ class WebPlatformTestsCreator(Creator): @classmethod def get_parser(cls, parser): - parser.add_argument("--long-timeout", action="store_true", - help="Test should be given a long timeout " - "(typically 60s rather than 10s, but varies depending on environment)") - parser.add_argument("-m", "--reference", dest="ref", help="Path to the reference file") - parser.add_argument("--mismatch", action="store_true", - help="Create a mismatch reftest") - parser.add_argument("--wait", action="store_true", - help="Create a reftest that waits until takeScreenshot() is called") + parser.add_argument( + "--long-timeout", + action="store_true", + help="Test should be given a long timeout " + "(typically 60s rather than 10s, but varies depending on environment)", + ) + parser.add_argument( + "-m", "--reference", dest="ref", help="Path to the reference file" + ) + parser.add_argument( + "--mismatch", action="store_true", help="Create a mismatch reftest" + ) + parser.add_argument( + "--wait", + action="store_true", + help="Create a reftest that waits until takeScreenshot() is called", + ) def check_args(self): if self.wpt_type(self.test) is None: - print("""Test path %s is not in wpt directories: + print( + """Test path %s is not in wpt directories: testing/web-platform/tests for tests that may be shared -testing/web-platform/mozilla/tests for Gecko-only tests""" % self.test) +testing/web-platform/mozilla/tests for Gecko-only tests""" + % self.test + ) return False if not self.reftest: @@ -178,8 +196,11 @@ testing/web-platform/mozilla/tests for Gecko-only tests""" % self.test) yield (ref_path, self._get_template_contents(reference=True)) def _get_template_contents(self, reference=False): - args = {"documentElement": "<html class=reftest-wait>\n" - if self.kwargs["wait"] else ""} + args = { + "documentElement": "<html class=reftest-wait>\n" + if self.kwargs["wait"] + else "" + } if self.test.rsplit(".", 1)[1] == "js": template = self.template_js @@ -192,9 +213,14 @@ testing/web-platform/mozilla/tests for Gecko-only tests""" % self.test) if self.reftest: if not reference: - args = {"match": "match" if not self.kwargs["mismatch"] else "mismatch", - "ref": (self.ref_url(self.kwargs["ref"]) - if self.kwargs["ref"] else '""')} + args = { + "match": "match" if not self.kwargs["mismatch"] else "mismatch", + "ref": ( + self.ref_url(self.kwargs["ref"]) + if self.kwargs["ref"] + else '""' + ), + } template += self.template_body_reftest % args if self.kwargs["wait"]: template += self.template_body_reftest_wait @@ -236,7 +262,7 @@ testing/web-platform/mozilla/tests for Gecko-only tests""" % self.test) # Path is an absolute URL relative to the tests root if path.startswith("/_mozilla/"): base = self.local_path - path = path[len("/_mozilla/"):] + path = path[len("/_mozilla/") :] else: base = self.upstream_path path = path[1:] @@ -249,7 +275,8 @@ testing/web-platform/mozilla/tests for Gecko-only tests""" % self.test) return path else: test_rel_path = self.src_rel_path( - os.path.join(os.path.dirname(self.test), path)) + os.path.join(os.path.dirname(self.test), path) + ) if self.wpt_type(test_rel_path) is not None: return 
test_rel_path # Returning None indicates that the path wasn't valid @@ -288,36 +315,38 @@ def write_to_ini_file(manifest_file, filename): manifest = manifestparser.TestManifest(manifests=[manifest_file]) insert_before = None - if any(t['name'] == filename for t in manifest.tests): + if any(t["name"] == filename for t in manifest.tests): print("{} is already in the manifest.".format(filename)) return for test in manifest.tests: - if test.get('name') > filename: - insert_before = test.get('name') + if test.get("name") > filename: + insert_before = test.get("name") break with open(manifest_file, "r") as f: contents = f.readlines() - filename = '[{}]\n'.format(filename) + filename = "[{}]\n".format(filename) if not insert_before: contents.append(filename) else: - insert_before = '[{}]'.format(insert_before) + insert_before = "[{}]".format(insert_before) for i in range(len(contents)): if contents[i].startswith(insert_before): contents.insert(i, filename) break - with io.open(manifest_file, "w", newline='\n') as f: + with io.open(manifest_file, "w", newline="\n") as f: f.write("".join(contents)) -TEST_CREATORS = {"mochitest": MochitestCreator, - "web-platform-tests": WebPlatformTestsCreator, - "xpcshell": XpcshellCreator} +TEST_CREATORS = { + "mochitest": MochitestCreator, + "web-platform-tests": WebPlatformTestsCreator, + "xpcshell": XpcshellCreator, +} def creator_for_suite(suite): diff --git a/testing/mochitest/mochitest_options.py b/testing/mochitest/mochitest_options.py index 5e60241806f1..af567ef5c0ab 100644 --- a/testing/mochitest/mochitest_options.py +++ b/testing/mochitest/mochitest_options.py @@ -26,6 +26,7 @@ try: MozbuildObject, MachCommandConditions as conditions, ) + build_obj = MozbuildObject.from_environment(cwd=here) except ImportError: build_obj = None @@ -34,42 +35,44 @@ except ImportError: # Maps test flavors to data needed to run them ALL_FLAVORS = { - 'mochitest': { - 'suite': 'plain', - 'aliases': ('plain', 'mochitest'), - 'enabled_apps': ('firefox', 'android'), - 'extra_args': { - 'flavor': 'plain', + "mochitest": { + "suite": "plain", + "aliases": ("plain", "mochitest"), + "enabled_apps": ("firefox", "android"), + "extra_args": { + "flavor": "plain", }, - 'install_subdir': 'tests', + "install_subdir": "tests", }, - 'chrome': { - 'suite': 'chrome', - 'aliases': ('chrome', 'mochitest-chrome'), - 'enabled_apps': ('firefox'), - 'extra_args': { - 'flavor': 'chrome', - } + "chrome": { + "suite": "chrome", + "aliases": ("chrome", "mochitest-chrome"), + "enabled_apps": ("firefox"), + "extra_args": { + "flavor": "chrome", + }, }, - 'browser-chrome': { - 'suite': 'browser', - 'aliases': ('browser', 'browser-chrome', 'mochitest-browser-chrome', 'bc'), - 'enabled_apps': ('firefox', 'thunderbird'), - 'extra_args': { - 'flavor': 'browser', - } + "browser-chrome": { + "suite": "browser", + "aliases": ("browser", "browser-chrome", "mochitest-browser-chrome", "bc"), + "enabled_apps": ("firefox", "thunderbird"), + "extra_args": { + "flavor": "browser", + }, }, - 'a11y': { - 'suite': 'a11y', - 'aliases': ('a11y', 'mochitest-a11y', 'accessibility'), - 'enabled_apps': ('firefox',), - 'extra_args': { - 'flavor': 'a11y', - } + "a11y": { + "suite": "a11y", + "aliases": ("a11y", "mochitest-a11y", "accessibility"), + "enabled_apps": ("firefox",), + "extra_args": { + "flavor": "a11y", + }, }, } -SUPPORTED_FLAVORS = list(chain.from_iterable([f['aliases'] for f in ALL_FLAVORS.values()])) -CANONICAL_FLAVORS = sorted([f['aliases'][0] for f in ALL_FLAVORS.values()]) +SUPPORTED_FLAVORS = list( + 
chain.from_iterable([f["aliases"] for f in ALL_FLAVORS.values()]) +) +CANONICAL_FLAVORS = sorted([f["aliases"][0] for f in ALL_FLAVORS.values()]) def get_default_valgrind_suppression_files(): @@ -105,7 +108,7 @@ def get_default_valgrind_suppression_files(): return rv -class ArgumentContainer(): +class ArgumentContainer: __metaclass__ = ABCMeta @abstractproperty @@ -127,520 +130,764 @@ class ArgumentContainer(): class MochitestArguments(ArgumentContainer): """General mochitest arguments.""" + LOG_LEVELS = ("DEBUG", "INFO", "WARNING", "ERROR", "FATAL") args = [ - [["test_paths"], - {"nargs": "*", - "metavar": "TEST", - "default": [], - "help": "Test to run. Can be a single test file or a directory of tests " - "(to run recursively). If omitted, the entire suite is run.", - }], - [["-f", "--flavor"], - {"choices": SUPPORTED_FLAVORS, - "metavar": "{{{}}}".format(', '.join(CANONICAL_FLAVORS)), - "default": None, - "help": "Only run tests of this flavor.", - }], - [["--keep-open"], - {"nargs": "?", - "type": strtobool, - "const": "true", - "default": None, - "help": "Always keep the browser open after tests complete. Or always close the " - "browser with --keep-open=false", - }], - [["--appname"], - {"dest": "app", - "default": None, - "help": "Override the default binary used to run tests with the path provided, e.g " - "/usr/bin/firefox. If you have run ./mach package beforehand, you can " - "specify 'dist' to run tests against the distribution bundle's binary.", - }], - [["--utility-path"], - {"dest": "utilityPath", - "default": build_obj.bindir if build_obj is not None else None, - "help": "absolute path to directory containing utility programs " - "(xpcshell, ssltunnel, certutil)", - "suppress": True, - }], - [["--certificate-path"], - {"dest": "certPath", - "default": None, - "help": "absolute path to directory containing certificate store to use testing profile", - "suppress": True, - }], - [["--no-autorun"], - {"action": "store_false", - "dest": "autorun", - "default": True, - "help": "Do not start running tests automatically.", - }], - [["--timeout"], - {"type": int, - "default": None, - "help": "The per-test timeout in seconds (default: 60 seconds).", - }], - [["--max-timeouts"], - {"type": int, - "dest": "maxTimeouts", - "default": None, - "help": "The maximum number of timeouts permitted before halting testing.", - }], - [["--total-chunks"], - {"type": int, - "dest": "totalChunks", - "help": "Total number of chunks to split tests into.", - "default": None, - }], - [["--this-chunk"], - {"type": int, - "dest": "thisChunk", - "help": "If running tests by chunks, the chunk number to run.", - "default": None, - }], - [["--chunk-by-runtime"], - {"action": "store_true", - "dest": "chunkByRuntime", - "help": "Group tests such that each chunk has roughly the same runtime.", - "default": False, - }], - [["--chunk-by-dir"], - {"type": int, - "dest": "chunkByDir", - "help": "Group tests together in the same chunk that are in the same top " - "chunkByDir directories.", - "default": 0, - }], - [["--run-by-manifest"], - {"action": "store_true", - "dest": "runByManifest", - "help": "Run each manifest in a single browser instance with a fresh profile.", - "default": False, - "suppress": True, - }], - [["--shuffle"], - {"action": "store_true", - "help": "Shuffle execution order of tests.", - "default": False, - }], - [["--console-level"], - {"dest": "consoleLevel", - "choices": LOG_LEVELS, - "default": "INFO", - "help": "One of {} to determine the level of console logging.".format( - ', 
'.join(LOG_LEVELS)), - "suppress": True, - }], - [["--bisect-chunk"], - {"dest": "bisectChunk", - "default": None, - "help": "Specify the failing test name to find the previous tests that may be " - "causing the failure.", - }], - [["--start-at"], - {"dest": "startAt", - "default": "", - "help": "Start running the test sequence at this test.", - }], - [["--end-at"], - {"dest": "endAt", - "default": "", - "help": "Stop running the test sequence at this test.", - }], - [["--subsuite"], - {"default": None, - "help": "Subsuite of tests to run. Unlike tags, subsuites also remove tests from " - "the default set. Only one can be specified at once.", - }], - [["--setenv"], - {"action": "append", - "dest": "environment", - "metavar": "NAME=VALUE", - "default": [], - "help": "Sets the given variable in the application's environment.", - }], - [["--exclude-extension"], - {"action": "append", - "dest": "extensionsToExclude", - "default": [], - "help": "Excludes the given extension from being installed in the test profile.", - "suppress": True, - }], - [["--browser-arg"], - {"action": "append", - "dest": "browserArgs", - "default": [], - "help": "Provides an argument to the test application (e.g Firefox).", - "suppress": True, - }], - [["--leak-threshold"], - {"type": int, - "dest": "defaultLeakThreshold", - "default": 0, - "help": "Fail if the number of bytes leaked in default processes through " - "refcounted objects (or bytes in classes with MOZ_COUNT_CTOR and " - "MOZ_COUNT_DTOR) is greater than the given number.", - "suppress": True, - }], - [["--fatal-assertions"], - {"action": "store_true", - "dest": "fatalAssertions", - "default": False, - "help": "Abort testing whenever an assertion is hit (requires a debug build to " - "be effective).", - "suppress": True, - }], - [["--extra-profile-file"], - {"action": "append", - "dest": "extraProfileFiles", - "default": [], - "help": "Copy specified files/dirs to testing profile. Can be specified more " - "than once.", - "suppress": True, - }], - [["--install-extension"], - {"action": "append", - "dest": "extensionsToInstall", - "default": [], - "help": "Install the specified extension in the testing profile. Can be a path " - "to a .xpi file.", - }], - [["--profile-path"], - {"dest": "profilePath", - "default": None, - "help": "Directory where the profile will be stored. This directory will be " - "deleted after the tests are finished.", - "suppress": True, - }], - [["--testing-modules-dir"], - {"dest": "testingModulesDir", - "default": None, - "help": "Directory where testing-only JS modules are located.", - "suppress": True, - }], - [["--repeat"], - {"type": int, - "default": 0, - "help": "Repeat the tests the given number of times.", - }], - [["--run-until-failure"], - {"action": "store_true", - "dest": "runUntilFailure", - "default": False, - "help": "Run tests repeatedly but stop the first time a test fails. Default cap " - "is 30 runs, which can be overridden with the --repeat parameter.", - }], - [["--manifest"], - {"dest": "manifestFile", - "default": None, - "help": "Path to a manifestparser (.ini formatted) manifest of tests to run.", - "suppress": True, - }], - [["--extra-mozinfo-json"], - {"dest": "extra_mozinfo_json", - "default": None, - "help": "Filter tests based on a given mozinfo file.", - "suppress": True, - }], - [["--testrun-manifest-file"], - {"dest": "testRunManifestFile", - "default": 'tests.json', - "help": "Overrides the default filename of the tests.json manifest file that is " - "generated by the harness and used by SimpleTest. 
Only useful when running " - "multiple test runs simulatenously on the same machine.", - "suppress": True, - }], - [["--dump-tests"], - {"dest": "dump_tests", - "default": None, - "help": "Specify path to a filename to dump all the tests that will be run", - "suppress": True, - }], - [["--failure-file"], - {"dest": "failureFile", - "default": None, - "help": "Filename of the output file where we can store a .json list of failures " - "to be run in the future with --run-only-tests.", - "suppress": True, - }], - [["--run-slower"], - {"action": "store_true", - "dest": "runSlower", - "default": False, - "help": "Delay execution between tests.", - }], - [["--httpd-path"], - {"dest": "httpdPath", - "default": None, - "help": "Path to the httpd.js file.", - "suppress": True, - }], - [["--setpref"], - {"action": "append", - "metavar": "PREF=VALUE", - "default": [], - "dest": "extraPrefs", - "help": "Defines an extra user preference.", - }], - [["--jsconsole"], - {"action": "store_true", - "default": False, - "help": "Open the Browser Console.", - }], - [["--jsdebugger"], - {"action": "store_true", - "default": False, - "help": "Start the browser JS debugger before running the test. Implies --no-autorun.", - }], - [["--jsdebugger-path"], - {"default": None, - "dest": "jsdebuggerPath", - "help": "Path to a Firefox binary that will be used to run the toolbox. Should " - "be used together with --jsdebugger." - }], - [["--debug-on-failure"], - {"action": "store_true", - "default": False, - "dest": "debugOnFailure", - "help": "Breaks execution and enters the JS debugger on a test failure. Should " - "be used together with --jsdebugger." - }], - [["--disable-e10s"], - {"action": "store_false", - "default": True, - "dest": "e10s", - "help": "Run tests with electrolysis preferences and test filtering disabled.", - }], - [["--enable-fission"], - {"action": "store_true", - "default": False, - "help": "Run tests with fission (site isolation) enabled.", - }], - [["--enable-xorigin-tests"], - {"action": "store_true", - "default": False, - "dest": "xOriginTests", - "help": "Run tests in a cross origin iframe.", - }], - [["--store-chrome-manifest"], - {"action": "store", - "help": "Destination path to write a copy of any chrome manifest " - "written by the harness.", - "default": None, - "suppress": True, - }], - [["--jscov-dir-prefix"], - {"action": "store", - "help": "Directory to store per-test line coverage data as json " - "(browser-chrome only). To emit lcov formatted data, set " - "JS_CODE_COVERAGE_OUTPUT_DIR in the environment.", - "default": None, - "suppress": True, - }], - [["--dmd"], - {"action": "store_true", - "default": False, - "help": "Run tests with DMD active.", - }], - [["--dump-output-directory"], - {"default": None, - "dest": "dumpOutputDirectory", - "help": "Specifies the directory in which to place dumped memory reports.", - }], - [["--dump-about-memory-after-test"], - {"action": "store_true", - "default": False, - "dest": "dumpAboutMemoryAfterTest", - "help": "Dump an about:memory log after each test in the directory specified " - "by --dump-output-directory.", - }], - [["--dump-dmd-after-test"], - {"action": "store_true", - "default": False, - "dest": "dumpDMDAfterTest", - "help": "Dump a DMD log (and an accompanying about:memory log) after each test. " - "These will be dumped into your default temp directory, NOT the directory " - "specified by --dump-output-directory. 
The logs are numbered by test, and " - "each test will include output that indicates the DMD output filename.", - }], - [["--screenshot-on-fail"], - {"action": "store_true", - "default": False, - "dest": "screenshotOnFail", - "help": "Take screenshots on all test failures. Set $MOZ_UPLOAD_DIR to a directory " - "for storing the screenshots." - }], - [["--quiet"], - {"action": "store_true", - "dest": "quiet", - "default": False, - "help": "Do not print test log lines unless a failure occurs.", - }], - [["--headless"], - {"action": "store_true", - "dest": "headless", - "default": False, - "help": "Run tests in headless mode.", - }], - [["--pidfile"], - {"dest": "pidFile", - "default": "", - "help": "Name of the pidfile to generate.", - "suppress": True, - }], - [["--use-test-media-devices"], - {"action": "store_true", - "default": False, - "dest": "useTestMediaDevices", - "help": "Use test media device drivers for media testing.", - }], - [["--gmp-path"], - {"default": None, - "help": "Path to fake GMP plugin. Will be deduced from the binary if not passed.", - "suppress": True, - }], - [["--xre-path"], - {"dest": "xrePath", - "default": None, # individual scripts will set a sane default - "help": "Absolute path to directory containing XRE (probably xulrunner).", - "suppress": True, - }], - [["--symbols-path"], - {"dest": "symbolsPath", - "default": None, - "help": "Absolute path to directory containing breakpad symbols, or the URL of a " - "zip file containing symbols", - "suppress": True, - }], - [["--debugger"], - {"default": None, - "help": "Debugger binary to run tests in. Program name or path.", - }], - [["--debugger-args"], - {"dest": "debuggerArgs", - "default": None, - "help": "Arguments to pass to the debugger.", - }], - [["--valgrind"], - {"default": None, - "help": "Valgrind binary to run tests with. Program name or path.", - }], - [["--valgrind-args"], - {"dest": "valgrindArgs", - "default": None, - "help": "Comma-separated list of extra arguments to pass to Valgrind.", - }], - [["--valgrind-supp-files"], - {"dest": "valgrindSuppFiles", - "default": None, - "help": "Comma-separated list of suppression files to pass to Valgrind.", - }], - [["--debugger-interactive"], - {"action": "store_true", - "dest": "debuggerInteractive", - "default": None, - "help": "Prevents the test harness from redirecting stdout and stderr for " - "interactive debuggers.", - "suppress": True, - }], - [["--tag"], - {"action": "append", - "dest": "test_tags", - "default": None, - "help": "Filter out tests that don't have the given tag. Can be used multiple " - "times in which case the test must contain at least one of the given tags.", - }], - [["--marionette"], - {"default": None, - "help": "host:port to use when connecting to Marionette", - }], - [["--marionette-socket-timeout"], - {"default": None, - "help": "Timeout while waiting to receive a message from the marionette server.", - "suppress": True, - }], - [["--marionette-startup-timeout"], - {"default": None, - "help": "Timeout while waiting for marionette server startup.", - "suppress": True, - }], - [["--cleanup-crashes"], - {"action": "store_true", - "dest": "cleanupCrashes", - "default": False, - "help": "Delete pending crash reports before running tests.", - "suppress": True, - }], - [["--websocket-process-bridge-port"], - {"default": "8191", - "dest": "websocket_process_bridge_port", - "help": "Port for websocket/process bridge. 
Default 8191.", - }], - [["--failure-pattern-file"], - {"default": None, - "dest": "failure_pattern_file", - "help": "File describes all failure patterns of the tests.", - "suppress": True, - }], - [["--sandbox-read-whitelist"], - {"default": [], - "dest": "sandboxReadWhitelist", - "action": "append", - "help": "Path to add to the sandbox whitelist.", - "suppress": True, - }], - [["--verify"], - {"action": "store_true", - "default": False, - "help": "Run tests in verification mode: Run many times in different " - "ways, to see if there are intermittent failures.", - }], - [["--verify-fission"], - {"action": "store_true", - "default": False, - "help": "Run tests once without Fission, once with Fission", - }], - [["--verify-max-time"], - {"type": int, - "default": 3600, - "help": "Maximum time, in seconds, to run in --verify mode.", - }], - [["--enable-webrender"], - {"action": "store_true", - "dest": "enable_webrender", - "default": False, - "help": "Enable the WebRender compositor in Gecko.", - }], - [["--profiler"], - {"action": "store_true", - "dest": "profiler", - "default": False, - "help": "Run the Firefox Profiler and get a performance profile of the " - "mochitest. This is useful to find performance issues, and also " - "to see what exactly the test is doing. To get profiler options run: " - "`MOZ_PROFILER_HELP=1 ./mach run`" - }], - [["--profiler-save-only"], - {"action": "store_true", - "dest": "profilerSaveOnly", - "default": False, - "help": "Run the Firefox Profiler and save it to the path specified by the " - "MOZ_UPLOAD_DIR environment variable." - }], + [ + ["test_paths"], + { + "nargs": "*", + "metavar": "TEST", + "default": [], + "help": "Test to run. Can be a single test file or a directory of tests " + "(to run recursively). If omitted, the entire suite is run.", + }, + ], + [ + ["-f", "--flavor"], + { + "choices": SUPPORTED_FLAVORS, + "metavar": "{{{}}}".format(", ".join(CANONICAL_FLAVORS)), + "default": None, + "help": "Only run tests of this flavor.", + }, + ], + [ + ["--keep-open"], + { + "nargs": "?", + "type": strtobool, + "const": "true", + "default": None, + "help": "Always keep the browser open after tests complete. Or always close the " + "browser with --keep-open=false", + }, + ], + [ + ["--appname"], + { + "dest": "app", + "default": None, + "help": ( + "Override the default binary used to run tests with the path provided, e.g " + "/usr/bin/firefox. If you have run ./mach package beforehand, you can " + "specify 'dist' to run tests against the distribution bundle's binary." 
+ ), + }, + ], + [ + ["--utility-path"], + { + "dest": "utilityPath", + "default": build_obj.bindir if build_obj is not None else None, + "help": "absolute path to directory containing utility programs " + "(xpcshell, ssltunnel, certutil)", + "suppress": True, + }, + ], + [ + ["--certificate-path"], + { + "dest": "certPath", + "default": None, + "help": "absolute path to directory containing certificate store to use testing profile", # NOQA: E501 + "suppress": True, + }, + ], + [ + ["--no-autorun"], + { + "action": "store_false", + "dest": "autorun", + "default": True, + "help": "Do not start running tests automatically.", + }, + ], + [ + ["--timeout"], + { + "type": int, + "default": None, + "help": "The per-test timeout in seconds (default: 60 seconds).", + }, + ], + [ + ["--max-timeouts"], + { + "type": int, + "dest": "maxTimeouts", + "default": None, + "help": "The maximum number of timeouts permitted before halting testing.", + }, + ], + [ + ["--total-chunks"], + { + "type": int, + "dest": "totalChunks", + "help": "Total number of chunks to split tests into.", + "default": None, + }, + ], + [ + ["--this-chunk"], + { + "type": int, + "dest": "thisChunk", + "help": "If running tests by chunks, the chunk number to run.", + "default": None, + }, + ], + [ + ["--chunk-by-runtime"], + { + "action": "store_true", + "dest": "chunkByRuntime", + "help": "Group tests such that each chunk has roughly the same runtime.", + "default": False, + }, + ], + [ + ["--chunk-by-dir"], + { + "type": int, + "dest": "chunkByDir", + "help": "Group tests together in the same chunk that are in the same top " + "chunkByDir directories.", + "default": 0, + }, + ], + [ + ["--run-by-manifest"], + { + "action": "store_true", + "dest": "runByManifest", + "help": "Run each manifest in a single browser instance with a fresh profile.", + "default": False, + "suppress": True, + }, + ], + [ + ["--shuffle"], + { + "action": "store_true", + "help": "Shuffle execution order of tests.", + "default": False, + }, + ], + [ + ["--console-level"], + { + "dest": "consoleLevel", + "choices": LOG_LEVELS, + "default": "INFO", + "help": "One of {} to determine the level of console logging.".format( + ", ".join(LOG_LEVELS) + ), + "suppress": True, + }, + ], + [ + ["--bisect-chunk"], + { + "dest": "bisectChunk", + "default": None, + "help": "Specify the failing test name to find the previous tests that may be " + "causing the failure.", + }, + ], + [ + ["--start-at"], + { + "dest": "startAt", + "default": "", + "help": "Start running the test sequence at this test.", + }, + ], + [ + ["--end-at"], + { + "dest": "endAt", + "default": "", + "help": "Stop running the test sequence at this test.", + }, + ], + [ + ["--subsuite"], + { + "default": None, + "help": "Subsuite of tests to run. Unlike tags, subsuites also remove tests from " + "the default set. 
Only one can be specified at once.", + }, + ], + [ + ["--setenv"], + { + "action": "append", + "dest": "environment", + "metavar": "NAME=VALUE", + "default": [], + "help": "Sets the given variable in the application's environment.", + }, + ], + [ + ["--exclude-extension"], + { + "action": "append", + "dest": "extensionsToExclude", + "default": [], + "help": "Excludes the given extension from being installed in the test profile.", + "suppress": True, + }, + ], + [ + ["--browser-arg"], + { + "action": "append", + "dest": "browserArgs", + "default": [], + "help": "Provides an argument to the test application (e.g Firefox).", + "suppress": True, + }, + ], + [ + ["--leak-threshold"], + { + "type": int, + "dest": "defaultLeakThreshold", + "default": 0, + "help": "Fail if the number of bytes leaked in default processes through " + "refcounted objects (or bytes in classes with MOZ_COUNT_CTOR and " + "MOZ_COUNT_DTOR) is greater than the given number.", + "suppress": True, + }, + ], + [ + ["--fatal-assertions"], + { + "action": "store_true", + "dest": "fatalAssertions", + "default": False, + "help": "Abort testing whenever an assertion is hit (requires a debug build to " + "be effective).", + "suppress": True, + }, + ], + [ + ["--extra-profile-file"], + { + "action": "append", + "dest": "extraProfileFiles", + "default": [], + "help": "Copy specified files/dirs to testing profile. Can be specified more " + "than once.", + "suppress": True, + }, + ], + [ + ["--install-extension"], + { + "action": "append", + "dest": "extensionsToInstall", + "default": [], + "help": "Install the specified extension in the testing profile. Can be a path " + "to a .xpi file.", + }, + ], + [ + ["--profile-path"], + { + "dest": "profilePath", + "default": None, + "help": "Directory where the profile will be stored. This directory will be " + "deleted after the tests are finished.", + "suppress": True, + }, + ], + [ + ["--testing-modules-dir"], + { + "dest": "testingModulesDir", + "default": None, + "help": "Directory where testing-only JS modules are located.", + "suppress": True, + }, + ], + [ + ["--repeat"], + { + "type": int, + "default": 0, + "help": "Repeat the tests the given number of times.", + }, + ], + [ + ["--run-until-failure"], + { + "action": "store_true", + "dest": "runUntilFailure", + "default": False, + "help": "Run tests repeatedly but stop the first time a test fails. Default cap " + "is 30 runs, which can be overridden with the --repeat parameter.", + }, + ], + [ + ["--manifest"], + { + "dest": "manifestFile", + "default": None, + "help": "Path to a manifestparser (.ini formatted) manifest of tests to run.", + "suppress": True, + }, + ], + [ + ["--extra-mozinfo-json"], + { + "dest": "extra_mozinfo_json", + "default": None, + "help": "Filter tests based on a given mozinfo file.", + "suppress": True, + }, + ], + [ + ["--testrun-manifest-file"], + { + "dest": "testRunManifestFile", + "default": "tests.json", + "help": "Overrides the default filename of the tests.json manifest file that is " + "generated by the harness and used by SimpleTest. 
Only useful when running " + "multiple test runs simulatenously on the same machine.", + "suppress": True, + }, + ], + [ + ["--dump-tests"], + { + "dest": "dump_tests", + "default": None, + "help": "Specify path to a filename to dump all the tests that will be run", + "suppress": True, + }, + ], + [ + ["--failure-file"], + { + "dest": "failureFile", + "default": None, + "help": "Filename of the output file where we can store a .json list of failures " + "to be run in the future with --run-only-tests.", + "suppress": True, + }, + ], + [ + ["--run-slower"], + { + "action": "store_true", + "dest": "runSlower", + "default": False, + "help": "Delay execution between tests.", + }, + ], + [ + ["--httpd-path"], + { + "dest": "httpdPath", + "default": None, + "help": "Path to the httpd.js file.", + "suppress": True, + }, + ], + [ + ["--setpref"], + { + "action": "append", + "metavar": "PREF=VALUE", + "default": [], + "dest": "extraPrefs", + "help": "Defines an extra user preference.", + }, + ], + [ + ["--jsconsole"], + { + "action": "store_true", + "default": False, + "help": "Open the Browser Console.", + }, + ], + [ + ["--jsdebugger"], + { + "action": "store_true", + "default": False, + "help": "Start the browser JS debugger before running the test. Implies --no-autorun.", # NOQA: E501 + }, + ], + [ + ["--jsdebugger-path"], + { + "default": None, + "dest": "jsdebuggerPath", + "help": "Path to a Firefox binary that will be used to run the toolbox. Should " + "be used together with --jsdebugger.", + }, + ], + [ + ["--debug-on-failure"], + { + "action": "store_true", + "default": False, + "dest": "debugOnFailure", + "help": "Breaks execution and enters the JS debugger on a test failure. Should " + "be used together with --jsdebugger.", + }, + ], + [ + ["--disable-e10s"], + { + "action": "store_false", + "default": True, + "dest": "e10s", + "help": "Run tests with electrolysis preferences and test filtering disabled.", + }, + ], + [ + ["--enable-fission"], + { + "action": "store_true", + "default": False, + "help": "Run tests with fission (site isolation) enabled.", + }, + ], + [ + ["--enable-xorigin-tests"], + { + "action": "store_true", + "default": False, + "dest": "xOriginTests", + "help": "Run tests in a cross origin iframe.", + }, + ], + [ + ["--store-chrome-manifest"], + { + "action": "store", + "help": "Destination path to write a copy of any chrome manifest " + "written by the harness.", + "default": None, + "suppress": True, + }, + ], + [ + ["--jscov-dir-prefix"], + { + "action": "store", + "help": "Directory to store per-test line coverage data as json " + "(browser-chrome only). To emit lcov formatted data, set " + "JS_CODE_COVERAGE_OUTPUT_DIR in the environment.", + "default": None, + "suppress": True, + }, + ], + [ + ["--dmd"], + { + "action": "store_true", + "default": False, + "help": "Run tests with DMD active.", + }, + ], + [ + ["--dump-output-directory"], + { + "default": None, + "dest": "dumpOutputDirectory", + "help": "Specifies the directory in which to place dumped memory reports.", + }, + ], + [ + ["--dump-about-memory-after-test"], + { + "action": "store_true", + "default": False, + "dest": "dumpAboutMemoryAfterTest", + "help": "Dump an about:memory log after each test in the directory specified " + "by --dump-output-directory.", + }, + ], + [ + ["--dump-dmd-after-test"], + { + "action": "store_true", + "default": False, + "dest": "dumpDMDAfterTest", + "help": "Dump a DMD log (and an accompanying about:memory log) after each test. 
" + "These will be dumped into your default temp directory, NOT the directory " + "specified by --dump-output-directory. The logs are numbered by test, and " + "each test will include output that indicates the DMD output filename.", + }, + ], + [ + ["--screenshot-on-fail"], + { + "action": "store_true", + "default": False, + "dest": "screenshotOnFail", + "help": "Take screenshots on all test failures. Set $MOZ_UPLOAD_DIR to a directory " # NOQA: E501 + "for storing the screenshots.", + }, + ], + [ + ["--quiet"], + { + "action": "store_true", + "dest": "quiet", + "default": False, + "help": "Do not print test log lines unless a failure occurs.", + }, + ], + [ + ["--headless"], + { + "action": "store_true", + "dest": "headless", + "default": False, + "help": "Run tests in headless mode.", + }, + ], + [ + ["--pidfile"], + { + "dest": "pidFile", + "default": "", + "help": "Name of the pidfile to generate.", + "suppress": True, + }, + ], + [ + ["--use-test-media-devices"], + { + "action": "store_true", + "default": False, + "dest": "useTestMediaDevices", + "help": "Use test media device drivers for media testing.", + }, + ], + [ + ["--gmp-path"], + { + "default": None, + "help": "Path to fake GMP plugin. Will be deduced from the binary if not passed.", + "suppress": True, + }, + ], + [ + ["--xre-path"], + { + "dest": "xrePath", + "default": None, # individual scripts will set a sane default + "help": "Absolute path to directory containing XRE (probably xulrunner).", + "suppress": True, + }, + ], + [ + ["--symbols-path"], + { + "dest": "symbolsPath", + "default": None, + "help": "Absolute path to directory containing breakpad symbols, or the URL of a " + "zip file containing symbols", + "suppress": True, + }, + ], + [ + ["--debugger"], + { + "default": None, + "help": "Debugger binary to run tests in. Program name or path.", + }, + ], + [ + ["--debugger-args"], + { + "dest": "debuggerArgs", + "default": None, + "help": "Arguments to pass to the debugger.", + }, + ], + [ + ["--valgrind"], + { + "default": None, + "help": "Valgrind binary to run tests with. Program name or path.", + }, + ], + [ + ["--valgrind-args"], + { + "dest": "valgrindArgs", + "default": None, + "help": "Comma-separated list of extra arguments to pass to Valgrind.", + }, + ], + [ + ["--valgrind-supp-files"], + { + "dest": "valgrindSuppFiles", + "default": None, + "help": "Comma-separated list of suppression files to pass to Valgrind.", + }, + ], + [ + ["--debugger-interactive"], + { + "action": "store_true", + "dest": "debuggerInteractive", + "default": None, + "help": "Prevents the test harness from redirecting stdout and stderr for " + "interactive debuggers.", + "suppress": True, + }, + ], + [ + ["--tag"], + { + "action": "append", + "dest": "test_tags", + "default": None, + "help": "Filter out tests that don't have the given tag. 
Can be used multiple " + "times in which case the test must contain at least one of the given tags.", + }, + ], + [ + ["--marionette"], + { + "default": None, + "help": "host:port to use when connecting to Marionette", + }, + ], + [ + ["--marionette-socket-timeout"], + { + "default": None, + "help": "Timeout while waiting to receive a message from the marionette server.", + "suppress": True, + }, + ], + [ + ["--marionette-startup-timeout"], + { + "default": None, + "help": "Timeout while waiting for marionette server startup.", + "suppress": True, + }, + ], + [ + ["--cleanup-crashes"], + { + "action": "store_true", + "dest": "cleanupCrashes", + "default": False, + "help": "Delete pending crash reports before running tests.", + "suppress": True, + }, + ], + [ + ["--websocket-process-bridge-port"], + { + "default": "8191", + "dest": "websocket_process_bridge_port", + "help": "Port for websocket/process bridge. Default 8191.", + }, + ], + [ + ["--failure-pattern-file"], + { + "default": None, + "dest": "failure_pattern_file", + "help": "File describes all failure patterns of the tests.", + "suppress": True, + }, + ], + [ + ["--sandbox-read-whitelist"], + { + "default": [], + "dest": "sandboxReadWhitelist", + "action": "append", + "help": "Path to add to the sandbox whitelist.", + "suppress": True, + }, + ], + [ + ["--verify"], + { + "action": "store_true", + "default": False, + "help": "Run tests in verification mode: Run many times in different " + "ways, to see if there are intermittent failures.", + }, + ], + [ + ["--verify-fission"], + { + "action": "store_true", + "default": False, + "help": "Run tests once without Fission, once with Fission", + }, + ], + [ + ["--verify-max-time"], + { + "type": int, + "default": 3600, + "help": "Maximum time, in seconds, to run in --verify mode.", + }, + ], + [ + ["--enable-webrender"], + { + "action": "store_true", + "dest": "enable_webrender", + "default": False, + "help": "Enable the WebRender compositor in Gecko.", + }, + ], + [ + ["--profiler"], + { + "action": "store_true", + "dest": "profiler", + "default": False, + "help": "Run the Firefox Profiler and get a performance profile of the " + "mochitest. This is useful to find performance issues, and also " + "to see what exactly the test is doing. To get profiler options run: " + "`MOZ_PROFILER_HELP=1 ./mach run`", + }, + ], + [ + ["--profiler-save-only"], + { + "action": "store_true", + "dest": "profilerSaveOnly", + "default": False, + "help": "Run the Firefox Profiler and save it to the path specified by the " + "MOZ_UPLOAD_DIR environment variable.", + }, + ], ] defaults = { # Bug 1065098 - The gmplugin process fails to produce a leak # log for some reason. - 'ignoreMissingLeaks': ["gmplugin"], - 'extensionsToExclude': ['specialpowers'], + "ignoreMissingLeaks": ["gmplugin"], + "extensionsToExclude": ["specialpowers"], # Set server information on the args object - 'webServer': '127.0.0.1', - 'httpPort': DEFAULT_PORTS['http'], - 'sslPort': DEFAULT_PORTS['https'], - 'webSocketPort': '9988', + "webServer": "127.0.0.1", + "httpPort": DEFAULT_PORTS["http"], + "sslPort": DEFAULT_PORTS["https"], + "webSocketPort": "9988", # The default websocket port is incorrect in mozprofile; it is # set to the SSL proxy setting. 
See: # see https://bugzilla.mozilla.org/show_bug.cgi?id=916517 @@ -651,53 +898,58 @@ class MochitestArguments(ArgumentContainer): """Validate generic options.""" # and android doesn't use 'app' the same way, so skip validation - if parser.app != 'android': + if parser.app != "android": if options.app is None: if build_obj: from mozbuild.base import BinaryNotFoundException + try: options.app = build_obj.get_binary_path() except BinaryNotFoundException as e: - print('{}\n\n{}\n'.format(e, e.help())) + print("{}\n\n{}\n".format(e, e.help())) sys.exit(1) else: parser.error( - "could not find the application path, --appname must be specified") + "could not find the application path, --appname must be specified" + ) elif options.app == "dist" and build_obj: - options.app = build_obj.get_binary_path(where='staged-package') + options.app = build_obj.get_binary_path(where="staged-package") options.app = self.get_full_path(options.app, parser.oldcwd) if not os.path.exists(options.app): - parser.error("Error: Path {} doesn't exist. Are you executing " - "$objdir/_tests/testing/mochitest/runtests.py?".format( - options.app)) + parser.error( + "Error: Path {} doesn't exist. Are you executing " + "$objdir/_tests/testing/mochitest/runtests.py?".format(options.app) + ) if options.flavor is None: - options.flavor = 'plain' + options.flavor = "plain" for value in ALL_FLAVORS.values(): - if options.flavor in value['aliases']: - options.flavor = value['suite'] + if options.flavor in value["aliases"]: + options.flavor = value["suite"] break if options.gmp_path is None and options.app and build_obj: # Need to fix the location of gmp_fake which might not be shipped in the binary gmp_modules = ( - ('gmp-fake', '1.0'), - ('gmp-clearkey', '0.1'), - ('gmp-fakeopenh264', '1.0') + ("gmp-fake", "1.0"), + ("gmp-clearkey", "0.1"), + ("gmp-fakeopenh264", "1.0"), ) options.gmp_path = os.pathsep.join( - os.path.join(build_obj.bindir, *p) for p in gmp_modules) + os.path.join(build_obj.bindir, *p) for p in gmp_modules + ) if options.totalChunks is not None and options.thisChunk is None: - parser.error( - "thisChunk must be specified when totalChunks is specified") + parser.error("thisChunk must be specified when totalChunks is specified") if options.extra_mozinfo_json: if not os.path.isfile(options.extra_mozinfo_json): - parser.error("Error: couldn't find mozinfo.json at '%s'." - % options.extra_mozinfo_json) + parser.error( + "Error: couldn't find mozinfo.json at '%s'." 
+ % options.extra_mozinfo_json + ) options.extra_mozinfo_json = json.load(open(options.extra_mozinfo_json)) @@ -706,25 +958,24 @@ class MochitestArguments(ArgumentContainer): parser.error("thisChunk must be between 1 and totalChunks") if options.chunkByDir and options.chunkByRuntime: - parser.error( - "can only use one of --chunk-by-dir or --chunk-by-runtime") + parser.error("can only use one of --chunk-by-dir or --chunk-by-runtime") if options.xrePath is None: # default xrePath to the app path if not provided # but only if an app path was explicitly provided - if options.app != parser.get_default('app'): + if options.app != parser.get_default("app"): options.xrePath = os.path.dirname(options.app) if mozinfo.isMac: options.xrePath = os.path.join( - os.path.dirname( - options.xrePath), - "Resources") + os.path.dirname(options.xrePath), "Resources" + ) elif build_obj is not None: # otherwise default to dist/bin options.xrePath = build_obj.bindir else: parser.error( - "could not find xre directory, --xre-path must be specified") + "could not find xre directory, --xre-path must be specified" + ) # allow relative paths if options.xrePath: @@ -739,24 +990,25 @@ class MochitestArguments(ArgumentContainer): if options.certPath: options.certPath = self.get_full_path(options.certPath, parser.oldcwd) elif build_obj: - options.certPath = os.path.join(build_obj.topsrcdir, 'build', 'pgo', 'certs') + options.certPath = os.path.join( + build_obj.topsrcdir, "build", "pgo", "certs" + ) if options.symbolsPath and len(urlparse(options.symbolsPath).scheme) < 2: options.symbolsPath = self.get_full_path(options.symbolsPath, parser.oldcwd) elif not options.symbolsPath and build_obj: - options.symbolsPath = os.path.join(build_obj.distdir, 'crashreporter-symbols') + options.symbolsPath = os.path.join( + build_obj.distdir, "crashreporter-symbols" + ) if options.debugOnFailure and not options.jsdebugger: - parser.error( - "--debug-on-failure requires --jsdebugger.") + parser.error("--debug-on-failure requires --jsdebugger.") if options.jsdebuggerPath and not options.jsdebugger: - parser.error( - "--jsdebugger-path requires --jsdebugger.") + parser.error("--jsdebugger-path requires --jsdebugger.") if options.debuggerArgs and not options.debugger: - parser.error( - "--debugger-args requires --debugger.") + parser.error("--debugger-args requires --debugger.") if options.valgrind or options.debugger: # valgrind and some debuggers may cause Gecko to start slowly. Make sure @@ -765,24 +1017,30 @@ class MochitestArguments(ArgumentContainer): options.marionette_socket_timeout = 540 if options.store_chrome_manifest: - options.store_chrome_manifest = os.path.abspath(options.store_chrome_manifest) + options.store_chrome_manifest = os.path.abspath( + options.store_chrome_manifest + ) if not os.path.isdir(os.path.dirname(options.store_chrome_manifest)): parser.error( "directory for %s does not exist as a destination to copy a " - "chrome manifest." % options.store_chrome_manifest) + "chrome manifest." % options.store_chrome_manifest + ) if options.jscov_dir_prefix: options.jscov_dir_prefix = os.path.abspath(options.jscov_dir_prefix) if not os.path.isdir(options.jscov_dir_prefix): parser.error( "directory %s does not exist as a destination for coverage " - "data." % options.jscov_dir_prefix) + "data." % options.jscov_dir_prefix + ) if options.testingModulesDir is None: # Try to guess the testing modules directory. 
- possible = [os.path.join(here, os.path.pardir, 'modules')] + possible = [os.path.join(here, os.path.pardir, "modules")] if build_obj: - possible.insert(0, os.path.join(build_obj.topobjdir, '_tests', 'modules')) + possible.insert( + 0, os.path.join(build_obj.topobjdir, "_tests", "modules") + ) for p in possible: if os.path.isdir(p): @@ -791,39 +1049,40 @@ class MochitestArguments(ArgumentContainer): # Paths to specialpowers and mochijar from the tests archive. options.stagedAddons = [ - os.path.join(here, 'extensions', 'specialpowers'), - os.path.join(here, 'mochijar'), + os.path.join(here, "extensions", "specialpowers"), + os.path.join(here, "mochijar"), ] if build_obj: - objdir_xpi_stage = os.path.join(build_obj.distdir, 'xpi-stage') + objdir_xpi_stage = os.path.join(build_obj.distdir, "xpi-stage") if os.path.isdir(objdir_xpi_stage): options.stagedAddons = [ - os.path.join(objdir_xpi_stage, 'specialpowers'), - os.path.join(objdir_xpi_stage, 'mochijar'), + os.path.join(objdir_xpi_stage, "specialpowers"), + os.path.join(objdir_xpi_stage, "mochijar"), ] - plugins_dir = os.path.join(build_obj.distdir, 'plugins') - if os.path.isdir(plugins_dir) and plugins_dir not in options.extraProfileFiles: + plugins_dir = os.path.join(build_obj.distdir, "plugins") + if ( + os.path.isdir(plugins_dir) + and plugins_dir not in options.extraProfileFiles + ): options.extraProfileFiles.append(plugins_dir) # Even if buildbot is updated, we still want this, as the path we pass in # to the app must be absolute and have proper slashes. if options.testingModulesDir is not None: - options.testingModulesDir = os.path.normpath( - options.testingModulesDir) + options.testingModulesDir = os.path.normpath(options.testingModulesDir) if not os.path.isabs(options.testingModulesDir): - options.testingModulesDir = os.path.abspath( - options.testingModulesDir) + options.testingModulesDir = os.path.abspath(options.testingModulesDir) if not os.path.isdir(options.testingModulesDir): - parser.error('--testing-modules-dir not a directory: %s' % - options.testingModulesDir) + parser.error( + "--testing-modules-dir not a directory: %s" + % options.testingModulesDir + ) - options.testingModulesDir = options.testingModulesDir.replace( - '\\', - '/') - if options.testingModulesDir[-1] != '/': - options.testingModulesDir += '/' + options.testingModulesDir = options.testingModulesDir.replace("\\", "/") + if options.testingModulesDir[-1] != "/": + options.testingModulesDir += "/" if options.runUntilFailure: if not options.repeat: @@ -834,13 +1093,16 @@ class MochitestArguments(ArgumentContainer): if options.dumpAboutMemoryAfterTest or options.dumpDMDAfterTest: if not os.path.isdir(options.dumpOutputDirectory): - parser.error('--dump-output-directory not a directory: %s' % - options.dumpOutputDirectory) + parser.error( + "--dump-output-directory not a directory: %s" + % options.dumpOutputDirectory + ) if options.useTestMediaDevices: if not mozinfo.isLinux: parser.error( - '--use-test-media-devices is only supported on Linux currently') + "--use-test-media-devices is only supported on Linux currently" + ) gst01 = spawn.find_executable("gst-launch-0.1") gst010 = spawn.find_executable("gst-launch-0.10") @@ -849,18 +1111,21 @@ class MochitestArguments(ArgumentContainer): if not (gst01 or gst10 or gst010): parser.error( - 'Missing gst-launch-{0.1,0.10,1.0}, required for ' - '--use-test-media-devices') + "Missing gst-launch-{0.1,0.10,1.0}, required for " + "--use-test-media-devices" + ) if not pactl: parser.error( - 'Missing binary pactl required 
for ' - '--use-test-media-devices') + "Missing binary pactl required for " "--use-test-media-devices" + ) # The a11y and chrome flavors can't run with e10s. - if options.flavor in ('a11y', 'chrome') and options.e10s: - parser.error("mochitest-{} does not support e10s, try again with " - "--disable-e10s.".format(options.flavor)) + if options.flavor in ("a11y", "chrome") and options.e10s: + parser.error( + "mochitest-{} does not support e10s, try again with " + "--disable-e10s.".format(options.flavor) + ) if options.enable_fission: options.extraPrefs.append("fission.autostart=true") @@ -885,8 +1150,9 @@ class MochitestArguments(ArgumentContainer): # need to be normalized here for the mach case. if options.test_paths and build_obj: # Normalize test paths so they are relative to test root - options.test_paths = [build_obj._wrap_path_argument(p).relpath() - for p in options.test_paths] + options.test_paths = [ + build_obj._wrap_path_argument(p).relpath() for p in options.test_paths + ] return options @@ -895,98 +1161,124 @@ class AndroidArguments(ArgumentContainer): """Android specific arguments.""" args = [ - [["--no-install"], - {"action": "store_true", - "default": False, - "help": "Skip the installation of the APK.", - }], - [["--deviceSerial"], - {"dest": "deviceSerial", - "help": "adb serial number of remote device. This is required " - "when more than one device is connected to the host. " - "Use 'adb devices' to see connected devices.", - "default": None, - }], - [["--adbpath"], - {"dest": "adbPath", - "default": None, - "help": "Path to adb binary.", - "suppress": True, - }], - [["--remote-webserver"], - {"dest": "remoteWebServer", - "default": None, - "help": "IP address of the remote web server.", - }], - [["--http-port"], - {"dest": "httpPort", - "default": DEFAULT_PORTS['http'], - "help": "http port of the remote web server.", - "suppress": True, - }], - [["--ssl-port"], - {"dest": "sslPort", - "default": DEFAULT_PORTS['https'], - "help": "ssl port of the remote web server.", - "suppress": True, - }], - [["--remoteTestRoot"], - {"dest": "remoteTestRoot", - "default": None, - "help": "Remote directory to use as test root " - "(eg. /data/local/tmp/test_root).", - "suppress": True, - }], - [["--enable-coverage"], - {"action": "store_true", - "default": False, - "help": "Enable collecting code coverage information when running " - "junit tests.", - }], - [["--coverage-output-dir"], - {"action": "store", - "default": None, - "help": "When using --enable-java-coverage, save the code coverage report " - "files to this directory.", - }], + [ + ["--no-install"], + { + "action": "store_true", + "default": False, + "help": "Skip the installation of the APK.", + }, + ], + [ + ["--deviceSerial"], + { + "dest": "deviceSerial", + "help": "adb serial number of remote device. This is required " + "when more than one device is connected to the host. 
" + "Use 'adb devices' to see connected devices.", + "default": None, + }, + ], + [ + ["--adbpath"], + { + "dest": "adbPath", + "default": None, + "help": "Path to adb binary.", + "suppress": True, + }, + ], + [ + ["--remote-webserver"], + { + "dest": "remoteWebServer", + "default": None, + "help": "IP address of the remote web server.", + }, + ], + [ + ["--http-port"], + { + "dest": "httpPort", + "default": DEFAULT_PORTS["http"], + "help": "http port of the remote web server.", + "suppress": True, + }, + ], + [ + ["--ssl-port"], + { + "dest": "sslPort", + "default": DEFAULT_PORTS["https"], + "help": "ssl port of the remote web server.", + "suppress": True, + }, + ], + [ + ["--remoteTestRoot"], + { + "dest": "remoteTestRoot", + "default": None, + "help": "Remote directory to use as test root " + "(eg. /data/local/tmp/test_root).", + "suppress": True, + }, + ], + [ + ["--enable-coverage"], + { + "action": "store_true", + "default": False, + "help": "Enable collecting code coverage information when running " + "junit tests.", + }, + ], + [ + ["--coverage-output-dir"], + { + "action": "store", + "default": None, + "help": "When using --enable-java-coverage, save the code coverage report " + "files to this directory.", + }, + ], ] defaults = { # we don't want to exclude specialpowers on android just yet - 'extensionsToExclude': [], + "extensionsToExclude": [], # mochijar doesn't get installed via marionette on android - 'extensionsToInstall': [os.path.join(here, 'mochijar')], - 'logFile': 'mochitest.log', - 'utilityPath': None, + "extensionsToInstall": [os.path.join(here, "mochijar")], + "logFile": "mochitest.log", + "utilityPath": None, } def validate(self, parser, options, context): """Validate android options.""" if build_obj: - options.log_mach = '-' + options.log_mach = "-" - objdir_xpi_stage = os.path.join(build_obj.distdir, 'xpi-stage') + objdir_xpi_stage = os.path.join(build_obj.distdir, "xpi-stage") if os.path.isdir(objdir_xpi_stage): options.extensionsToInstall = [ - os.path.join(objdir_xpi_stage, 'mochijar'), - os.path.join(objdir_xpi_stage, 'specialpowers'), + os.path.join(objdir_xpi_stage, "mochijar"), + os.path.join(objdir_xpi_stage, "specialpowers"), ] if options.remoteWebServer is None: if os.name != "nt": options.remoteWebServer = moznetwork.get_ip() else: - parser.error( - "you must specify a --remote-webserver=<ip address>") + parser.error("you must specify a --remote-webserver=<ip address>") options.webServer = options.remoteWebServer if options.app is None: options.app = "org.mozilla.geckoview.test" - if build_obj and 'MOZ_HOST_BIN' in os.environ: - options.xrePath = os.environ['MOZ_HOST_BIN'] + if build_obj and "MOZ_HOST_BIN" in os.environ: + options.xrePath = os.environ["MOZ_HOST_BIN"] # Only reset the xrePath if it wasn't provided if options.xrePath is None: @@ -996,7 +1288,7 @@ class AndroidArguments(ArgumentContainer): options.topsrcdir = build_obj.topsrcdir if options.pidFile != "": - f = open(options.pidFile, 'w') + f = open(options.pidFile, "w") f.write("%s" % os.getpid()) f.close() @@ -1004,16 +1296,17 @@ class AndroidArguments(ArgumentContainer): parser.error("--coverage-output-dir must be used with --enable-coverage") if options.enable_coverage: if not options.autorun: - parser.error( - "--enable-coverage cannot be used with --no-autorun") + parser.error("--enable-coverage cannot be used with --no-autorun") if not options.coverage_output_dir: parser.error( - "--coverage-output-dir must be specified when using --enable-coverage") + "--coverage-output-dir must be 
specified when using --enable-coverage" + ) parent_dir = os.path.dirname(options.coverage_output_dir) if not os.path.isdir(options.coverage_output_dir): parser.error( - "The directory for the coverage output does not exist: %s" % - parent_dir) + "The directory for the coverage output does not exist: %s" + % parent_dir + ) # allow us to keep original application around for cleanup while # running tests @@ -1022,8 +1315,8 @@ class AndroidArguments(ArgumentContainer): container_map = { - 'generic': [MochitestArguments], - 'android': [MochitestArguments, AndroidArguments], + "generic": [MochitestArguments], + "android": [MochitestArguments, AndroidArguments], } @@ -1034,37 +1327,44 @@ class MochitestArgumentParser(ArgumentParser): context = {} def __init__(self, app=None, **kwargs): - ArgumentParser.__init__(self, usage=self.__doc__, conflict_handler='resolve', **kwargs) + ArgumentParser.__init__( + self, usage=self.__doc__, conflict_handler="resolve", **kwargs + ) self.oldcwd = os.getcwd() self.app = app if not self.app and build_obj: if conditions.is_android(build_obj): - self.app = 'android' + self.app = "android" if not self.app: # platform can't be determined and app wasn't specified explicitly, # so just use generic arguments and hope for the best - self.app = 'generic' + self.app = "generic" if self.app not in container_map: - self.error("Unrecognized app '{}'! Must be one of: {}".format( - self.app, ', '.join(container_map.keys()))) + self.error( + "Unrecognized app '{}'! Must be one of: {}".format( + self.app, ", ".join(container_map.keys()) + ) + ) defaults = {} for container in self.containers: defaults.update(container.defaults) - group = self.add_argument_group(container.__class__.__name__, container.__doc__) + group = self.add_argument_group( + container.__class__.__name__, container.__doc__ + ) for cli, kwargs in container.args: # Allocate new lists so references to original don't get mutated. # allowing multiple uses within a single process. - if "default" in kwargs and isinstance(kwargs['default'], list): + if "default" in kwargs and isinstance(kwargs["default"], list): kwargs["default"] = [] - if 'suppress' in kwargs: - if kwargs['suppress']: - kwargs['help'] = SUPPRESS - del kwargs['suppress'] + if "suppress" in kwargs: + if kwargs["suppress"]: + kwargs["help"] = SUPPRESS + del kwargs["suppress"] group.add_argument(*cli, **kwargs) diff --git a/testing/mozbase/mozpower/tests/test_macintelpower.py b/testing/mozbase/mozpower/tests/test_macintelpower.py index 54e089a6c5ca..72fb6767f29e 100644 --- a/testing/mozbase/mozpower/tests/test_macintelpower.py +++ b/testing/mozbase/mozpower/tests/test_macintelpower.py @@ -8,12 +8,11 @@ import time def test_macintelpower_init(macintelpower_obj): - """Tests that the MacIntelPower object is correctly initialized. - """ + """Tests that the MacIntelPower object is correctly initialized.""" assert macintelpower_obj.ipg_path assert macintelpower_obj.ipg - assert macintelpower_obj._os == 'darwin' - assert macintelpower_obj._cpu == 'intel' + assert macintelpower_obj._os == "darwin" + assert macintelpower_obj._cpu == "intel" def test_macintelpower_measuring(macintelpower_obj): @@ -43,12 +42,12 @@ def test_macintelpower_measuring(macintelpower_obj): return test_data with mock.patch( - 'mozpower.intel_power_gadget.IPGResultsHandler.clean_ipg_data' - ) as _: + "mozpower.intel_power_gadget.IPGResultsHandler.clean_ipg_data" + ) as _: with mock.patch( - 'mozpower.intel_power_gadget.IPGResultsHandler.' 
- 'format_ipg_data_to_partial_perfherder' - ) as formatter: + "mozpower.intel_power_gadget.IPGResultsHandler." + "format_ipg_data_to_partial_perfherder" + ) as formatter: formatter.side_effect = formatter_side_effect macintelpower_obj.finalize_power_measurements(wait_interval=2, timeout=30) @@ -63,17 +62,16 @@ def test_macintelpower_measuring(macintelpower_obj): # Check that the IPGResultHandler's methods were # called - macintelpower_obj.ipg_results_handler. \ - clean_ipg_data.assert_called() - macintelpower_obj.ipg_results_handler. \ - format_ipg_data_to_partial_perfherder.assert_called_once_with( - macintelpower_obj.end_time - macintelpower_obj.start_time, 'power-testing' - ) + macintelpower_obj.ipg_results_handler.clean_ipg_data.assert_called() + macintelpower_obj.ipg_results_handler.format_ipg_data_to_partial_perfherder.assert_called_once_with( # NOQA: E501 + macintelpower_obj.end_time - macintelpower_obj.start_time, + "power-testing", + ) # Make sure we can get the expected perfherder data # after formatting assert macintelpower_obj.get_perfherder_data() == test_data -if __name__ == '__main__': +if __name__ == "__main__": mozunit.main() diff --git a/testing/mozharness/mozharness/mozilla/building/buildbase.py b/testing/mozharness/mozharness/mozilla/building/buildbase.py index 490540b9b9e4..a08d0163b814 100755 --- a/testing/mozharness/mozharness/mozilla/building/buildbase.py +++ b/testing/mozharness/mozharness/mozilla/building/buildbase.py @@ -21,19 +21,21 @@ from datetime import datetime import six -from mozharness.base.config import (DEFAULT_CONFIG_PATH, BaseConfig, - parse_config_file) +from mozharness.base.config import DEFAULT_CONFIG_PATH, BaseConfig, parse_config_file from mozharness.base.errors import MakefileErrorList from mozharness.base.log import ERROR, FATAL, OutputParser -from mozharness.base.python import (PerfherderResourceOptionsMixin, - VirtualenvMixin) +from mozharness.base.python import PerfherderResourceOptionsMixin, VirtualenvMixin from mozharness.base.script import PostScriptRun from mozharness.base.vcs.vcsbase import MercurialScript -from mozharness.mozilla.automation import (EXIT_STATUS_DICT, TBPL_FAILURE, - TBPL_RETRY, TBPL_STATUS_DICT, - TBPL_SUCCESS, - TBPL_WORST_LEVEL_TUPLE, - AutomationMixin) +from mozharness.mozilla.automation import ( + EXIT_STATUS_DICT, + TBPL_FAILURE, + TBPL_RETRY, + TBPL_STATUS_DICT, + TBPL_SUCCESS, + TBPL_WORST_LEVEL_TUPLE, + AutomationMixin, +) from mozharness.mozilla.secrets import SecretsMixin AUTOMATION_EXIT_CODES = sorted(EXIT_STATUS_DICT.values()) @@ -42,9 +44,9 @@ MISSING_CFG_KEY_MSG = "The key '%s' could not be determined \ Please add this to your config." ERROR_MSGS = { - 'comments_undetermined': '"comments" could not be determined. This may be \ + "comments_undetermined": '"comments" could not be determined. 
This may be \ because it was a forced build.', - 'tooltool_manifest_undetermined': '"tooltool_manifest_src" not set, \ + "tooltool_manifest_undetermined": '"tooltool_manifest_src" not set, \ Skipping run_tooltool...', } @@ -53,17 +55,17 @@ Skipping run_tooltool...', TBPL_UPLOAD_ERRORS = [ { - 'regex': re.compile("Connection timed out"), - 'level': TBPL_RETRY, + "regex": re.compile("Connection timed out"), + "level": TBPL_RETRY, }, { - 'regex': re.compile("Connection reset by peer"), - 'level': TBPL_RETRY, + "regex": re.compile("Connection reset by peer"), + "level": TBPL_RETRY, }, { - 'regex': re.compile("Connection refused"), - 'level': TBPL_RETRY, - } + "regex": re.compile("Connection refused"), + "level": TBPL_RETRY, + }, ] @@ -78,12 +80,13 @@ class MakeUploadOutputParser(OutputParser): # let's check for retry errors which will give log levels: # tbpl status as RETRY and mozharness status as WARNING for error_check in self.tbpl_error_list: - if error_check['regex'].search(line): + if error_check["regex"].search(line): self.num_warnings += 1 self.warning(line) self.tbpl_status = self.worst_level( - error_check['level'], self.tbpl_status, - levels=TBPL_WORST_LEVEL_TUPLE + error_check["level"], + self.tbpl_status, + levels=TBPL_WORST_LEVEL_TUPLE, ) break else: @@ -109,61 +112,65 @@ def get_mozconfig_path(script, config, dirs): :param dirs: The directories specified for this build. :type dirs: dict """ - COMPOSITE_KEYS = {'mozconfig_variant', 'app_name', 'mozconfig_platform'} + COMPOSITE_KEYS = {"mozconfig_variant", "app_name", "mozconfig_platform"} have_composite_mozconfig = COMPOSITE_KEYS <= set(config.keys()) - have_partial_composite_mozconfig = len( - COMPOSITE_KEYS & set(config.keys())) > 0 - have_src_mozconfig = 'src_mozconfig' in config - have_src_mozconfig_manifest = 'src_mozconfig_manifest' in config + have_partial_composite_mozconfig = len(COMPOSITE_KEYS & set(config.keys())) > 0 + have_src_mozconfig = "src_mozconfig" in config + have_src_mozconfig_manifest = "src_mozconfig_manifest" in config # first determine the mozconfig path if have_partial_composite_mozconfig and not have_composite_mozconfig: raise MozconfigPathError( "All or none of 'app_name', 'mozconfig_platform' and `mozconfig_variant' must be " - "in the config in order to determine the mozconfig.") + "in the config in order to determine the mozconfig." + ) elif have_composite_mozconfig and have_src_mozconfig: raise MozconfigPathError( "'src_mozconfig' or 'mozconfig_variant' must be " - "in the config but not both in order to determine the mozconfig.") + "in the config but not both in order to determine the mozconfig." + ) elif have_composite_mozconfig and have_src_mozconfig_manifest: raise MozconfigPathError( "'src_mozconfig_manifest' or 'mozconfig_variant' must be " - "in the config but not both in order to determine the mozconfig.") + "in the config but not both in order to determine the mozconfig." + ) elif have_src_mozconfig and have_src_mozconfig_manifest: raise MozconfigPathError( "'src_mozconfig' or 'src_mozconfig_manifest' must be " - "in the config but not both in order to determine the mozconfig.") + "in the config but not both in order to determine the mozconfig." 
+ ) elif have_composite_mozconfig: - src_mozconfig = '%(app_name)s/config/mozconfigs/%(platform)s/%(variant)s' % { - 'app_name': config['app_name'], - 'platform': config['mozconfig_platform'], - 'variant': config['mozconfig_variant'], + src_mozconfig = "%(app_name)s/config/mozconfigs/%(platform)s/%(variant)s" % { + "app_name": config["app_name"], + "platform": config["mozconfig_platform"], + "variant": config["mozconfig_variant"], } - abs_mozconfig_path = os.path.join(dirs['abs_src_dir'], src_mozconfig) + abs_mozconfig_path = os.path.join(dirs["abs_src_dir"], src_mozconfig) elif have_src_mozconfig: abs_mozconfig_path = os.path.join( - dirs['abs_src_dir'], config.get('src_mozconfig')) + dirs["abs_src_dir"], config.get("src_mozconfig") + ) elif have_src_mozconfig_manifest: - manifest = os.path.join( - dirs['abs_work_dir'], - config['src_mozconfig_manifest']) + manifest = os.path.join(dirs["abs_work_dir"], config["src_mozconfig_manifest"]) if not os.path.exists(manifest): raise MozconfigPathError( - 'src_mozconfig_manifest: "%s" not found. Does it exist?' % - (manifest,)) + 'src_mozconfig_manifest: "%s" not found. Does it exist?' % (manifest,) + ) else: with script.opened(manifest, error_level=ERROR) as (fh, err): if err: raise MozconfigPathError( - "%s exists but coud not read properties" % - manifest) + "%s exists but coud not read properties" % manifest + ) abs_mozconfig_path = os.path.join( - dirs['abs_src_dir'], json.load(fh)['gecko_path']) + dirs["abs_src_dir"], json.load(fh)["gecko_path"] + ) else: raise MozconfigPathError( "Must provide 'app_name', 'mozconfig_platform' and 'mozconfig_variant'; " "or one of 'src_mozconfig' or 'src_mozconfig_manifest' in the config " - "in order to determine the mozconfig.") + "in order to determine the mozconfig." + ) return abs_mozconfig_path @@ -187,7 +194,7 @@ class BuildingConfig(BaseConfig): # eg ('builds/branch_specifics.py', {'foo': 'bar'}) all_config_dicts = [] # important config files - variant_cfg_file = pool_cfg_file = '' + variant_cfg_file = pool_cfg_file = "" # we want to make the order in which the options were given # not matter. ie: you can supply --branch before --build-pool @@ -226,8 +233,7 @@ class BuildingConfig(BaseConfig): # now let's update config with the remaining config files. # this functionality is the same as the base class all_config_dicts.extend( - super(BuildingConfig, self).get_cfgs_from_files(all_config_files, - options) + super(BuildingConfig, self).get_cfgs_from_files(all_config_files, options) ) # stack variant, branch, and pool cfg files on top of that, @@ -237,13 +243,13 @@ class BuildingConfig(BaseConfig): all_config_dicts.append( (variant_cfg_file, parse_config_file(variant_cfg_file)) ) - config_paths = options.config_paths or ['.'] + config_paths = options.config_paths or ["."] if pool_cfg_file: # take only the specific pool. 
If we are here, the pool # must be present build_pool_configs = parse_config_file( - pool_cfg_file, - search_path=config_paths + [DEFAULT_CONFIG_PATH]) + pool_cfg_file, search_path=config_paths + [DEFAULT_CONFIG_PATH] + ) all_config_dicts.append( (pool_cfg_file, build_pool_configs[options.build_pool]) ) @@ -263,112 +269,113 @@ class BuildOptionParser(object): # *It will warn and fail if there is not a config for the current # platform/bits build_variants = { - 'add-on-devel': 'builds/releng_sub_%s_configs/%s_add-on-devel.py', - 'asan': 'builds/releng_sub_%s_configs/%s_asan.py', - 'asan-tc': 'builds/releng_sub_%s_configs/%s_asan_tc.py', - 'asan-reporter-tc': 'builds/releng_sub_%s_configs/%s_asan_reporter_tc.py', - 'fuzzing-asan-tc': 'builds/releng_sub_%s_configs/%s_fuzzing_asan_tc.py', - 'tsan-tc': 'builds/releng_sub_%s_configs/%s_tsan_tc.py', - 'fuzzing-tsan-tc': 'builds/releng_sub_%s_configs/%s_fuzzing_tsan_tc.py', - 'cross-debug': 'builds/releng_sub_%s_configs/%s_cross_debug.py', - 'cross-debug-searchfox': 'builds/releng_sub_%s_configs/%s_cross_debug_searchfox.py', - 'cross-noopt-debug': 'builds/releng_sub_%s_configs/%s_cross_noopt_debug.py', - 'cross-fuzzing-asan': 'builds/releng_sub_%s_configs/%s_cross_fuzzing_asan.py', - 'cross-fuzzing-debug': 'builds/releng_sub_%s_configs/%s_cross_fuzzing_debug.py', - 'debug': 'builds/releng_sub_%s_configs/%s_debug.py', - 'fuzzing-debug': 'builds/releng_sub_%s_configs/%s_fuzzing_debug.py', - 'asan-and-debug': 'builds/releng_sub_%s_configs/%s_asan_and_debug.py', - 'asan-tc-and-debug': 'builds/releng_sub_%s_configs/%s_asan_tc_and_debug.py', - 'stat-and-debug': 'builds/releng_sub_%s_configs/%s_stat_and_debug.py', - 'code-coverage-debug': 'builds/releng_sub_%s_configs/%s_code_coverage_debug.py', - 'code-coverage-opt': 'builds/releng_sub_%s_configs/%s_code_coverage_opt.py', - 'source': 'builds/releng_sub_%s_configs/%s_source.py', - 'noopt-debug': 'builds/releng_sub_%s_configs/%s_noopt_debug.py', - 'api-16-gradle-dependencies': - 'builds/releng_sub_%s_configs/%s_api_16_gradle_dependencies.py', - 'api-16': 'builds/releng_sub_%s_configs/%s_api_16.py', - 'api-16-beta': 'builds/releng_sub_%s_configs/%s_api_16_beta.py', - 'api-16-beta-debug': 'builds/releng_sub_%s_configs/%s_api_16_beta_debug.py', - 'api-16-debug': 'builds/releng_sub_%s_configs/%s_api_16_debug.py', - 'api-16-debug-ccov': 'builds/releng_sub_%s_configs/%s_api_16_debug_ccov.py', - 'api-16-debug-searchfox': 'builds/releng_sub_%s_configs/%s_api_16_debug_searchfox.py', - 'api-16-gradle': 'builds/releng_sub_%s_configs/%s_api_16_gradle.py', - 'api-16-profile-generate': 'builds/releng_sub_%s_configs/%s_api_16_profile_generate.py', - 'rusttests': 'builds/releng_sub_%s_configs/%s_rusttests.py', - 'rusttests-debug': 'builds/releng_sub_%s_configs/%s_rusttests_debug.py', - 'x86': 'builds/releng_sub_%s_configs/%s_x86.py', - 'x86-beta': 'builds/releng_sub_%s_configs/%s_x86_beta.py', - 'x86-beta-debug': 'builds/releng_sub_%s_configs/%s_x86_beta_debug.py', - 'x86-debug': 'builds/releng_sub_%s_configs/%s_x86_debug.py', - 'x86-fuzzing-debug': 'builds/releng_sub_%s_configs/%s_x86_fuzzing_debug.py', - 'x86_64': 'builds/releng_sub_%s_configs/%s_x86_64.py', - 'x86_64-beta': 'builds/releng_sub_%s_configs/%s_x86_64_beta.py', - 'x86_64-beta-debug': 'builds/releng_sub_%s_configs/%s_x86_64_beta_debug.py', - 'x86_64-debug': 'builds/releng_sub_%s_configs/%s_x86_64_debug.py', - 'x86_64-fuzzing-asan': 'builds/releng_sub_%s_configs/%s_x86_64_fuzzing_asan.py', - 'api-16-partner-sample1': 
'builds/releng_sub_%s_configs/%s_api_16_partner_sample1.py', - 'aarch64': 'builds/releng_sub_%s_configs/%s_aarch64.py', - 'aarch64-beta': 'builds/releng_sub_%s_configs/%s_aarch64_beta.py', - 'aarch64-beta-debug': 'builds/releng_sub_%s_configs/%s_aarch64_beta_debug.py', - 'aarch64-pgo': 'builds/releng_sub_%s_configs/%s_aarch64_pgo.py', - 'aarch64-debug': 'builds/releng_sub_%s_configs/%s_aarch64_debug.py', - 'android-geckoview-docs': 'builds/releng_sub_%s_configs/%s_geckoview_docs.py', - 'valgrind': 'builds/releng_sub_%s_configs/%s_valgrind.py', + "add-on-devel": "builds/releng_sub_%s_configs/%s_add-on-devel.py", + "asan": "builds/releng_sub_%s_configs/%s_asan.py", + "asan-tc": "builds/releng_sub_%s_configs/%s_asan_tc.py", + "asan-reporter-tc": "builds/releng_sub_%s_configs/%s_asan_reporter_tc.py", + "fuzzing-asan-tc": "builds/releng_sub_%s_configs/%s_fuzzing_asan_tc.py", + "tsan-tc": "builds/releng_sub_%s_configs/%s_tsan_tc.py", + "fuzzing-tsan-tc": "builds/releng_sub_%s_configs/%s_fuzzing_tsan_tc.py", + "cross-debug": "builds/releng_sub_%s_configs/%s_cross_debug.py", + "cross-debug-searchfox": "builds/releng_sub_%s_configs/%s_cross_debug_searchfox.py", + "cross-noopt-debug": "builds/releng_sub_%s_configs/%s_cross_noopt_debug.py", + "cross-fuzzing-asan": "builds/releng_sub_%s_configs/%s_cross_fuzzing_asan.py", + "cross-fuzzing-debug": "builds/releng_sub_%s_configs/%s_cross_fuzzing_debug.py", + "debug": "builds/releng_sub_%s_configs/%s_debug.py", + "fuzzing-debug": "builds/releng_sub_%s_configs/%s_fuzzing_debug.py", + "asan-and-debug": "builds/releng_sub_%s_configs/%s_asan_and_debug.py", + "asan-tc-and-debug": "builds/releng_sub_%s_configs/%s_asan_tc_and_debug.py", + "stat-and-debug": "builds/releng_sub_%s_configs/%s_stat_and_debug.py", + "code-coverage-debug": "builds/releng_sub_%s_configs/%s_code_coverage_debug.py", + "code-coverage-opt": "builds/releng_sub_%s_configs/%s_code_coverage_opt.py", + "source": "builds/releng_sub_%s_configs/%s_source.py", + "noopt-debug": "builds/releng_sub_%s_configs/%s_noopt_debug.py", + "api-16-gradle-dependencies": "builds/releng_sub_%s_configs/%s_api_16_gradle_dependencies.py", # NOQA: E501 + "api-16": "builds/releng_sub_%s_configs/%s_api_16.py", + "api-16-beta": "builds/releng_sub_%s_configs/%s_api_16_beta.py", + "api-16-beta-debug": "builds/releng_sub_%s_configs/%s_api_16_beta_debug.py", + "api-16-debug": "builds/releng_sub_%s_configs/%s_api_16_debug.py", + "api-16-debug-ccov": "builds/releng_sub_%s_configs/%s_api_16_debug_ccov.py", + "api-16-debug-searchfox": "builds/releng_sub_%s_configs/%s_api_16_debug_searchfox.py", + "api-16-gradle": "builds/releng_sub_%s_configs/%s_api_16_gradle.py", + "api-16-profile-generate": "builds/releng_sub_%s_configs/%s_api_16_profile_generate.py", + "rusttests": "builds/releng_sub_%s_configs/%s_rusttests.py", + "rusttests-debug": "builds/releng_sub_%s_configs/%s_rusttests_debug.py", + "x86": "builds/releng_sub_%s_configs/%s_x86.py", + "x86-beta": "builds/releng_sub_%s_configs/%s_x86_beta.py", + "x86-beta-debug": "builds/releng_sub_%s_configs/%s_x86_beta_debug.py", + "x86-debug": "builds/releng_sub_%s_configs/%s_x86_debug.py", + "x86-fuzzing-debug": "builds/releng_sub_%s_configs/%s_x86_fuzzing_debug.py", + "x86_64": "builds/releng_sub_%s_configs/%s_x86_64.py", + "x86_64-beta": "builds/releng_sub_%s_configs/%s_x86_64_beta.py", + "x86_64-beta-debug": "builds/releng_sub_%s_configs/%s_x86_64_beta_debug.py", + "x86_64-debug": "builds/releng_sub_%s_configs/%s_x86_64_debug.py", + "x86_64-fuzzing-asan": 
"builds/releng_sub_%s_configs/%s_x86_64_fuzzing_asan.py", + "api-16-partner-sample1": "builds/releng_sub_%s_configs/%s_api_16_partner_sample1.py", + "aarch64": "builds/releng_sub_%s_configs/%s_aarch64.py", + "aarch64-beta": "builds/releng_sub_%s_configs/%s_aarch64_beta.py", + "aarch64-beta-debug": "builds/releng_sub_%s_configs/%s_aarch64_beta_debug.py", + "aarch64-pgo": "builds/releng_sub_%s_configs/%s_aarch64_pgo.py", + "aarch64-debug": "builds/releng_sub_%s_configs/%s_aarch64_debug.py", + "android-geckoview-docs": "builds/releng_sub_%s_configs/%s_geckoview_docs.py", + "valgrind": "builds/releng_sub_%s_configs/%s_valgrind.py", } - build_pool_cfg_file = 'builds/build_pool_specifics.py' + build_pool_cfg_file = "builds/build_pool_specifics.py" @classmethod def _query_pltfrm_and_bits(cls, target_option, options): - """ determine platform and bits + """determine platform and bits This can be from either from a supplied --platform and --bits or parsed from given config file names. """ error_msg = ( - 'Whoops!\nYou are trying to pass a shortname for ' - '%s. \nHowever, I need to know the %s to find the appropriate ' + "Whoops!\nYou are trying to pass a shortname for " + "%s. \nHowever, I need to know the %s to find the appropriate " 'filename. You can tell me by passing:\n\t"%s" or a config ' 'filename via "--config" with %s in it. \nIn either case, these ' - 'option arguments must come before --custom-build-variant.' + "option arguments must come before --custom-build-variant." ) current_config_files = options.config_files or [] if not cls.bits: # --bits has not been supplied # lets parse given config file names for 32 or 64 for cfg_file_name in current_config_files: - if '32' in cfg_file_name: - cls.bits = '32' + if "32" in cfg_file_name: + cls.bits = "32" break - if '64' in cfg_file_name: - cls.bits = '64' + if "64" in cfg_file_name: + cls.bits = "64" break else: - sys.exit(error_msg % (target_option, 'bits', '--bits', - '"32" or "64"')) + sys.exit(error_msg % (target_option, "bits", "--bits", '"32" or "64"')) if not cls.platform: # --platform has not been supplied # lets parse given config file names for platform for cfg_file_name in current_config_files: - if 'windows' in cfg_file_name: - cls.platform = 'windows' + if "windows" in cfg_file_name: + cls.platform = "windows" break - if 'mac' in cfg_file_name: - cls.platform = 'mac' + if "mac" in cfg_file_name: + cls.platform = "mac" break - if 'linux' in cfg_file_name: - cls.platform = 'linux' + if "linux" in cfg_file_name: + cls.platform = "linux" break - if 'android' in cfg_file_name: - cls.platform = 'android' + if "android" in cfg_file_name: + cls.platform = "android" break else: sys.exit( - error_msg % - (target_option, - 'platform', - '--platform', - '"linux", "windows", "mac", or "android"')) + error_msg + % ( + target_option, + "platform", + "--platform", + '"linux", "windows", "mac", or "android"', + ) + ) return cls.bits, cls.platform @classmethod @@ -398,20 +405,20 @@ class BuildOptionParser(object): for path in config_paths: if os.path.exists(os.path.join(path, prospective_cfg_path)): # success! we found a config file - valid_variant_cfg_path = os.path.join(path, - prospective_cfg_path) + valid_variant_cfg_path = os.path.join(path, prospective_cfg_path) break return valid_variant_cfg_path, prospective_cfg_path @classmethod def set_build_variant(cls, option, opt, value, parser): - """ sets an extra config file. + """sets an extra config file. 
This is done by either taking an existing filepath or by taking a valid shortname coupled with known platform/bits. """ valid_variant_cfg_path, prospective_cfg_path = cls.find_variant_cfg_path( - '--custom-build-variant-cfg', value, parser) + "--custom-build-variant-cfg", value, parser + ) if not valid_variant_cfg_path: # either the value was an indeterminable path or an invalid short @@ -421,8 +428,9 @@ class BuildOptionParser(object): "appropriate config file could not be determined. Tried " "using: '%s' but it was not:" "\n\t-- a valid shortname: %s " - "\n\t-- a valid variant for the given platform and bits." % - (prospective_cfg_path, str(list(cls.build_variants.keys())))) + "\n\t-- a valid variant for the given platform and bits." + % (prospective_cfg_path, str(list(cls.build_variants.keys()))) + ) parser.values.config_files.append(valid_variant_cfg_path) setattr(parser.values, option.dest, value) # the pool @@ -452,60 +460,92 @@ class BuildOptionParser(object): # this global depends on BuildOptionParser and therefore can not go at the # top of the file BUILD_BASE_CONFIG_OPTIONS = [ - [['--developer-run'], { - "action": "store_false", - "dest": "is_automation", - "default": True, - "help": "If this is running outside of Mozilla's build" - "infrastructure, use this option. It ignores actions" - "that are not needed and adds config checks."}], - [['--platform'], { - "action": "callback", - "callback": BuildOptionParser.set_platform, - "type": "string", - "dest": "platform", - "help": "Sets the platform we are running this against" - " valid values: 'windows', 'mac', 'linux'"}], - [['--bits'], { - "action": "callback", - "callback": BuildOptionParser.set_bits, - "type": "string", - "dest": "bits", - "help": "Sets which bits we are building this against" - " valid values: '32', '64'"}], - [['--custom-build-variant-cfg'], { - "action": "callback", - "callback": BuildOptionParser.set_build_variant, - "type": "string", - "dest": "build_variant", - "help": "Sets the build type and will determine appropriate" - " additional config to use. Either pass a config path" - " or use a valid shortname from: " - "%s" % (list(BuildOptionParser.build_variants.keys()),)}], - [['--build-pool'], { - "action": "callback", - "callback": BuildOptionParser.set_build_pool, - "type": "string", - "dest": "build_pool", - "help": "This will update the config with specific pool" - " environment keys/values. The dicts for this are" - " in %s\nValid values: staging or" - " production" % ('builds/build_pool_specifics.py',)}], - [['--branch'], { - "action": "callback", - "callback": BuildOptionParser.set_build_branch, - "type": "string", - "dest": "branch", - "help": "This sets the branch we will be building this for."}], - [['--enable-nightly'], { - "action": "store_true", - "dest": "nightly_build", - "default": False, - "help": "Sets the build to run in nightly mode"}], - [['--who'], { - "dest": "who", - "default": '', - "help": "stores who made the created the change."}], + [ + ["--developer-run"], + { + "action": "store_false", + "dest": "is_automation", + "default": True, + "help": "If this is running outside of Mozilla's build" + "infrastructure, use this option. 
It ignores actions" + "that are not needed and adds config checks.", + }, + ], + [ + ["--platform"], + { + "action": "callback", + "callback": BuildOptionParser.set_platform, + "type": "string", + "dest": "platform", + "help": "Sets the platform we are running this against" + " valid values: 'windows', 'mac', 'linux'", + }, + ], + [ + ["--bits"], + { + "action": "callback", + "callback": BuildOptionParser.set_bits, + "type": "string", + "dest": "bits", + "help": "Sets which bits we are building this against" + " valid values: '32', '64'", + }, + ], + [ + ["--custom-build-variant-cfg"], + { + "action": "callback", + "callback": BuildOptionParser.set_build_variant, + "type": "string", + "dest": "build_variant", + "help": "Sets the build type and will determine appropriate" + " additional config to use. Either pass a config path" + " or use a valid shortname from: " + "%s" % (list(BuildOptionParser.build_variants.keys()),), + }, + ], + [ + ["--build-pool"], + { + "action": "callback", + "callback": BuildOptionParser.set_build_pool, + "type": "string", + "dest": "build_pool", + "help": "This will update the config with specific pool" + " environment keys/values. The dicts for this are" + " in %s\nValid values: staging or" + " production" % ("builds/build_pool_specifics.py",), + }, + ], + [ + ["--branch"], + { + "action": "callback", + "callback": BuildOptionParser.set_build_branch, + "type": "string", + "dest": "branch", + "help": "This sets the branch we will be building this for.", + }, + ], + [ + ["--enable-nightly"], + { + "action": "store_true", + "dest": "nightly_build", + "default": False, + "help": "Sets the build to run in nightly mode", + }, + ], + [ + ["--who"], + { + "dest": "who", + "default": "", + "help": "stores who made the created the change.", + }, + ], ] @@ -517,9 +557,13 @@ def generate_build_UID(): return uuid.uuid4().hex -class BuildScript(AutomationMixin, - VirtualenvMixin, MercurialScript, - SecretsMixin, PerfherderResourceOptionsMixin): +class BuildScript( + AutomationMixin, + VirtualenvMixin, + MercurialScript, + SecretsMixin, + PerfherderResourceOptionsMixin, +): def __init__(self, **kwargs): # objdir is referenced in _query_abs_dirs() so let's make sure we # have that attribute before calling BaseScript.__init__ @@ -533,8 +577,8 @@ class BuildScript(AutomationMixin, # TODO find out if that time diff matters or if we just use it to # separate each build self.epoch_timestamp = int(time.mktime(datetime.now().timetuple())) - self.branch = self.config.get('branch') - self.stage_platform = self.config.get('stage_platform') + self.branch = self.config.get("branch") + self.stage_platform = self.config.get("stage_platform") if not self.branch or not self.stage_platform: if not self.branch: self.error("'branch' not determined and is required") @@ -555,13 +599,13 @@ class BuildScript(AutomationMixin, def _pre_config_lock(self, rw_config): c = self.config cfg_files_and_dicts = rw_config.all_cfg_files_and_dicts - build_pool = c.get('build_pool', '') - build_variant = c.get('build_variant', '') - variant_cfg = '' + build_pool = c.get("build_pool", "") + build_variant = c.get("build_variant", "") + variant_cfg = "" if build_variant: variant_cfg = BuildOptionParser.build_variants[build_variant] % ( BuildOptionParser.platform, - BuildOptionParser.bits + BuildOptionParser.bits, ) build_pool_cfg = BuildOptionParser.build_pool_cfg_file @@ -572,42 +616,49 @@ items from that key's value." 
for i, (target_file, target_dict) in enumerate(cfg_files_and_dicts): if build_pool_cfg and build_pool_cfg in target_file: self.info( - cfg_match_msg % { - 'option': '--build-pool', - 'type': build_pool, - 'type_config_file': build_pool_cfg, + cfg_match_msg + % { + "option": "--build-pool", + "type": build_pool, + "type_config_file": build_pool_cfg, } ) if variant_cfg and variant_cfg in target_file: self.info( - cfg_match_msg % { - 'option': '--custom-build-variant-cfg', - 'type': build_variant, - 'type_config_file': variant_cfg, + cfg_match_msg + % { + "option": "--custom-build-variant-cfg", + "type": build_variant, + "type_config_file": variant_cfg, } ) - self.info('To generate a config file based upon options passed and ' - 'config files used, run script as before but extend options ' - 'with "--dump-config"') - self.info('For a diff of where self.config got its items, ' - 'run the script again as before but extend options with: ' - '"--dump-config-hierarchy"') - self.info("Both --dump-config and --dump-config-hierarchy don't " - "actually run any actions.") + self.info( + "To generate a config file based upon options passed and " + "config files used, run script as before but extend options " + 'with "--dump-config"' + ) + self.info( + "For a diff of where self.config got its items, " + "run the script again as before but extend options with: " + '"--dump-config-hierarchy"' + ) + self.info( + "Both --dump-config and --dump-config-hierarchy don't " + "actually run any actions." + ) def _query_objdir(self): if self.objdir: return self.objdir - if not self.config.get('objdir'): - return self.fatal(MISSING_CFG_KEY_MSG % ('objdir',)) - self.objdir = self.config['objdir'] + if not self.config.get("objdir"): + return self.fatal(MISSING_CFG_KEY_MSG % ("objdir",)) + self.objdir = self.config["objdir"] return self.objdir def query_is_nightly_promotion(self): - platform_enabled = self.config.get('enable_nightly_promotion') - branch_enabled = self.branch in self.config.get( - 'nightly_promotion_branches') + platform_enabled = self.config.get("enable_nightly_promotion") + branch_enabled = self.branch in self.config.get("nightly_promotion_branches") return platform_enabled and branch_enabled def query_build_env(self, **kwargs): @@ -615,41 +666,36 @@ items from that key's value." 
# let's evoke the base query_env and make a copy of it # as we don't always want every key below added to the same dict - env = copy.deepcopy( - super(BuildScript, self).query_env(**kwargs) - ) + env = copy.deepcopy(super(BuildScript, self).query_env(**kwargs)) if self.query_is_nightly() or self.query_is_nightly_promotion(): # taskcluster sets the update channel for shipping builds # explicitly - if c.get('update_channel'): - update_channel = c['update_channel'] + if c.get("update_channel"): + update_channel = c["update_channel"] if isinstance(update_channel, six.text_type): update_channel = update_channel.encode("utf-8") env["MOZ_UPDATE_CHANNEL"] = update_channel else: # let's just give the generic channel based on branch env["MOZ_UPDATE_CHANNEL"] = "nightly-%s" % (self.branch,) - self.info( - "Update channel set to: {}".format( - env["MOZ_UPDATE_CHANNEL"])) + self.info("Update channel set to: {}".format(env["MOZ_UPDATE_CHANNEL"])) return env def query_mach_build_env(self, multiLocale=None): c = self.config if multiLocale is None and self.query_is_nightly(): - multiLocale = c.get('multi_locale', False) + multiLocale = c.get("multi_locale", False) mach_env = {} - if c.get('upload_env'): - mach_env.update(c['upload_env']) + if c.get("upload_env"): + mach_env.update(c["upload_env"]) # this prevents taskcluster from overwriting the target files with # the multilocale files. Put everything from the en-US build in a # separate folder. - if multiLocale and self.config.get('taskcluster_nightly'): - if 'UPLOAD_PATH' in mach_env: - mach_env['UPLOAD_PATH'] = os.path.join(mach_env['UPLOAD_PATH'], - 'en-US') + if multiLocale and self.config.get("taskcluster_nightly"): + if "UPLOAD_PATH" in mach_env: + mach_env["UPLOAD_PATH"] = os.path.join(mach_env["UPLOAD_PATH"], "en-US") return mach_env def _get_mozconfig(self): @@ -658,7 +704,8 @@ items from that key's value." try: abs_mozconfig_path = get_mozconfig_path( - script=self, config=self.config, dirs=dirs) + script=self, config=self.config, dirs=dirs + ) except MozconfigPathError as e: if six.PY2: self.fatal(e.message) @@ -675,21 +722,19 @@ items from that key's value." # finally, copy the mozconfig to a path that 'mach build' expects it to # be self.copyfile( - abs_mozconfig_path, - os.path.join( - dirs['abs_src_dir'], - '.mozconfig')) + abs_mozconfig_path, os.path.join(dirs["abs_src_dir"], ".mozconfig") + ) # TODO: replace with ToolToolMixin def _get_tooltool_auth_file(self): # set the default authentication file based on platform; this # corresponds to where puppet puts the token - if 'tooltool_authentication_file' in self.config: - fn = self.config['tooltool_authentication_file'] + if "tooltool_authentication_file" in self.config: + fn = self.config["tooltool_authentication_file"] elif self._is_windows(): - fn = r'c:\builds\relengapi.tok' + fn = r"c:\builds\relengapi.tok" else: - fn = '/builds/relengapi.tok' + fn = "/builds/relengapi.tok" # if the file doesn't exist, don't pass it to tooltool (it will just # fail). In taskcluster, this will work OK as the relengapi-proxy will @@ -704,48 +749,52 @@ items from that key's value." 
c = self.config dirs = self.query_abs_dirs() - toolchains = os.environ.get('MOZ_TOOLCHAINS') - manifest_src = os.environ.get('TOOLTOOL_MANIFEST') + toolchains = os.environ.get("MOZ_TOOLCHAINS") + manifest_src = os.environ.get("TOOLTOOL_MANIFEST") if not manifest_src: - manifest_src = c.get('tooltool_manifest_src') + manifest_src = c.get("tooltool_manifest_src") if not manifest_src and not toolchains: - return self.warning(ERROR_MSGS['tooltool_manifest_undetermined']) + return self.warning(ERROR_MSGS["tooltool_manifest_undetermined"]) cmd = [ - sys.executable, '-u', - os.path.join(dirs['abs_src_dir'], 'mach'), - 'artifact', - 'toolchain', - '-v', - '--retry', '4', - '--artifact-manifest', - os.path.join(dirs['abs_src_dir'], 'toolchains.json'), + sys.executable, + "-u", + os.path.join(dirs["abs_src_dir"], "mach"), + "artifact", + "toolchain", + "-v", + "--retry", + "4", + "--artifact-manifest", + os.path.join(dirs["abs_src_dir"], "toolchains.json"), ] if manifest_src: - cmd.extend([ - '--tooltool-manifest', - os.path.join(dirs['abs_src_dir'], manifest_src), - ]) + cmd.extend( + [ + "--tooltool-manifest", + os.path.join(dirs["abs_src_dir"], manifest_src), + ] + ) auth_file = self._get_tooltool_auth_file() if auth_file: - cmd.extend(['--authentication-file', auth_file]) - cache = c['env'].get('TOOLTOOL_CACHE') + cmd.extend(["--authentication-file", auth_file]) + cache = c["env"].get("TOOLTOOL_CACHE") if cache: - cmd.extend(['--cache-dir', cache]) + cmd.extend(["--cache-dir", cache]) if toolchains: cmd.extend(toolchains.split()) self.info(str(cmd)) - self.run_command(cmd, cwd=dirs['abs_src_dir'], halt_on_failure=True, - env=env) + self.run_command(cmd, cwd=dirs["abs_src_dir"], halt_on_failure=True, env=env) def _create_mozbuild_dir(self, mozbuild_path=None): if not mozbuild_path: env = self.query_build_env() - mozbuild_path = env.get('MOZBUILD_STATE_PATH') + mozbuild_path = env.get("MOZBUILD_STATE_PATH") if mozbuild_path: self.mkdir_p(mozbuild_path) else: - self.warning("mozbuild_path could not be determined. skipping " - "creating it.") + self.warning( + "mozbuild_path could not be determined. skipping " "creating it." + ) def preflight_build(self): """set up machine state for a complete build.""" @@ -757,9 +806,9 @@ items from that key's value." def build(self): """builds application.""" - args = ['build', '-v'] + args = ["build", "-v"] - custom_build_targets = self.config.get('build_targets') + custom_build_targets = self.config.get("build_targets") if custom_build_targets: args += custom_build_targets @@ -771,29 +820,24 @@ items from that key's value." def static_analysis_autotest(self): """Run mach static-analysis autotest, in order to make sure we dont regress""" self.preflight_build() - self._run_mach_command_in_build_env(['configure']) - self._run_mach_command_in_build_env(['static-analysis', 'autotest', - '--intree-tool'], - use_subprocess=True) + self._run_mach_command_in_build_env(["configure"]) + self._run_mach_command_in_build_env( + ["static-analysis", "autotest", "--intree-tool"], use_subprocess=True + ) def _query_mach(self): dirs = self.query_abs_dirs() - if 'MOZILLABUILD' in os.environ: + if "MOZILLABUILD" in os.environ: # We found many issues with intermittent build failures when not # invoking mach via bash. # See bug 1364651 before considering changing. 
mach = [ - os.path.join( - os.environ['MOZILLABUILD'], - 'msys', - 'bin', - 'bash.exe'), - os.path.join( - dirs['abs_src_dir'], - 'mach')] + os.path.join(os.environ["MOZILLABUILD"], "msys", "bin", "bash.exe"), + os.path.join(dirs["abs_src_dir"], "mach"), + ] else: - mach = [sys.executable, 'mach'] + mach = [sys.executable, "mach"] return mach def _run_mach_command_in_build_env(self, args, use_subprocess=False): @@ -810,25 +854,29 @@ items from that key's value." # Not using `subprocess` causes gradle to hang if use_subprocess: import subprocess - return_code = subprocess.call(mach + ['--log-no-times'] + args, - env=env, cwd=dirs['abs_src_dir']) + + return_code = subprocess.call( + mach + ["--log-no-times"] + args, env=env, cwd=dirs["abs_src_dir"] + ) else: return_code = self.run_command( - command=mach + ['--log-no-times'] + args, - cwd=dirs['abs_src_dir'], + command=mach + ["--log-no-times"] + args, + cwd=dirs["abs_src_dir"], env=env, error_list=MakefileErrorList, - output_timeout=self.config.get('max_build_output_timeout', - 60 * 40) + output_timeout=self.config.get("max_build_output_timeout", 60 * 40), ) if return_code: self.return_code = self.worst_level( - EXIT_STATUS_DICT[TBPL_FAILURE], self.return_code, - AUTOMATION_EXIT_CODES[::-1] + EXIT_STATUS_DICT[TBPL_FAILURE], + self.return_code, + AUTOMATION_EXIT_CODES[::-1], + ) + self.fatal( + "'mach %s' did not run successfully. Please check " + "log for errors." % " ".join(args) ) - self.fatal("'mach %s' did not run successfully. Please check " - "log for errors." % ' '.join(args)) def multi_l10n(self): if not self.query_is_nightly(): @@ -836,41 +884,42 @@ items from that key's value." return dirs = self.query_abs_dirs() - base_work_dir = dirs['base_work_dir'] - work_dir = dirs['abs_work_dir'] - objdir = dirs['abs_obj_dir'] + base_work_dir = dirs["base_work_dir"] + work_dir = dirs["abs_work_dir"] + objdir = dirs["abs_obj_dir"] branch = self.branch # Building a nightly with the try repository fails because a # config-file does not exist for try. Default to mozilla-central # settings (arbitrarily). - if branch == 'try': - branch = 'mozilla-central' + if branch == "try": + branch = "mozilla-central" multil10n_path = os.path.join( - dirs['abs_src_dir'], - 'testing/mozharness/scripts/multil10n.py', + dirs["abs_src_dir"], + "testing/mozharness/scripts/multil10n.py", ) cmd = [ sys.executable, multil10n_path, - '--work-dir', + "--work-dir", work_dir, - '--config-file', - 'multi_locale/android-mozharness-build.json', - '--pull-locale-source', - '--package-multi', - '--summary', + "--config-file", + "multi_locale/android-mozharness-build.json", + "--pull-locale-source", + "--package-multi", + "--summary", ] - self.run_command(cmd, env=self.query_build_env(), cwd=base_work_dir, - halt_on_failure=True) + self.run_command( + cmd, env=self.query_build_env(), cwd=base_work_dir, halt_on_failure=True + ) package_cmd = [ - 'make', - 'echo-variable-PACKAGE', - 'AB_CD=multi', + "make", + "echo-variable-PACKAGE", + "AB_CD=multi", ] package_filename = self.get_output_from_command( package_cmd, @@ -879,23 +928,27 @@ items from that key's value." if not package_filename: self.fatal( "Unable to determine the package filename for the multi-l10n build. 
" - "Was trying to run: %s" % - package_cmd) + "Was trying to run: %s" % package_cmd + ) - self.info('Multi-l10n package filename is: %s' % package_filename) + self.info("Multi-l10n package filename is: %s" % package_filename) - parser = MakeUploadOutputParser(config=self.config, - log_obj=self.log_obj, - ) - upload_cmd = ['make', 'upload', 'AB_CD=multi'] - self.run_command(upload_cmd, - env=self.query_mach_build_env(multiLocale=False), - cwd=objdir, halt_on_failure=True, - output_parser=parser) + parser = MakeUploadOutputParser( + config=self.config, + log_obj=self.log_obj, + ) + upload_cmd = ["make", "upload", "AB_CD=multi"] + self.run_command( + upload_cmd, + env=self.query_mach_build_env(multiLocale=False), + cwd=objdir, + halt_on_failure=True, + output_parser=parser, + ) upload_files_cmd = [ - 'make', - 'echo-variable-UPLOAD_FILES', - 'AB_CD=multi', + "make", + "echo-variable-UPLOAD_FILES", + "AB_CD=multi", ] self.get_output_from_command( upload_files_cmd, @@ -905,7 +958,7 @@ items from that key's value." def postflight_build(self): """grabs properties from post build and calls ccache -s""" # A list of argument lists. Better names gratefully accepted! - mach_commands = self.config.get('postflight_build_mach_commands', []) + mach_commands = self.config.get("postflight_build_mach_commands", []) for mach_command in mach_commands: self._execute_postflight_build_mach_command(mach_command) @@ -913,16 +966,14 @@ items from that key's value." env = self.query_build_env() env.update(self.query_mach_build_env()) - command = [sys.executable, 'mach', '--log-no-times'] + command = [sys.executable, "mach", "--log-no-times"] command.extend(mach_command_args) self.run_command( command=command, - cwd=self.query_abs_dirs()['abs_src_dir'], + cwd=self.query_abs_dirs()["abs_src_dir"], env=env, - output_timeout=self.config.get( - 'max_build_output_timeout', - 60 * 20), + output_timeout=self.config.get("max_build_output_timeout", 60 * 20), halt_on_failure=True, ) @@ -936,16 +987,22 @@ items from that key's value." dirs = self.query_abs_dirs() self.run_command( - command=[sys.executable, 'mach', '--log-no-times', 'configure'], - cwd=dirs['abs_src_dir'], - env=env, output_timeout=60 * 3, halt_on_failure=True, + command=[sys.executable, "mach", "--log-no-times", "configure"], + cwd=dirs["abs_src_dir"], + env=env, + output_timeout=60 * 3, + halt_on_failure=True, ) self.run_command( command=[ - 'make', 'source-package', 'source-upload', + "make", + "source-package", + "source-upload", ], - cwd=dirs['abs_obj_dir'], - env=env, output_timeout=60 * 45, halt_on_failure=True, + cwd=dirs["abs_obj_dir"], + env=env, + output_timeout=60 * 45, + halt_on_failure=True, ) def _is_configuration_shipped(self): @@ -961,105 +1018,105 @@ items from that key's value." # in derived configs. # Debug builds are never shipped. - if self.config.get('debug_build'): + if self.config.get("debug_build"): return False # OS X opt builds without a variant are shipped. - if self.config.get('platform') == 'macosx64': - if not self.config.get('build_variant'): + if self.config.get("platform") == "macosx64": + if not self.config.get("build_variant"): return True # Android opt builds without a variant are shipped. 
- if self.config.get('platform') == 'android': - if not self.config.get('build_variant'): + if self.config.get("platform") == "android": + if not self.config.get("build_variant"): return True return False def _load_build_resources(self): - p = self.config.get('build_resources_path') % self.query_abs_dirs() + p = self.config.get("build_resources_path") % self.query_abs_dirs() if not os.path.exists(p): - self.info('%s does not exist; not loading build resources' % p) + self.info("%s does not exist; not loading build resources" % p) return None - with open(p, 'r') as fh: + with open(p, "r") as fh: resources = json.load(fh) - if 'duration' not in resources: - self.info('resource usage lacks duration; ignoring') + if "duration" not in resources: + self.info("resource usage lacks duration; ignoring") return None # We want to always collect metrics. But alerts with sccache enabled # we should disable automatic alerting - should_alert = False if os.environ.get('USE_SCCACHE') == '1' else True + should_alert = False if os.environ.get("USE_SCCACHE") == "1" else True data = { - 'name': 'build times', - 'value': resources['duration'], - 'extraOptions': self.perfherder_resource_options(), - 'shouldAlert': should_alert, - 'subtests': [], + "name": "build times", + "value": resources["duration"], + "extraOptions": self.perfherder_resource_options(), + "shouldAlert": should_alert, + "subtests": [], } - for phase in resources['phases']: - if 'duration' not in phase: + for phase in resources["phases"]: + if "duration" not in phase: continue - data['subtests'].append({ - 'name': phase['name'], - 'value': phase['duration'], - }) + data["subtests"].append( + { + "name": phase["name"], + "value": phase["duration"], + } + ) return data def _load_sccache_stats(self): stats_file = os.path.join( - self.query_abs_dirs()['abs_obj_dir'], 'sccache-stats.json' + self.query_abs_dirs()["abs_obj_dir"], "sccache-stats.json" ) if not os.path.exists(stats_file): - self.info( - '%s does not exist; not loading sccache stats' % - stats_file) + self.info("%s does not exist; not loading sccache stats" % stats_file) return - with open(stats_file, 'r') as fh: + with open(stats_file, "r") as fh: stats = json.load(fh) def get_stat(key): - val = stats['stats'][key] + val = stats["stats"][key] # Future versions of sccache will distinguish stats by language # and store them as a dict. if isinstance(val, dict): - val = sum(val['counts'].values()) + val = sum(val["counts"].values()) return val - total = get_stat('requests_executed') - hits = get_stat('cache_hits') + total = get_stat("requests_executed") + hits = get_stat("cache_hits") if total > 0: hits /= float(total) yield { - 'name': 'sccache hit rate', - 'value': hits, - 'subtests': [], - 'alertThreshold': 50.0, - 'lowerIsBetter': False, + "name": "sccache hit rate", + "value": hits, + "subtests": [], + "alertThreshold": 50.0, + "lowerIsBetter": False, # We want to always collect metrics. 
# But disable automatic alerting on it - 'shouldAlert': False + "shouldAlert": False, } yield { - 'name': 'sccache cache_write_errors', - 'value': stats['stats']['cache_write_errors'], - 'alertThreshold': 50.0, - 'subtests': [], + "name": "sccache cache_write_errors", + "value": stats["stats"]["cache_write_errors"], + "alertThreshold": 50.0, + "subtests": [], } yield { - 'name': 'sccache requests_not_cacheable', - 'value': stats['stats']['requests_not_cacheable'], - 'alertThreshold': 50.0, - 'subtests': [], + "name": "sccache requests_not_cacheable", + "value": stats["stats"]["requests_not_cacheable"], + "alertThreshold": 50.0, + "subtests": [], } def _get_package_metrics(self): @@ -1068,33 +1125,33 @@ items from that key's value." dirs = self.query_abs_dirs() - dist_dir = os.path.join(dirs['abs_obj_dir'], 'dist') - for ext in ['apk', 'dmg', 'tar.bz2', 'zip']: - name = 'target.' + ext + dist_dir = os.path.join(dirs["abs_obj_dir"], "dist") + for ext in ["apk", "dmg", "tar.bz2", "zip"]: + name = "target." + ext if os.path.exists(os.path.join(dist_dir, name)): packageName = name break else: self.fatal("could not determine packageName") - interests = ['libxul.so', 'classes.dex', 'omni.ja', 'xul.dll'] + interests = ["libxul.so", "classes.dex", "omni.ja", "xul.dll"] installer = os.path.join(dist_dir, packageName) installer_size = 0 size_measurements = [] def paths_with_sizes(installer): if zipfile.is_zipfile(installer): - with zipfile.ZipFile(installer, 'r') as zf: + with zipfile.ZipFile(installer, "r") as zf: for zi in zf.infolist(): yield zi.filename, zi.file_size elif tarfile.is_tarfile(installer): - with tarfile.open(installer, 'r:*') as tf: + with tarfile.open(installer, "r:*") as tf: for ti in tf: yield ti.name, ti.size if os.path.exists(installer): installer_size = self.query_filesize(installer) - self.info('Size of %s: %s bytes' % (packageName, installer_size)) + self.info("Size of %s: %s bytes" % (packageName, installer_size)) try: subtests = {} for path, size in paths_with_sizes(installer): @@ -1103,24 +1160,20 @@ items from that key's value." # We have to be careful here: desktop Firefox installers # contain two omni.ja files: one for the general runtime, # and one for the browser proper. - if name == 'omni.ja': - containing_dir = os.path.basename( - os.path.dirname(path)) - if containing_dir == 'browser': - name = 'browser-omni.ja' + if name == "omni.ja": + containing_dir = os.path.basename(os.path.dirname(path)) + if containing_dir == "browser": + name = "browser-omni.ja" if name in subtests: - self.fatal('should not see %s (%s) multiple times!' - % (name, path)) + self.fatal( + "should not see %s (%s) multiple times!" % (name, path) + ) subtests[name] = size for name in subtests: - self.info('Size of %s: %s bytes' % (name, - subtests[name])) - size_measurements.append( - {'name': name, 'value': subtests[name]}) + self.info("Size of %s: %s bytes" % (name, subtests[name])) + size_measurements.append({"name": name, "value": subtests[name]}) except Exception: - self.info( - 'Unable to search %s for component sizes.' % - installer) + self.info("Unable to search %s for component sizes." % installer) size_measurements = [] if not installer_size and not size_measurements: @@ -1131,26 +1184,30 @@ items from that key's value." # ship. 
def filter_alert(alert): if not self._is_configuration_shipped(): - alert['shouldAlert'] = False + alert["shouldAlert"] = False return alert - if installer.endswith('.apk'): # Android - yield filter_alert({ - "name": "installer size", - "value": installer_size, - "alertChangeType": "absolute", - "alertThreshold": (200 * 1024), - "subtests": size_measurements - }) + if installer.endswith(".apk"): # Android + yield filter_alert( + { + "name": "installer size", + "value": installer_size, + "alertChangeType": "absolute", + "alertThreshold": (200 * 1024), + "subtests": size_measurements, + } + ) else: - yield filter_alert({ - "name": "installer size", - "value": installer_size, - "alertChangeType": "absolute", - "alertThreshold": (100 * 1024), - "subtests": size_measurements - }) + yield filter_alert( + { + "name": "installer size", + "value": installer_size, + "alertChangeType": "absolute", + "alertThreshold": (100 * 1024), + "subtests": size_measurements, + } + ) def _get_sections(self, file, filter=None): """ @@ -1158,8 +1215,9 @@ items from that key's value." """ # Check for `rust_size`, our cross platform version of size. It should # be fetched by run-task in $MOZ_FETCHES_DIR/rust-size/rust-size - rust_size = os.path.join(os.environ['MOZ_FETCHES_DIR'], - 'rust-size', 'rust-size') + rust_size = os.path.join( + os.environ["MOZ_FETCHES_DIR"], "rust-size", "rust-size" + ) size_prog = self.which(rust_size) if not size_prog: self.info("Couldn't find `rust-size` program") @@ -1199,56 +1257,64 @@ items from that key's value." Currently just the sizes of interesting sections. """ lib_interests = { - 'XUL': ('libxul.so', 'xul.dll', 'XUL'), - 'NSS': ('libnss3.so', 'nss3.dll', 'libnss3.dylib'), - 'NSPR': ('libnspr4.so', 'nspr4.dll', 'libnspr4.dylib'), - 'avcodec': ('libmozavcodec.so', 'mozavcodec.dll', 'libmozavcodec.dylib'), - 'avutil': ('libmozavutil.so', 'mozavutil.dll', 'libmozavutil.dylib') + "XUL": ("libxul.so", "xul.dll", "XUL"), + "NSS": ("libnss3.so", "nss3.dll", "libnss3.dylib"), + "NSPR": ("libnspr4.so", "nspr4.dll", "libnspr4.dylib"), + "avcodec": ("libmozavcodec.so", "mozavcodec.dll", "libmozavcodec.dylib"), + "avutil": ("libmozavutil.so", "mozavutil.dll", "libmozavutil.dylib"), } - section_interests = ('.text', '.data', '.rodata', '.rdata', - '.cstring', '.data.rel.ro', '.bss') + section_interests = ( + ".text", + ".data", + ".rodata", + ".rdata", + ".cstring", + ".data.rel.ro", + ".bss", + ) lib_details = [] dirs = self.query_abs_dirs() - dist_dir = os.path.join(dirs['abs_obj_dir'], 'dist') - bin_dir = os.path.join(dist_dir, 'bin') + dist_dir = os.path.join(dirs["abs_obj_dir"], "dist") + bin_dir = os.path.join(dist_dir, "bin") for lib_type, lib_names in list(lib_interests.items()): for lib_name in lib_names: lib = os.path.join(bin_dir, lib_name) if os.path.exists(lib): lib_size = 0 - section_details = self._get_sections( - lib, section_interests) + section_details = self._get_sections(lib, section_interests) section_measurements = [] # Build up the subtests # Lump rodata sections together # - Mach-O separates out read-only string data as .cstring # - PE really uses .rdata, but XUL at least has a .rodata as well - for ro_alias in ('.cstring', '.rdata'): + for ro_alias in (".cstring", ".rdata"): if ro_alias in section_details: - if '.rodata' in section_details: - section_details['.rodata'] += section_details[ro_alias] + if ".rodata" in section_details: + section_details[".rodata"] += section_details[ro_alias] else: - section_details['.rodata'] = section_details[ro_alias] + 
section_details[".rodata"] = section_details[ro_alias] del section_details[ro_alias] for k, v in list(section_details.items()): - section_measurements.append({'name': k, 'value': v}) + section_measurements.append({"name": k, "value": v}) lib_size += v - lib_details.append({ - 'name': lib_type, - 'size': lib_size, - 'sections': section_measurements - }) + lib_details.append( + { + "name": lib_type, + "size": lib_size, + "sections": section_measurements, + } + ) for lib_detail in lib_details: yield { - "name": "%s section sizes" % lib_detail['name'], - "value": lib_detail['size'], + "name": "%s section sizes" % lib_detail["name"], + "value": lib_detail["size"], "shouldAlert": False, - "subtests": lib_detail['sections'] + "subtests": lib_detail["sections"], } def _generate_build_stats(self): @@ -1258,10 +1324,10 @@ items from that key's value." and then posts to graph server the results. We only post to graph server for non nightly build """ - self.info('Collecting build metrics') + self.info("Collecting build metrics") - if os.environ.get('USE_ARTIFACT'): - self.info('Skipping due to forced artifact build.') + if os.environ.get("USE_ARTIFACT"): + self.info("Skipping due to forced artifact build.") return c = self.config @@ -1269,75 +1335,74 @@ items from that key's value." # Report some important file sizes for display in treeherder perfherder_data = { - "framework": { - "name": "build_metrics" - }, + "framework": {"name": "build_metrics"}, "suites": [], } - if not c.get('debug_build') and not c.get('disable_package_metrics'): - perfherder_data['suites'].extend(self._get_package_metrics()) - perfherder_data['suites'].extend(self._get_binary_metrics()) + if not c.get("debug_build") and not c.get("disable_package_metrics"): + perfherder_data["suites"].extend(self._get_package_metrics()) + perfherder_data["suites"].extend(self._get_binary_metrics()) # Extract compiler warnings count. warnings = self.get_output_from_command( - command=[sys.executable, 'mach', 'warnings-list'], - cwd=self.query_abs_dirs()['abs_src_dir'], + command=[sys.executable, "mach", "warnings-list"], + cwd=self.query_abs_dirs()["abs_src_dir"], env=self.query_build_env(), # No need to pollute the log. silent=True, # Fail fast. - halt_on_failure=True) + halt_on_failure=True, + ) if warnings is not None: - perfherder_data['suites'].append({ - 'name': 'compiler warnings', - 'value': len(warnings.strip().splitlines()), - 'alertThreshold': 100.0, - 'subtests': [], - }) + perfherder_data["suites"].append( + { + "name": "compiler warnings", + "value": len(warnings.strip().splitlines()), + "alertThreshold": 100.0, + "subtests": [], + } + ) build_metrics = self._load_build_resources() if build_metrics: - perfherder_data['suites'].append(build_metrics) - perfherder_data['suites'].extend(self._load_sccache_stats()) + perfherder_data["suites"].append(build_metrics) + perfherder_data["suites"].extend(self._load_sccache_stats()) # Ensure all extra options for this configuration are present. 
- for opt in os.environ.get('PERFHERDER_EXTRA_OPTIONS', '').split(): - for suite in perfherder_data['suites']: - if opt not in suite.get('extraOptions', []): - suite.setdefault('extraOptions', []).append(opt) + for opt in os.environ.get("PERFHERDER_EXTRA_OPTIONS", "").split(): + for suite in perfherder_data["suites"]: + if opt not in suite.get("extraOptions", []): + suite.setdefault("extraOptions", []).append(opt) if self.query_is_nightly(): - for suite in perfherder_data['suites']: - suite.setdefault('extraOptions', []).insert(0, 'nightly') + for suite in perfherder_data["suites"]: + suite.setdefault("extraOptions", []).insert(0, "nightly") if perfherder_data["suites"]: - self.info('PERFHERDER_DATA: %s' % json.dumps(perfherder_data)) + self.info("PERFHERDER_DATA: %s" % json.dumps(perfherder_data)) def valgrind_test(self): - '''Execute mach's valgrind-test for memory leaks''' + """Execute mach's valgrind-test for memory leaks""" env = self.query_build_env() env.update(self.query_mach_build_env()) return_code = self.run_command( - command=[ - sys.executable, - 'mach', - 'valgrind-test'], - cwd=self.query_abs_dirs()['abs_src_dir'], + command=[sys.executable, "mach", "valgrind-test"], + cwd=self.query_abs_dirs()["abs_src_dir"], env=env, - output_timeout=self.config.get( - 'max_build_output_timeout', - 60 * 40)) + output_timeout=self.config.get("max_build_output_timeout", 60 * 40), + ) if return_code: self.return_code = self.worst_level( - EXIT_STATUS_DICT[TBPL_FAILURE], self.return_code, - AUTOMATION_EXIT_CODES[::-1] + EXIT_STATUS_DICT[TBPL_FAILURE], + self.return_code, + AUTOMATION_EXIT_CODES[::-1], ) self.fatal( "'mach valgrind-test' did not run successfully. Please check " - "log for errors.") + "log for errors." + ) def _ensure_upload_path(self): env = self.query_mach_build_env() @@ -1345,28 +1410,28 @@ items from that key's value." # Some Taskcluster workers don't like it if an artifacts directory # is defined but no artifacts are uploaded. Guard against this by always # ensuring the artifacts directory exists. - if 'UPLOAD_PATH' in env and not os.path.exists(env['UPLOAD_PATH']): - self.mkdir_p(env['UPLOAD_PATH']) + if "UPLOAD_PATH" in env and not os.path.exists(env["UPLOAD_PATH"]): + self.mkdir_p(env["UPLOAD_PATH"]) def _post_fatal(self, message=None, exit_code=None): if not self.return_code: # only overwrite return_code if it's 0 - self.error('setting return code to 2 because fatal was called') + self.error("setting return code to 2 because fatal was called") self.return_code = 2 @PostScriptRun def _shutdown_sccache(self): - '''If sccache was in use for this build, shut down the sccache server.''' - if os.environ.get('USE_SCCACHE') == '1': - topsrcdir = self.query_abs_dirs()['abs_src_dir'] - sccache_base = os.environ['MOZ_FETCHES_DIR'] - sccache = os.path.join(sccache_base, 'sccache', 'sccache') + """If sccache was in use for this build, shut down the sccache server.""" + if os.environ.get("USE_SCCACHE") == "1": + topsrcdir = self.query_abs_dirs()["abs_src_dir"] + sccache_base = os.environ["MOZ_FETCHES_DIR"] + sccache = os.path.join(sccache_base, "sccache", "sccache") if self._is_windows(): - sccache += '.exe' - self.run_command([sccache, '--stop-server'], cwd=topsrcdir) + sccache += ".exe" + self.run_command([sccache, "--stop-server"], cwd=topsrcdir) @PostScriptRun def _summarize(self): - """ If this is run in automation, ensure the return code is valid and + """If this is run in automation, ensure the return code is valid and set it to one if it's not. 
Finally, log any summaries we collected from the script run. """ @@ -1374,10 +1439,11 @@ items from that key's value." # let's ignore all mention of tbpl status until this # point so it will be easier to manage if self.return_code not in AUTOMATION_EXIT_CODES: - self.error("Return code is set to: %s and is outside of " - "automation's known values. Setting to 2(failure). " - "Valid return codes %s" % (self.return_code, - AUTOMATION_EXIT_CODES)) + self.error( + "Return code is set to: %s and is outside of " + "automation's known values. Setting to 2(failure). " + "Valid return codes %s" % (self.return_code, AUTOMATION_EXIT_CODES) + ) self.return_code = 2 for status, return_code in list(EXIT_STATUS_DICT.items()): if return_code == self.return_code: @@ -1386,28 +1452,24 @@ items from that key's value." @PostScriptRun def _parse_build_tests_ccov(self): - if 'MOZ_FETCHES_DIR' not in os.environ: + if "MOZ_FETCHES_DIR" not in os.environ: return dirs = self.query_abs_dirs() - topsrcdir = dirs['abs_src_dir'] - base_work_dir = dirs['base_work_dir'] + topsrcdir = dirs["abs_src_dir"] + base_work_dir = dirs["base_work_dir"] env = self.query_build_env() - grcov_path = os.path.join(os.environ['MOZ_FETCHES_DIR'], 'grcov') + grcov_path = os.path.join(os.environ["MOZ_FETCHES_DIR"], "grcov") if not os.path.isabs(grcov_path): grcov_path = os.path.join(base_work_dir, grcov_path) if self._is_windows(): - grcov_path += '.exe' - env['GRCOV_PATH'] = grcov_path + grcov_path += ".exe" + env["GRCOV_PATH"] = grcov_path cmd = self._query_mach() + [ - 'python', - os.path.join('testing', 'parse_build_tests_ccov.py'), + "python", + os.path.join("testing", "parse_build_tests_ccov.py"), ] - self.run_command( - command=cmd, - cwd=topsrcdir, - env=env, - halt_on_failure=True) + self.run_command(command=cmd, cwd=topsrcdir, env=env, halt_on_failure=True) diff --git a/testing/mozharness/mozharness/mozilla/testing/errors.py b/testing/mozharness/mozharness/mozilla/testing/errors.py index edcadac0c883..2ab314efc8d4 100644 --- a/testing/mozharness/mozharness/mozilla/testing/errors.py +++ b/testing/mozharness/mozharness/mozilla/testing/errors.py @@ -19,17 +19,21 @@ from mozharness.base.log import INFO, WARNING, ERROR # ErrorLists {{{1 _mochitest_summary = { - 'regex': re.compile(r'''(\d+ INFO (Passed|Failed|Todo):\ +(\d+)|\t(Passed|Failed|Todo): (\d+))'''), # NOQA: E501 - 'pass_group': "Passed", - 'fail_group': "Failed", - 'known_fail_group': "Todo", + "regex": re.compile( + r"""(\d+ INFO (Passed|Failed|Todo):\ +(\d+)|\t(Passed|Failed|Todo): (\d+))""" + ), # NOQA: E501 + "pass_group": "Passed", + "fail_group": "Failed", + "known_fail_group": "Todo", } _reftest_summary = { - 'regex': re.compile(r'''REFTEST INFO \| (Successful|Unexpected|Known problems): (\d+) \('''), # NOQA: E501 - 'pass_group': "Successful", - 'fail_group': "Unexpected", - 'known_fail_group': "Known problems", + "regex": re.compile( + r"""REFTEST INFO \| (Successful|Unexpected|Known problems): (\d+) \(""" + ), # NOQA: E501 + "pass_group": "Successful", + "fail_group": "Unexpected", + "known_fail_group": "Known problems", } TinderBoxPrintRe = { @@ -44,93 +48,129 @@ TinderBoxPrintRe = { "mochitest-plain_summary": _mochitest_summary, "mochitest-plain-gpu_summary": _mochitest_summary, "marionette_summary": { - 'regex': re.compile(r'''(passed|failed|todo):\ +(\d+)'''), - 'pass_group': "passed", - 'fail_group': "failed", - 'known_fail_group': "todo", + "regex": re.compile(r"""(passed|failed|todo):\ +(\d+)"""), + "pass_group": "passed", + "fail_group": "failed", + 
"known_fail_group": "todo", }, "reftest_summary": _reftest_summary, "reftest-qr_summary": _reftest_summary, "crashtest_summary": _reftest_summary, "crashtest-qr_summary": _reftest_summary, "xpcshell_summary": { - 'regex': re.compile(r'''INFO \| (Passed|Failed|Todo): (\d+)'''), - 'pass_group': "Passed", - 'fail_group': "Failed", - 'known_fail_group': "Todo", + "regex": re.compile(r"""INFO \| (Passed|Failed|Todo): (\d+)"""), + "pass_group": "Passed", + "fail_group": "Failed", + "known_fail_group": "Todo", }, "jsreftest_summary": _reftest_summary, "instrumentation_summary": _mochitest_summary, "cppunittest_summary": { - 'regex': re.compile(r'''cppunittests INFO \| (Passed|Failed): (\d+)'''), - 'pass_group': "Passed", - 'fail_group': "Failed", - 'known_fail_group': None, + "regex": re.compile(r"""cppunittests INFO \| (Passed|Failed): (\d+)"""), + "pass_group": "Passed", + "fail_group": "Failed", + "known_fail_group": None, }, "gtest_summary": { - 'regex': re.compile(r'''(Passed|Failed): (\d+)'''), - 'pass_group': "Passed", - 'fail_group': "Failed", - 'known_fail_group': None, + "regex": re.compile(r"""(Passed|Failed): (\d+)"""), + "pass_group": "Passed", + "fail_group": "Failed", + "known_fail_group": None, }, "jittest_summary": { - 'regex': re.compile(r'''(Passed|Failed): (\d+)'''), - 'pass_group': "Passed", - 'fail_group': "Failed", - 'known_fail_group': None, + "regex": re.compile(r"""(Passed|Failed): (\d+)"""), + "pass_group": "Passed", + "fail_group": "Failed", + "known_fail_group": None, }, "mozbase_summary": { - 'regex': re.compile(r'''(OK)|(FAILED) \(errors=(\d+)'''), - 'pass_group': "OK", - 'fail_group': "FAILED", - 'known_fail_group': None, + "regex": re.compile(r"""(OK)|(FAILED) \(errors=(\d+)"""), + "pass_group": "OK", + "fail_group": "FAILED", + "known_fail_group": None, }, "geckoview_summary": { - 'regex': re.compile(r'''(Passed|Failed): (\d+)'''), - 'pass_group': "Passed", - 'fail_group': "Failed", - 'known_fail_group': None, + "regex": re.compile(r"""(Passed|Failed): (\d+)"""), + "pass_group": "Passed", + "fail_group": "Failed", + "known_fail_group": None, }, "geckoview-junit_summary": { - 'regex': re.compile(r'''(Passed|Failed): (\d+)'''), - 'pass_group': "Passed", - 'fail_group': "Failed", - 'known_fail_group': None, + "regex": re.compile(r"""(Passed|Failed): (\d+)"""), + "pass_group": "Passed", + "fail_group": "Failed", + "known_fail_group": None, }, - "harness_error": { - 'full_regex': re.compile(r"(?:TEST-UNEXPECTED-FAIL|PROCESS-CRASH) \| .* \|[^\|]* (application crashed|missing output line for total leaks!|negative leaks caught!|\d+ bytes leaked)"), # NOQA: E501 - 'minimum_regex': re.compile(r'''(TEST-UNEXPECTED|PROCESS-CRASH)'''), - 'retry_regex': re.compile(r'''(FAIL-SHOULD-RETRY|No space left on device|ADBError|ADBProcessError|ADBTimeoutError|program finished with exit code 80|INFRA-ERROR)''') # NOQA: E501 + "full_regex": re.compile( + r"(?:TEST-UNEXPECTED-FAIL|PROCESS-CRASH) \| .* \|[^\|]* (application crashed|missing output line for total leaks!|negative leaks caught!|\d+ bytes leaked)" # NOQA: E501 + ), + "minimum_regex": re.compile(r"""(TEST-UNEXPECTED|PROCESS-CRASH)"""), + "retry_regex": re.compile( + r"""(FAIL-SHOULD-RETRY|No space left on device|ADBError|ADBProcessError|ADBTimeoutError|program finished with exit code 80|INFRA-ERROR)""" # NOQA: E501 + ), }, } TestPassed = [ - {'regex': re.compile('''(TEST-INFO|TEST-KNOWN-FAIL|TEST-PASS|INFO \| )'''), 'level': INFO}, + { + "regex": re.compile("""(TEST-INFO|TEST-KNOWN-FAIL|TEST-PASS|INFO \| )"""), + "level": INFO, 
+ }, ] BaseHarnessErrorList = [ - {'substr': 'TEST-UNEXPECTED', 'level': ERROR, }, - {'substr': 'PROCESS-CRASH', 'level': ERROR, }, - {'regex': re.compile('''ERROR: (Address|Leak)Sanitizer'''), 'level': ERROR, }, - {'regex': re.compile('''thread '([^']+)' panicked'''), 'level': ERROR, }, - {'substr': 'pure virtual method called', 'level': ERROR, }, - {'substr': 'Pure virtual function called!', 'level': ERROR, }, + { + "substr": "TEST-UNEXPECTED", + "level": ERROR, + }, + { + "substr": "PROCESS-CRASH", + "level": ERROR, + }, + { + "regex": re.compile("""ERROR: (Address|Leak)Sanitizer"""), + "level": ERROR, + }, + { + "regex": re.compile("""thread '([^']+)' panicked"""), + "level": ERROR, + }, + { + "substr": "pure virtual method called", + "level": ERROR, + }, + { + "substr": "Pure virtual function called!", + "level": ERROR, + }, ] HarnessErrorList = BaseHarnessErrorList + [ - {'substr': 'A content process crashed', 'level': ERROR, }, + { + "substr": "A content process crashed", + "level": ERROR, + }, ] # wpt can have expected crashes so we can't always turn treeherder orange in those cases WptHarnessErrorList = BaseHarnessErrorList LogcatErrorList = [ - {'substr': 'Fatal signal 11 (SIGSEGV)', 'level': ERROR, - 'explanation': 'This usually indicates the B2G process has crashed'}, - {'substr': 'Fatal signal 7 (SIGBUS)', 'level': ERROR, - 'explanation': 'This usually indicates the B2G process has crashed'}, - {'substr': '[JavaScript Error:', 'level': WARNING}, - {'substr': 'seccomp sandbox violation', 'level': ERROR, - 'explanation': 'A content process has violated the system call sandbox (bug 790923)'}, + { + "substr": "Fatal signal 11 (SIGSEGV)", + "level": ERROR, + "explanation": "This usually indicates the B2G process has crashed", + }, + { + "substr": "Fatal signal 7 (SIGBUS)", + "level": ERROR, + "explanation": "This usually indicates the B2G process has crashed", + }, + {"substr": "[JavaScript Error:", "level": WARNING}, + { + "substr": "seccomp sandbox violation", + "level": ERROR, + "explanation": "A content process has violated the system call sandbox (bug 790923)", + }, ] diff --git a/testing/mozharness/mozharness/mozilla/testing/raptor.py b/testing/mozharness/mozharness/mozilla/testing/raptor.py index 5737a80d8e48..dfe1b7ca93c0 100644 --- a/testing/mozharness/mozharness/mozilla/testing/raptor.py +++ b/testing/mozharness/mozharness/mozilla/testing/raptor.py @@ -24,7 +24,10 @@ import mozharness from mozharness.base.errors import PythonErrorList from mozharness.base.log import OutputParser, DEBUG, ERROR, CRITICAL, INFO from mozharness.mozilla.automation import ( - EXIT_STATUS_DICT, TBPL_SUCCESS, TBPL_RETRY, TBPL_WORST_LEVEL_TUPLE + EXIT_STATUS_DICT, + TBPL_SUCCESS, + TBPL_RETRY, + TBPL_WORST_LEVEL_TUPLE, ) from mozharness.base.python import Python3Virtualenv from mozharness.mozilla.testing.android import AndroidMixin @@ -33,336 +36,494 @@ from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_opt from mozharness.base.vcs.vcsbase import MercurialScript from mozharness.mozilla.testing.codecoverage import ( CodeCoverageMixin, - code_coverage_config_options + code_coverage_config_options, ) scripts_path = os.path.abspath(os.path.dirname(os.path.dirname(mozharness.__file__))) -external_tools_path = os.path.join(scripts_path, 'external_tools') +external_tools_path = os.path.join(scripts_path, "external_tools") here = os.path.abspath(os.path.dirname(__file__)) -RaptorErrorList = PythonErrorList + HarnessErrorList + [ - {'regex': re.compile(r'''run-as: Package '.*' is 
unknown'''), 'level': DEBUG}, - {'substr': r'''raptorDebug''', 'level': DEBUG}, - {'regex': re.compile(r'''^raptor[a-zA-Z-]*( - )?( )?(?i)error(:)?'''), 'level': ERROR}, - {'regex': re.compile(r'''^raptor[a-zA-Z-]*( - )?( )?(?i)critical(:)?'''), 'level': CRITICAL}, - {'regex': re.compile(r'''No machine_name called '.*' can be found'''), 'level': CRITICAL}, - {'substr': r"""No such file or directory: 'browser_output.txt'""", - 'level': CRITICAL, - 'explanation': "Most likely the browser failed to launch, or the test otherwise " - "failed to start."}, -] +RaptorErrorList = ( + PythonErrorList + + HarnessErrorList + + [ + {"regex": re.compile(r"""run-as: Package '.*' is unknown"""), "level": DEBUG}, + {"substr": r"""raptorDebug""", "level": DEBUG}, + { + "regex": re.compile(r"""^raptor[a-zA-Z-]*( - )?( )?(?i)error(:)?"""), + "level": ERROR, + }, + { + "regex": re.compile(r"""^raptor[a-zA-Z-]*( - )?( )?(?i)critical(:)?"""), + "level": CRITICAL, + }, + { + "regex": re.compile(r"""No machine_name called '.*' can be found"""), + "level": CRITICAL, + }, + { + "substr": r"""No such file or directory: 'browser_output.txt'""", + "level": CRITICAL, + "explanation": "Most likely the browser failed to launch, or the test otherwise " + "failed to start.", + }, + ] +) -class Raptor(TestingMixin, MercurialScript, CodeCoverageMixin, AndroidMixin, Python3Virtualenv): +class Raptor( + TestingMixin, MercurialScript, CodeCoverageMixin, AndroidMixin, Python3Virtualenv +): """ Install and run Raptor tests """ # Options to Browsertime. Paths are expected to be absolute. browsertime_options = [ - [["--browsertime-node"], { - "dest": "browsertime_node", - "default": None, - "help": argparse.SUPPRESS - }], - [["--browsertime-browsertimejs"], { - "dest": "browsertime_browsertimejs", - "default": None, - "help": argparse.SUPPRESS - }], - [["--browsertime-vismet-script"], { - "dest": "browsertime_vismet_script", - "default": None, - "help": argparse.SUPPRESS - }], - [["--browsertime-chromedriver"], { - "dest": "browsertime_chromedriver", - "default": None, - "help": argparse.SUPPRESS - }], - [["--browsertime-ffmpeg"], { - "dest": "browsertime_ffmpeg", - "default": None, - "help": argparse.SUPPRESS - }], - [["--browsertime-geckodriver"], { - "dest": "browsertime_geckodriver", - "default": None, - "help": argparse.SUPPRESS - }], - [["--browsertime-video"], { - "dest": "browsertime_video", - "action": "store_true", - "default": False, - "help": argparse.SUPPRESS - }], - [["--browsertime-visualmetrics"], { - "dest": "browsertime_visualmetrics", - "action": "store_true", - "default": False, - "help": argparse.SUPPRESS - }], - [["--browsertime-no-ffwindowrecorder"], { - "dest": "browsertime_no_ffwindowrecorder", - "action": "store_true", - "default": False, - "help": argparse.SUPPRESS - }], - [["--browsertime"], { - "dest": "browsertime", - "action": "store_true", - "default": False, - "help": argparse.SUPPRESS - }], + [ + ["--browsertime-node"], + {"dest": "browsertime_node", "default": None, "help": argparse.SUPPRESS}, + ], + [ + ["--browsertime-browsertimejs"], + { + "dest": "browsertime_browsertimejs", + "default": None, + "help": argparse.SUPPRESS, + }, + ], + [ + ["--browsertime-vismet-script"], + { + "dest": "browsertime_vismet_script", + "default": None, + "help": argparse.SUPPRESS, + }, + ], + [ + ["--browsertime-chromedriver"], + { + "dest": "browsertime_chromedriver", + "default": None, + "help": argparse.SUPPRESS, + }, + ], + [ + ["--browsertime-ffmpeg"], + {"dest": "browsertime_ffmpeg", "default": None, "help": 
argparse.SUPPRESS}, + ], + [ + ["--browsertime-geckodriver"], + { + "dest": "browsertime_geckodriver", + "default": None, + "help": argparse.SUPPRESS, + }, + ], + [ + ["--browsertime-video"], + { + "dest": "browsertime_video", + "action": "store_true", + "default": False, + "help": argparse.SUPPRESS, + }, + ], + [ + ["--browsertime-visualmetrics"], + { + "dest": "browsertime_visualmetrics", + "action": "store_true", + "default": False, + "help": argparse.SUPPRESS, + }, + ], + [ + ["--browsertime-no-ffwindowrecorder"], + { + "dest": "browsertime_no_ffwindowrecorder", + "action": "store_true", + "default": False, + "help": argparse.SUPPRESS, + }, + ], + [ + ["--browsertime"], + { + "dest": "browsertime", + "action": "store_true", + "default": False, + "help": argparse.SUPPRESS, + }, + ], ] - config_options = [ - [["--test"], - {"action": "store", - "dest": "test", - "help": "Raptor test to run" - }], - [["--app"], - {"default": "firefox", - "choices": [ - "firefox", - "chrome", - "chrome-m", - "chromium", - "fennec", - "geckoview", - "refbrow", - "fenix" - ], - "dest": "app", - "help": "Name of the application we are testing (default: firefox)." - }], - [["--activity"], - {"dest": "activity", - "help": "The Android activity used to launch the Android app. " - "e.g.: org.mozilla.fenix.browser.BrowserPerformanceTestActivity" - }], - [["--intent"], - {"dest": "intent", - "help": "Name of the Android intent action used to launch the Android app" - }], - [["--is-release-build"], - {"action": "store_true", - "dest": "is_release_build", - "help": "Whether the build is a release build which requires work arounds " - "using MOZ_DISABLE_NONLOCAL_CONNECTIONS to support installing unsigned " - "webextensions. Defaults to False." - }], - [["--add-option"], - {"action": "extend", - "dest": "raptor_cmd_line_args", - "default": None, - "help": "Extra options to Raptor." - }], - [["--enable-webrender"], { - "action": "store_true", - "dest": "enable_webrender", - "default": False, - "help": "Enable the WebRender compositor in Gecko.", - }], - [["--no-conditioned-profile"], { - "action": "store_true", - "dest": "no_conditioned_profile", - "default": False, - "help": "Run without the conditioned profile.", - }], - [["--device-name"], { - "dest": "device_name", - "default": None, - "help": "Device name of mobile device.", - }], - [["--geckoProfile"], { - "dest": "gecko_profile", - "action": "store_true", - "default": False, - "help": argparse.SUPPRESS - }], - [["--geckoProfileInterval"], { - "dest": "gecko_profile_interval", - "type": "int", - "help": argparse.SUPPRESS - }], - [["--geckoProfileEntries"], { - "dest": "gecko_profile_entries", - "type": "int", - "help": argparse.SUPPRESS - }], - [["--gecko-profile"], { - "dest": "gecko_profile", - "action": "store_true", - "default": False, - "help": "Whether to profile the test run and save the profile results." - }], - [["--gecko-profile-interval"], { - "dest": "gecko_profile_interval", - "type": "int", - "help": "The interval between samples taken by the profiler (ms)." - }], - [["--gecko-profile-entries"], { - "dest": "gecko_profile_entries", - "type": "int", - "help": "How many samples to take with the profiler." - }], - [["--page-cycles"], { - "dest": "page_cycles", - "type": "int", - "help": "How many times to repeat loading the test page (for page load tests); " - "for benchmark tests this is how many times the benchmark test will be run." 
- }], - [["--page-timeout"], { - "dest": "page_timeout", - "type": "int", - "help": "How long to wait (ms) for one page_cycle to complete, before timing out." - }], - [["--browser-cycles"], { - "dest": "browser_cycles", - "type": "int", - "help": "The number of times a cold load test is repeated (for cold load tests only, " - "where the browser is shutdown and restarted between test iterations)." - }], - [["--project"], { - "action": "store", - "dest": "project", - "default": "mozilla-central", - "type": "str", - "help": "Name of the project (try, mozilla-central, etc.)" - }], - [["--test-url-params"], { - "action": "store", - "dest": "test_url_params", - "help": "Parameters to add to the test_url query string." - }], - [["--host"], { - "dest": "host", - "type": "str", - "default": "127.0.0.1", - "help": "Hostname from which to serve urls (default: 127.0.0.1). " + config_options = ( + [ + [ + ["--test"], + {"action": "store", "dest": "test", "help": "Raptor test to run"}, + ], + [ + ["--app"], + { + "default": "firefox", + "choices": [ + "firefox", + "chrome", + "chrome-m", + "chromium", + "fennec", + "geckoview", + "refbrow", + "fenix", + ], + "dest": "app", + "help": "Name of the application we are testing (default: firefox).", + }, + ], + [ + ["--activity"], + { + "dest": "activity", + "help": "The Android activity used to launch the Android app. " + "e.g.: org.mozilla.fenix.browser.BrowserPerformanceTestActivity", + }, + ], + [ + ["--intent"], + { + "dest": "intent", + "help": "Name of the Android intent action used to launch the Android app", + }, + ], + [ + ["--is-release-build"], + { + "action": "store_true", + "dest": "is_release_build", + "help": "Whether the build is a release build which requires work arounds " + "using MOZ_DISABLE_NONLOCAL_CONNECTIONS to support installing unsigned " + "webextensions. 
Defaults to False.", + }, + ], + [ + ["--add-option"], + { + "action": "extend", + "dest": "raptor_cmd_line_args", + "default": None, + "help": "Extra options to Raptor.", + }, + ], + [ + ["--enable-webrender"], + { + "action": "store_true", + "dest": "enable_webrender", + "default": False, + "help": "Enable the WebRender compositor in Gecko.", + }, + ], + [ + ["--no-conditioned-profile"], + { + "action": "store_true", + "dest": "no_conditioned_profile", + "default": False, + "help": "Run without the conditioned profile.", + }, + ], + [ + ["--device-name"], + { + "dest": "device_name", + "default": None, + "help": "Device name of mobile device.", + }, + ], + [ + ["--geckoProfile"], + { + "dest": "gecko_profile", + "action": "store_true", + "default": False, + "help": argparse.SUPPRESS, + }, + ], + [ + ["--geckoProfileInterval"], + { + "dest": "gecko_profile_interval", + "type": "int", + "help": argparse.SUPPRESS, + }, + ], + [ + ["--geckoProfileEntries"], + { + "dest": "gecko_profile_entries", + "type": "int", + "help": argparse.SUPPRESS, + }, + ], + [ + ["--gecko-profile"], + { + "dest": "gecko_profile", + "action": "store_true", + "default": False, + "help": "Whether to profile the test run and save the profile results.", + }, + ], + [ + ["--gecko-profile-interval"], + { + "dest": "gecko_profile_interval", + "type": "int", + "help": "The interval between samples taken by the profiler (ms).", + }, + ], + [ + ["--gecko-profile-entries"], + { + "dest": "gecko_profile_entries", + "type": "int", + "help": "How many samples to take with the profiler.", + }, + ], + [ + ["--page-cycles"], + { + "dest": "page_cycles", + "type": "int", + "help": ( + "How many times to repeat loading the test page (for page load " + "tests); for benchmark tests this is how many times the benchmark test " + "will be run." + ), + }, + ], + [ + ["--page-timeout"], + { + "dest": "page_timeout", + "type": "int", + "help": "How long to wait (ms) for one page_cycle to complete, before timing out.", # NOQA: E501 + }, + ], + [ + ["--browser-cycles"], + { + "dest": "browser_cycles", + "type": "int", + "help": ( + "The number of times a cold load test is repeated (for cold load tests " + "only, where the browser is shutdown and restarted between test " + "iterations)." + ), + }, + ], + [ + ["--project"], + { + "action": "store", + "dest": "project", + "default": "mozilla-central", + "type": "str", + "help": "Name of the project (try, mozilla-central, etc.)", + }, + ], + [ + ["--test-url-params"], + { + "action": "store", + "dest": "test_url_params", + "help": "Parameters to add to the test_url query string.", + }, + ], + [ + ["--host"], + { + "dest": "host", + "type": "str", + "default": "127.0.0.1", + "help": "Hostname from which to serve urls (default: 127.0.0.1). " "The value HOST_IP will cause the value of host to be " "to be loaded from the environment variable HOST_IP.", - }], - [["--power-test"], { - "dest": "power_test", - "action": "store_true", - "default": False, - "help": "Use Raptor to measure power usage on Android browsers (Geckoview Example, " - "Fenix, Refbrow, and Fennec) as well as on Intel-based MacOS machines that " - "have Intel Power Gadget installed.", - }], - [["--memory-test"], { - "dest": "memory_test", - "action": "store_true", - "default": False, - "help": "Use Raptor to measure memory usage.", - }], - [["--cpu-test"], { - "dest": "cpu_test", - "action": "store_true", - "default": False, - "help": "Use Raptor to measure CPU usage." 
- }], - [["--disable-perf-tuning"], { - "action": "store_true", - "dest": "disable_perf_tuning", - "default": False, - "help": "Disable performance tuning on android.", - }], - [["--conditioned-profile-scenario"], { - "dest": "conditioned_profile_scenario", - "type": "str", - "default": "settled", - "help": "Name of profile scenario.", - }], - [["--live-sites"], { - "dest": "live_sites", - "action": "store_true", - "default": False, - "help": "Run tests using live sites instead of recorded sites.", - }], - [["--chimera"], { - "dest": "chimera", - "action": "store_true", - "default": False, - "help": "Run tests in chimera mode. Each browser cycle will run a cold and warm test.", - }], - [["--debug-mode"], { - "dest": "debug_mode", - "action": "store_true", - "default": False, - "help": "Run Raptor in debug mode (open browser console, limited page-cycles, etc.)", - }], - [["--noinstall"], { - "dest": "noinstall", - "action": "store_true", - "default": False, - "help": "Do not offer to install Android APK.", - }], - [["--disable-e10s"], { - "dest": "e10s", - "action": "store_false", - "default": True, - "help": "Run without multiple processes (e10s).", - }], - [["--enable-fission"], { - "action": "store_true", - "dest": "enable_fission", - "default": False, - "help": "Enable Fission (site isolation) in Gecko.", - }], - [["--setpref"], { - "action": "append", - "metavar": "PREF=VALUE", - "dest": "extra_prefs", - "default": [], - "help": "Set a browser preference. May be used multiple times." - }], - [["--cold"], { - "action": "store_true", - "dest": "cold", - "default": False, - "help": "Enable cold page-load for browsertime tp6", - }], - [["--verbose"], { - "action": "store_true", - "dest": "verbose", - "default": False, - "help": "Verbose output", - }], - - ] + testing_config_options + \ - copy.deepcopy(code_coverage_config_options) + \ - browsertime_options + }, + ], + [ + ["--power-test"], + { + "dest": "power_test", + "action": "store_true", + "default": False, + "help": ( + "Use Raptor to measure power usage on Android browsers (Geckoview " + "Example, Fenix, Refbrow, and Fennec) as well as on Intel-based MacOS " + "machines that have Intel Power Gadget installed." + ), + }, + ], + [ + ["--memory-test"], + { + "dest": "memory_test", + "action": "store_true", + "default": False, + "help": "Use Raptor to measure memory usage.", + }, + ], + [ + ["--cpu-test"], + { + "dest": "cpu_test", + "action": "store_true", + "default": False, + "help": "Use Raptor to measure CPU usage.", + }, + ], + [ + ["--disable-perf-tuning"], + { + "action": "store_true", + "dest": "disable_perf_tuning", + "default": False, + "help": "Disable performance tuning on android.", + }, + ], + [ + ["--conditioned-profile-scenario"], + { + "dest": "conditioned_profile_scenario", + "type": "str", + "default": "settled", + "help": "Name of profile scenario.", + }, + ], + [ + ["--live-sites"], + { + "dest": "live_sites", + "action": "store_true", + "default": False, + "help": "Run tests using live sites instead of recorded sites.", + }, + ], + [ + ["--chimera"], + { + "dest": "chimera", + "action": "store_true", + "default": False, + "help": "Run tests in chimera mode. 
Each browser cycle will run a cold and warm test.", # NOQA: E501 + }, + ], + [ + ["--debug-mode"], + { + "dest": "debug_mode", + "action": "store_true", + "default": False, + "help": "Run Raptor in debug mode (open browser console, limited page-cycles, etc.)", # NOQA: E501 + }, + ], + [ + ["--noinstall"], + { + "dest": "noinstall", + "action": "store_true", + "default": False, + "help": "Do not offer to install Android APK.", + }, + ], + [ + ["--disable-e10s"], + { + "dest": "e10s", + "action": "store_false", + "default": True, + "help": "Run without multiple processes (e10s).", + }, + ], + [ + ["--enable-fission"], + { + "action": "store_true", + "dest": "enable_fission", + "default": False, + "help": "Enable Fission (site isolation) in Gecko.", + }, + ], + [ + ["--setpref"], + { + "action": "append", + "metavar": "PREF=VALUE", + "dest": "extra_prefs", + "default": [], + "help": "Set a browser preference. May be used multiple times.", + }, + ], + [ + ["--cold"], + { + "action": "store_true", + "dest": "cold", + "default": False, + "help": "Enable cold page-load for browsertime tp6", + }, + ], + [ + ["--verbose"], + { + "action": "store_true", + "dest": "verbose", + "default": False, + "help": "Verbose output", + }, + ], + ] + + testing_config_options + + copy.deepcopy(code_coverage_config_options) + + browsertime_options + ) def __init__(self, **kwargs): - kwargs.setdefault('config_options', self.config_options) - kwargs.setdefault('all_actions', ['clobber', - 'download-and-extract', - 'populate-webroot', - 'create-virtualenv', - 'install-chrome-android', - 'install-chromium-distribution', - 'install', - 'run-tests', - ]) - kwargs.setdefault('default_actions', ['clobber', - 'download-and-extract', - 'populate-webroot', - 'create-virtualenv', - 'install-chromium-distribution', - 'install', - 'run-tests', - ]) - kwargs.setdefault('config', {}) + kwargs.setdefault("config_options", self.config_options) + kwargs.setdefault( + "all_actions", + [ + "clobber", + "download-and-extract", + "populate-webroot", + "create-virtualenv", + "install-chrome-android", + "install-chromium-distribution", + "install", + "run-tests", + ], + ) + kwargs.setdefault( + "default_actions", + [ + "clobber", + "download-and-extract", + "populate-webroot", + "create-virtualenv", + "install-chromium-distribution", + "install", + "run-tests", + ], + ) + kwargs.setdefault("config", {}) super(Raptor, self).__init__(**kwargs) # Convenience - self.workdir = self.query_abs_dirs()['abs_work_dir'] + self.workdir = self.query_abs_dirs()["abs_work_dir"] - self.run_local = self.config.get('run_local') + self.run_local = self.config.get("run_local") # App (browser testing on) defaults to firefox self.app = "firefox" @@ -373,17 +534,21 @@ class Raptor(TestingMixin, MercurialScript, CodeCoverageMixin, AndroidMixin, Pyt # i.e. "--app=geckoview" or separate as "--app", "geckoview" so we have to # parse carefully. It's simplest to use `argparse` to parse partially. self.app = "firefox" - if 'raptor_cmd_line_args' in self.config: + if "raptor_cmd_line_args" in self.config: sub_parser = argparse.ArgumentParser() # It's not necessary to limit the allowed values: each value # will be parsed and verifed by raptor/raptor.py. 
- sub_parser.add_argument('--app', default=None, dest='app') - sub_parser.add_argument('-i', '--intent', default=None, dest='intent') - sub_parser.add_argument('-a', '--activity', default=None, dest='activity') + sub_parser.add_argument("--app", default=None, dest="app") + sub_parser.add_argument("-i", "--intent", default=None, dest="intent") + sub_parser.add_argument( + "-a", "--activity", default=None, dest="activity" + ) # We'd prefer to use `parse_known_intermixed_args`, but that's # new in Python 3.7. - known, unknown = sub_parser.parse_known_args(self.config['raptor_cmd_line_args']) + known, unknown = sub_parser.parse_known_args( + self.config["raptor_cmd_line_args"] + ) if known.app: self.app = known.app @@ -393,11 +558,11 @@ class Raptor(TestingMixin, MercurialScript, CodeCoverageMixin, AndroidMixin, Pyt self.activity = known.activity else: # Raptor initiated in production via mozharness - self.test = self.config['test'] + self.test = self.config["test"] self.app = self.config.get("app", "firefox") self.binary_path = self.config.get("binary_path", None) - if self.app in ('refbrow', 'fenix'): + if self.app in ("refbrow", "fenix"): self.app_name = self.binary_path self.installer_url = self.config.get("installer_url") @@ -407,26 +572,28 @@ class Raptor(TestingMixin, MercurialScript, CodeCoverageMixin, AndroidMixin, Pyt self.repo_path = self.config.get("repo_path") self.obj_path = self.config.get("obj_path") self.test = None - self.gecko_profile = self.config.get('gecko_profile') or \ - "--geckoProfile" in self.config.get("raptor_cmd_line_args", []) - self.gecko_profile_interval = self.config.get('gecko_profile_interval') - self.gecko_profile_entries = self.config.get('gecko_profile_entries') - self.test_packages_url = self.config.get('test_packages_url') - self.test_url_params = self.config.get('test_url_params') - self.host = self.config.get('host') - if self.host == 'HOST_IP': - self.host = os.environ['HOST_IP'] - self.power_test = self.config.get('power_test') - self.memory_test = self.config.get('memory_test') - self.cpu_test = self.config.get('cpu_test') - self.live_sites = self.config.get('live_sites') - self.chimera = self.config.get('chimera') - self.disable_perf_tuning = self.config.get('disable_perf_tuning') - self.conditioned_profile_scenario = self.config.get('conditioned_profile_scenario', - 'settled') - self.extra_prefs = self.config.get('extra_prefs') - self.is_release_build = self.config.get('is_release_build') - self.debug_mode = self.config.get('debug_mode', False) + self.gecko_profile = self.config.get( + "gecko_profile" + ) or "--geckoProfile" in self.config.get("raptor_cmd_line_args", []) + self.gecko_profile_interval = self.config.get("gecko_profile_interval") + self.gecko_profile_entries = self.config.get("gecko_profile_entries") + self.test_packages_url = self.config.get("test_packages_url") + self.test_url_params = self.config.get("test_url_params") + self.host = self.config.get("host") + if self.host == "HOST_IP": + self.host = os.environ["HOST_IP"] + self.power_test = self.config.get("power_test") + self.memory_test = self.config.get("memory_test") + self.cpu_test = self.config.get("cpu_test") + self.live_sites = self.config.get("live_sites") + self.chimera = self.config.get("chimera") + self.disable_perf_tuning = self.config.get("disable_perf_tuning") + self.conditioned_profile_scenario = self.config.get( + "conditioned_profile_scenario", "settled" + ) + self.extra_prefs = self.config.get("extra_prefs") + self.is_release_build = 
self.config.get("is_release_build") + self.debug_mode = self.config.get("debug_mode", False) self.chromium_dist_path = None self.firefox_android_browsers = ["fennec", "geckoview", "refbrow", "fenix"] self.android_browsers = self.firefox_android_browsers + ["chrome-m"] @@ -435,11 +602,15 @@ class Raptor(TestingMixin, MercurialScript, CodeCoverageMixin, AndroidMixin, Pyt for (arg,), details in Raptor.browsertime_options: # Allow overriding defaults on the `./mach raptor-test ...` command-line. - value = self.config.get(details['dest']) + value = self.config.get(details["dest"]) if value and arg not in self.config.get("raptor_cmd_line_args", []): - setattr(self, details['dest'], value) + setattr(self, details["dest"], value) - if not self.run_local and self.browsertime_visualmetrics and self.browsertime_video: + if ( + not self.run_local + and self.browsertime_visualmetrics + and self.browsertime_video + ): self.error("Cannot run visual metrics in the same CI task as the test.") # We accept some configuration options from the try commit message in the @@ -449,14 +620,14 @@ class Raptor(TestingMixin, MercurialScript, CodeCoverageMixin, AndroidMixin, Pyt gecko_results = [] # If gecko_profile is set, we add that to Raptor's options if self.gecko_profile: - gecko_results.append('--geckoProfile') + gecko_results.append("--geckoProfile") if self.gecko_profile_interval: gecko_results.extend( - ['--geckoProfileInterval', str(self.gecko_profile_interval)] + ["--geckoProfileInterval", str(self.gecko_profile_interval)] ) if self.gecko_profile_entries: gecko_results.extend( - ['--geckoProfileEntries', str(self.gecko_profile_entries)] + ["--geckoProfileEntries", str(self.gecko_profile_entries)] ) return gecko_results @@ -464,15 +635,18 @@ class Raptor(TestingMixin, MercurialScript, CodeCoverageMixin, AndroidMixin, Pyt if self.abs_dirs: return self.abs_dirs abs_dirs = super(Raptor, self).query_abs_dirs() - abs_dirs['abs_blob_upload_dir'] = os.path.join(abs_dirs['abs_work_dir'], - 'blobber_upload_dir') - abs_dirs['abs_test_install_dir'] = os.path.join(abs_dirs['abs_work_dir'], 'tests') + abs_dirs["abs_blob_upload_dir"] = os.path.join( + abs_dirs["abs_work_dir"], "blobber_upload_dir" + ) + abs_dirs["abs_test_install_dir"] = os.path.join( + abs_dirs["abs_work_dir"], "tests" + ) self.abs_dirs = abs_dirs return self.abs_dirs def install_chrome_android(self): - '''Install Google Chrome for Android in production from tooltool''' + """Install Google Chrome for Android in production from tooltool""" if self.app != "chrome-m": self.info("Google Chrome for Android not required") return @@ -492,15 +666,17 @@ class Raptor(TestingMixin, MercurialScript, CodeCoverageMixin, AndroidMixin, Pyt "raptor", "tooltool-manifests", "chrome-android", - "chrome85.manifest" + "chrome85.manifest", ), - output_dir=tmpdir + output_dir=tmpdir, ) # Find the downloaded APK files = os.listdir(tmpdir) if len(files) > 1: - raise Exception("Found more than one chrome APK file after tooltool download") + raise Exception( + "Found more than one chrome APK file after tooltool download" + ) chromeapk = os.path.join(tmpdir, files[0]) # Disable verification and install the APK @@ -514,16 +690,16 @@ class Raptor(TestingMixin, MercurialScript, CodeCoverageMixin, AndroidMixin, Pyt self.info("Google Chrome for Android successfully installed") def install_chromium_distribution(self): - '''Install Google Chromium distribution in production''' - linux, mac, win = 'linux', 'mac', 'win' - chrome, chromium = 'chrome', 'chromium' + """Install Google Chromium 
distribution in production""" + linux, mac, win = "linux", "mac", "win" + chrome, chromium = "chrome", "chromium" available_chromium_dists = [chrome, chromium] binary_location = { chromium: { - linux: ['chrome-linux', 'chrome'], - mac: ['chrome-mac', 'Chromium.app', 'Contents', 'MacOS', 'Chromium'], - win: ['chrome-win', 'Chrome.exe'] + linux: ["chrome-linux", "chrome"], + mac: ["chrome-mac", "Chromium.app", "Contents", "MacOS", "Chromium"], + win: ["chrome-win", "Chrome.exe"], }, } @@ -531,7 +707,7 @@ class Raptor(TestingMixin, MercurialScript, CodeCoverageMixin, AndroidMixin, Pyt self.info("Google Chrome or Chromium distributions are not required.") return - if self.app == 'chrome': + if self.app == "chrome": self.info("Chrome should be preinstalled.") if win in self.platform_name(): base_path = "C:\\%s\\Google\\Chrome\\Application\\chrome.exe" @@ -541,15 +717,20 @@ class Raptor(TestingMixin, MercurialScript, CodeCoverageMixin, AndroidMixin, Pyt elif linux in self.platform_name(): self.chromium_dist_path = "/usr/bin/google-chrome" elif mac in self.platform_name(): - self.chromium_dist_path = "/Applications/Google Chrome.app/" \ - "Contents/MacOS/Google Chrome" + self.chromium_dist_path = ( + "/Applications/Google Chrome.app/" "Contents/MacOS/Google Chrome" + ) else: self.error( - "Chrome is not installed on the platform %s yet." % self.platform_name() + "Chrome is not installed on the platform %s yet." + % self.platform_name() ) if os.path.exists(self.chromium_dist_path): - self.info("Google Chrome found in expected location %s" % self.chromium_dist_path) + self.info( + "Google Chrome found in expected location %s" + % self.chromium_dist_path + ) else: self.error("Cannot find Google Chrome at %s" % self.chromium_dist_path) @@ -562,27 +743,34 @@ class Raptor(TestingMixin, MercurialScript, CodeCoverageMixin, AndroidMixin, Pyt return self.info("Getting fetched %s build" % chromium_dist) - self.chromium_dist_dest = os.path.normpath(os.path.abspath(os.environ['MOZ_FETCHES_DIR'])) + self.chromium_dist_dest = os.path.normpath( + os.path.abspath(os.environ["MOZ_FETCHES_DIR"]) + ) if mac in self.platform_name(): - self.chromium_dist_path = os.path.join(self.chromium_dist_dest, - *binary_location[chromium_dist][mac]) + self.chromium_dist_path = os.path.join( + self.chromium_dist_dest, *binary_location[chromium_dist][mac] + ) elif linux in self.platform_name(): - self.chromium_dist_path = os.path.join(self.chromium_dist_dest, - *binary_location[chromium_dist][linux]) + self.chromium_dist_path = os.path.join( + self.chromium_dist_dest, *binary_location[chromium_dist][linux] + ) else: - self.chromium_dist_path = os.path.join(self.chromium_dist_dest, - *binary_location[chromium_dist][win]) + self.chromium_dist_path = os.path.join( + self.chromium_dist_dest, *binary_location[chromium_dist][win] + ) self.info("%s dest is: %s" % (chromium_dist, self.chromium_dist_dest)) self.info("%s path is: %s" % (chromium_dist, self.chromium_dist_path)) # Now ensure Chromium binary exists if os.path.exists(self.chromium_dist_path): - self.info("Successfully installed %s to: %s" - % (chromium_dist, self.chromium_dist_path)) + self.info( + "Successfully installed %s to: %s" + % (chromium_dist, self.chromium_dist_path) + ) else: self.info("Abort: failed to install %s" % chromium_dist) @@ -594,92 +782,99 @@ class Raptor(TestingMixin, MercurialScript, CodeCoverageMixin, AndroidMixin, Pyt # Get the APK location to be able to get the browser version # through mozversion if self.app in self.firefox_android_browsers and not 
self.run_local: - kw_options['installerpath'] = self.installer_path + kw_options["installerpath"] = self.installer_path # If testing on Firefox, the binary path already came from mozharness/pro; # otherwise the binary path is forwarded from command-line arg (raptor_cmd_line_args). - kw_options['app'] = self.app - if self.app == "firefox" or (self.app in self.firefox_android_browsers and - not self.run_local): - binary_path = self.binary_path or self.config.get('binary_path') + kw_options["app"] = self.app + if self.app == "firefox" or ( + self.app in self.firefox_android_browsers and not self.run_local + ): + binary_path = self.binary_path or self.config.get("binary_path") if not binary_path: self.fatal("Raptor requires a path to the binary.") - kw_options['binary'] = binary_path + kw_options["binary"] = binary_path if self.app in self.firefox_android_browsers: # In production ensure we have correct app name, # i.e. fennec_aurora or fennec_release etc. - kw_options['binary'] = self.query_package_name() - self.info("Set binary to %s instead of %s" % (kw_options['binary'], binary_path)) + kw_options["binary"] = self.query_package_name() + self.info( + "Set binary to %s instead of %s" + % (kw_options["binary"], binary_path) + ) else: # Running on Chromium if not self.run_local: # When running locally we already set the Chromium binary above, in init. # In production, we already installed Chromium, so set the binary path # to our install. - kw_options['binary'] = self.chromium_dist_path or "" + kw_options["binary"] = self.chromium_dist_path or "" # Options overwritten from **kw - if 'test' in self.config: - kw_options['test'] = self.config['test'] - if 'binary' in self.config: - kw_options['binary'] = self.config['binary'] + if "test" in self.config: + kw_options["test"] = self.config["test"] + if "binary" in self.config: + kw_options["binary"] = self.config["binary"] if self.symbols_path: - kw_options['symbolsPath'] = self.symbols_path - if self.config.get('obj_path', None) is not None: - kw_options['obj-path'] = self.config['obj_path'] + kw_options["symbolsPath"] = self.symbols_path + if self.config.get("obj_path", None) is not None: + kw_options["obj-path"] = self.config["obj_path"] if self.test_url_params: - kw_options['test-url-params'] = self.test_url_params - if self.config.get('device_name') is not None: - kw_options['device-name'] = self.config['device_name'] - if self.config.get('activity') is not None: - kw_options['activity'] = self.config['activity'] - if self.config.get('conditioned_profile_scenario') is not None: - kw_options['conditioned-profile-scenario'] = \ - self.config['conditioned_profile_scenario'] + kw_options["test-url-params"] = self.test_url_params + if self.config.get("device_name") is not None: + kw_options["device-name"] = self.config["device_name"] + if self.config.get("activity") is not None: + kw_options["activity"] = self.config["activity"] + if self.config.get("conditioned_profile_scenario") is not None: + kw_options["conditioned-profile-scenario"] = self.config[ + "conditioned_profile_scenario" + ] kw_options.update(kw) if self.host: - kw_options['host'] = self.host + kw_options["host"] = self.host # Configure profiling options options.extend(self.query_gecko_profile_options()) # Extra arguments if args is not None: options += args - if self.config.get('run_local', False): - options.extend(['--run-local']) - if 'raptor_cmd_line_args' in self.config: - options += self.config['raptor_cmd_line_args'] - if self.config.get('code_coverage', False): - 
options.extend(['--code-coverage']) - if self.config.get('is_release_build', False): - options.extend(['--is-release-build']) - if self.config.get('power_test', False): - options.extend(['--power-test']) - if self.config.get('memory_test', False): - options.extend(['--memory-test']) - if self.config.get('cpu_test', False): - options.extend(['--cpu-test']) - if self.config.get('live_sites', False): - options.extend(['--live-sites']) - if self.config.get('chimera', False): - options.extend(['--chimera']) - if self.config.get('disable_perf_tuning', False): - options.extend(['--disable-perf-tuning']) - if self.config.get('cold', False): - options.extend(['--cold']) - if self.config.get('enable_webrender', False): - options.extend(['--enable-webrender']) - if self.config.get('no_conditioned_profile', False): - options.extend(['--no-conditioned-profile']) - if self.config.get('enable_fission', False): - options.extend(['--enable-fission']) - if self.config.get('verbose', False): - options.extend(['--verbose']) - if self.config.get('extra_prefs'): - options.extend(['--setpref={}'.format(i) for i in self.config.get('extra_prefs')]) + if self.config.get("run_local", False): + options.extend(["--run-local"]) + if "raptor_cmd_line_args" in self.config: + options += self.config["raptor_cmd_line_args"] + if self.config.get("code_coverage", False): + options.extend(["--code-coverage"]) + if self.config.get("is_release_build", False): + options.extend(["--is-release-build"]) + if self.config.get("power_test", False): + options.extend(["--power-test"]) + if self.config.get("memory_test", False): + options.extend(["--memory-test"]) + if self.config.get("cpu_test", False): + options.extend(["--cpu-test"]) + if self.config.get("live_sites", False): + options.extend(["--live-sites"]) + if self.config.get("chimera", False): + options.extend(["--chimera"]) + if self.config.get("disable_perf_tuning", False): + options.extend(["--disable-perf-tuning"]) + if self.config.get("cold", False): + options.extend(["--cold"]) + if self.config.get("enable_webrender", False): + options.extend(["--enable-webrender"]) + if self.config.get("no_conditioned_profile", False): + options.extend(["--no-conditioned-profile"]) + if self.config.get("enable_fission", False): + options.extend(["--enable-fission"]) + if self.config.get("verbose", False): + options.extend(["--verbose"]) + if self.config.get("extra_prefs"): + options.extend( + ["--setpref={}".format(i) for i in self.config.get("extra_prefs")] + ) for (arg,), details in Raptor.browsertime_options: # Allow overriding defaults on the `./mach raptor-test ...` command-line - value = self.config.get(details['dest']) + value = self.config.get(details["dest"]) if value and arg not in self.config.get("raptor_cmd_line_args", []): if isinstance(value, string_types): options.extend([arg, os.path.expandvars(value)]) @@ -687,23 +882,23 @@ class Raptor(TestingMixin, MercurialScript, CodeCoverageMixin, AndroidMixin, Pyt options.extend([arg]) for key, value in kw_options.items(): - options.extend(['--%s' % key, value]) + options.extend(["--%s" % key, value]) return options def populate_webroot(self): """Populate the production test machines' webroots""" self.raptor_path = os.path.join( - self.query_abs_dirs()['abs_test_install_dir'], 'raptor' + self.query_abs_dirs()["abs_test_install_dir"], "raptor" ) - if self.config.get('run_local'): - self.raptor_path = os.path.join(self.repo_path, 'testing', 'raptor') + if self.config.get("run_local"): + self.raptor_path = os.path.join(self.repo_path, 
"testing", "raptor") def clobber(self): # Recreate the upload directory for storing the logcat collected # during APK installation. super(Raptor, self).clobber() - upload_dir = self.query_abs_dirs()['abs_blob_upload_dir'] + upload_dir = self.query_abs_dirs()["abs_blob_upload_dir"] if not os.path.isdir(upload_dir): self.mkdir_p(upload_dir) @@ -719,7 +914,7 @@ class Raptor(TestingMixin, MercurialScript, CodeCoverageMixin, AndroidMixin, Pyt def download_and_extract(self, extract_dirs=None, suite_categories=None): return super(Raptor, self).download_and_extract( - suite_categories=['common', 'condprof', 'raptor'] + suite_categories=["common", "condprof", "raptor"] ) def create_virtualenv(self, **kwargs): @@ -732,17 +927,17 @@ class Raptor(TestingMixin, MercurialScript, CodeCoverageMixin, AndroidMixin, Pyt if self.run_local and os.path.exists(_virtualenv_path): self.info("Virtualenv already exists, skipping creation") - _python_interp = self.config.get('exes')['python'] + _python_interp = self.config.get("exes")["python"] - if 'win' in self.platform_name(): - _path = os.path.join(_virtualenv_path, - 'Lib', - 'site-packages') + if "win" in self.platform_name(): + _path = os.path.join(_virtualenv_path, "Lib", "site-packages") else: - _path = os.path.join(_virtualenv_path, - 'lib', - os.path.basename(_python_interp), - 'site-packages') + _path = os.path.join( + _virtualenv_path, + "lib", + os.path.basename(_python_interp), + "site-packages", + ) sys.path.append(_path) return @@ -751,15 +946,15 @@ class Raptor(TestingMixin, MercurialScript, CodeCoverageMixin, AndroidMixin, Pyt # Install mozbase first, so we use in-tree versions if not self.run_local: mozbase_requirements = os.path.join( - self.query_abs_dirs()['abs_test_install_dir'], - 'config', - 'mozbase_requirements.txt' + self.query_abs_dirs()["abs_test_install_dir"], + "config", + "mozbase_requirements.txt", ) else: mozbase_requirements = os.path.join( os.path.dirname(self.raptor_path), - 'config', - 'mozbase_source_requirements.txt' + "config", + "mozbase_source_requirements.txt", ) self.register_virtualenv_module( requirements=[mozbase_requirements], @@ -767,27 +962,23 @@ class Raptor(TestingMixin, MercurialScript, CodeCoverageMixin, AndroidMixin, Pyt editable=True, ) - modules = ['pip>=1.5'] + modules = ["pip>=1.5"] if self.run_local: # Add modules required for visual metrics - modules.extend([ - 'numpy==1.16.1', - 'Pillow==6.1.0', - 'scipy==1.2.3', - 'pyssim==0.4' - ]) + modules.extend( + ["numpy==1.16.1", "Pillow==6.1.0", "scipy==1.2.3", "pyssim==0.4"] + ) # Require pip >= 1.5 so pip will prefer .whl files to install super(Raptor, self).create_virtualenv(modules=modules) # Install Raptor dependencies self.install_module( - requirements=[os.path.join(self.raptor_path, - 'requirements.txt')] + requirements=[os.path.join(self.raptor_path, "requirements.txt")] ) def install(self): - if not self.config.get('noinstall', False): + if not self.config.get("noinstall", False): if self.app in self.firefox_android_browsers: self.device.uninstall_app(self.binary_path) self.install_apk(self.installer_path) @@ -799,7 +990,7 @@ class Raptor(TestingMixin, MercurialScript, CodeCoverageMixin, AndroidMixin, Pyt # create upload dir if it doesn't already exist self.info("Creating dir: %s" % os.path.dirname(dest)) os.makedirs(os.path.dirname(dest)) - self.info('Copying raptor results from %s to %s' % (src, dest)) + self.info("Copying raptor results from %s to %s" % (src, dest)) try: copyfile(src, dest) except Exception as e: @@ -815,57 +1006,60 @@ class 
Raptor(TestingMixin, MercurialScript, CodeCoverageMixin, AndroidMixin, Pyt # Python version check python = self.query_python_path() self.run_command([python, "--version"]) - parser = RaptorOutputParser(config=self.config, log_obj=self.log_obj, - error_list=RaptorErrorList) + parser = RaptorOutputParser( + config=self.config, log_obj=self.log_obj, error_list=RaptorErrorList + ) env = {} - env['MOZ_UPLOAD_DIR'] = self.query_abs_dirs()['abs_blob_upload_dir'] + env["MOZ_UPLOAD_DIR"] = self.query_abs_dirs()["abs_blob_upload_dir"] if not self.run_local: - env['MINIDUMP_STACKWALK'] = self.query_minidump_stackwalk() - env['MINIDUMP_SAVE_PATH'] = self.query_abs_dirs()['abs_blob_upload_dir'] - env['RUST_BACKTRACE'] = 'full' - if not os.path.isdir(env['MOZ_UPLOAD_DIR']): - self.mkdir_p(env['MOZ_UPLOAD_DIR']) + env["MINIDUMP_STACKWALK"] = self.query_minidump_stackwalk() + env["MINIDUMP_SAVE_PATH"] = self.query_abs_dirs()["abs_blob_upload_dir"] + env["RUST_BACKTRACE"] = "full" + if not os.path.isdir(env["MOZ_UPLOAD_DIR"]): + self.mkdir_p(env["MOZ_UPLOAD_DIR"]) env = self.query_env(partial_env=env, log_level=INFO) # adjust PYTHONPATH to be able to use raptor as a python package - if 'PYTHONPATH' in env: - env['PYTHONPATH'] = self.raptor_path + os.pathsep + env['PYTHONPATH'] + if "PYTHONPATH" in env: + env["PYTHONPATH"] = self.raptor_path + os.pathsep + env["PYTHONPATH"] else: - env['PYTHONPATH'] = self.raptor_path + env["PYTHONPATH"] = self.raptor_path # mitmproxy needs path to mozharness when installing the cert, and tooltool - env['SCRIPTSPATH'] = scripts_path - env['EXTERNALTOOLSPATH'] = external_tools_path + env["SCRIPTSPATH"] = scripts_path + env["EXTERNALTOOLSPATH"] = external_tools_path # disable "GC poisoning" Bug# 1499043 - env['JSGC_DISABLE_POISONING'] = '1' + env["JSGC_DISABLE_POISONING"] = "1" # Needed to load unsigned Raptor WebExt on release builds if self.is_release_build: - env['MOZ_DISABLE_NONLOCAL_CONNECTIONS'] = '1' + env["MOZ_DISABLE_NONLOCAL_CONNECTIONS"] = "1" if self.repo_path is not None: - env['MOZ_DEVELOPER_REPO_DIR'] = self.repo_path + env["MOZ_DEVELOPER_REPO_DIR"] = self.repo_path if self.obj_path is not None: - env['MOZ_DEVELOPER_OBJ_DIR'] = self.obj_path + env["MOZ_DEVELOPER_OBJ_DIR"] = self.obj_path # Sets a timeout for how long Raptor should run without output - output_timeout = self.config.get('raptor_output_timeout', 3600) + output_timeout = self.config.get("raptor_output_timeout", 3600) # Run Raptor tests - run_tests = os.path.join(self.raptor_path, 'raptor', 'raptor.py') + run_tests = os.path.join(self.raptor_path, "raptor", "raptor.py") - mozlog_opts = ['--log-tbpl-level=debug'] - if not self.run_local and 'suite' in self.config: - fname_pattern = '%s_%%s.log' % self.config['test'] - mozlog_opts.append('--log-errorsummary=%s' - % os.path.join(env['MOZ_UPLOAD_DIR'], - fname_pattern % 'errorsummary')) - mozlog_opts.append('--log-raw=%s' - % os.path.join(env['MOZ_UPLOAD_DIR'], - fname_pattern % 'raw')) + mozlog_opts = ["--log-tbpl-level=debug"] + if not self.run_local and "suite" in self.config: + fname_pattern = "%s_%%s.log" % self.config["test"] + mozlog_opts.append( + "--log-errorsummary=%s" + % os.path.join(env["MOZ_UPLOAD_DIR"], fname_pattern % "errorsummary") + ) + mozlog_opts.append( + "--log-raw=%s" + % os.path.join(env["MOZ_UPLOAD_DIR"], fname_pattern % "raw") + ) def launch_in_debug_mode(cmdline): cmdline = set(cmdline) - debug_opts = {'--debug', '--debugger', '--debugger_args'} + debug_opts = {"--debug", "--debugger", "--debugger_args"} return 
bool(debug_opts.intersection(cmdline)) @@ -877,10 +1071,13 @@ class Raptor(TestingMixin, MercurialScript, CodeCoverageMixin, AndroidMixin, Pyt raptor_process = subprocess.Popen(command, cwd=self.workdir, env=env) raptor_process.wait() else: - self.return_code = self.run_command(command, cwd=self.workdir, - output_timeout=output_timeout, - output_parser=parser, - env=env) + self.return_code = self.run_command( + command, + cwd=self.workdir, + output_timeout=output_timeout, + output_parser=parser, + env=env, + ) if self.app in self.android_browsers: self.logcat_stop() @@ -894,34 +1091,37 @@ class Raptor(TestingMixin, MercurialScript, CodeCoverageMixin, AndroidMixin, Pyt # Copy results to upload dir so they are included as an artifact self.info("Copying Raptor results to upload dir:") - src = os.path.join(self.query_abs_dirs()['abs_work_dir'], 'raptor.json') - dest = os.path.join(env['MOZ_UPLOAD_DIR'], 'perfherder-data.json') + src = os.path.join(self.query_abs_dirs()["abs_work_dir"], "raptor.json") + dest = os.path.join(env["MOZ_UPLOAD_DIR"], "perfherder-data.json") self.info(str(dest)) self._artifact_perf_data(src, dest) # Make individual perfherder data JSON's for each supporting data type - for file in glob.glob(os.path.join(self.query_abs_dirs()['abs_work_dir'], '*')): + for file in glob.glob( + os.path.join(self.query_abs_dirs()["abs_work_dir"], "*") + ): path, filename = os.path.split(file) - if not filename.startswith('raptor-'): + if not filename.startswith("raptor-"): continue # filename is expected to contain a unique data name # i.e. raptor-os-baseline-power.json would result in # the data name os-baseline-power - data_name = '-'.join(filename.split('-')[1:]) - data_name = '.'.join(data_name.split('.')[:-1]) + data_name = "-".join(filename.split("-")[1:]) + data_name = ".".join(data_name.split(".")[:-1]) src = file dest = os.path.join( - env['MOZ_UPLOAD_DIR'], - 'perfherder-data-%s.json' % data_name + env["MOZ_UPLOAD_DIR"], "perfherder-data-%s.json" % data_name ) self._artifact_perf_data(src, dest) - src = os.path.join(self.query_abs_dirs()['abs_work_dir'], 'screenshots.html') + src = os.path.join( + self.query_abs_dirs()["abs_work_dir"], "screenshots.html" + ) if os.path.exists(src): - dest = os.path.join(env['MOZ_UPLOAD_DIR'], 'screenshots.html') + dest = os.path.join(env["MOZ_UPLOAD_DIR"], "screenshots.html") self.info(str(dest)) self._artifact_perf_data(src, dest) @@ -930,14 +1130,18 @@ class Raptor(TestingMixin, MercurialScript, CodeCoverageMixin, AndroidMixin, Pyt # in TBPL_RETRY cause a retry rather than simply reporting an error. 
if parser.tbpl_status != TBPL_SUCCESS: parser_status = EXIT_STATUS_DICT[parser.tbpl_status] - self.info('return code %s changed to %s due to log output' % - (str(self.return_code), str(parser_status))) + self.info( + "return code %s changed to %s due to log output" + % (str(self.return_code), str(parser_status)) + ) self.return_code = parser_status class RaptorOutputParser(OutputParser): - minidump_regex = re.compile(r'''raptorError: "error executing: '(\S+) (\S+) (\S+)'"''') - RE_PERF_DATA = re.compile(r'.*PERFHERDER_DATA:\s+(\{.*\})') + minidump_regex = re.compile( + r'''raptorError: "error executing: '(\S+) (\S+) (\S+)'"''' + ) + RE_PERF_DATA = re.compile(r".*PERFHERDER_DATA:\s+(\{.*\})") def __init__(self, **kwargs): super(RaptorOutputParser, self).__init__(**kwargs) @@ -945,7 +1149,7 @@ class RaptorOutputParser(OutputParser): self.found_perf_data = [] self.tbpl_status = TBPL_SUCCESS self.worst_log_level = INFO - self.harness_retry_re = TinderBoxPrintRe['harness_error']['retry_regex'] + self.harness_retry_re = TinderBoxPrintRe["harness_error"]["retry_regex"] def parse_single_line(self, line): m = self.minidump_regex.search(line) @@ -957,9 +1161,10 @@ class RaptorOutputParser(OutputParser): self.found_perf_data.append(m.group(1)) if self.harness_retry_re.search(line): - self.critical(' %s' % line) + self.critical(" %s" % line) self.worst_log_level = self.worst_level(CRITICAL, self.worst_log_level) - self.tbpl_status = self.worst_level(TBPL_RETRY, self.tbpl_status, - levels=TBPL_WORST_LEVEL_TUPLE) + self.tbpl_status = self.worst_level( + TBPL_RETRY, self.tbpl_status, levels=TBPL_WORST_LEVEL_TUPLE + ) return # skip base parse_single_line super(RaptorOutputParser, self).parse_single_line(line) diff --git a/testing/mozharness/mozharness/mozilla/testing/testbase.py b/testing/mozharness/mozharness/mozilla/testing/testbase.py index 256d754565a5..7adeb3d087d5 100755 --- a/testing/mozharness/mozharness/mozilla/testing/testbase.py +++ b/testing/mozharness/mozharness/mozilla/testing/testbase.py @@ -24,86 +24,127 @@ from mozharness.mozilla.automation import AutomationMixin, TBPL_WARNING from mozharness.mozilla.structuredlog import StructuredOutputParser from mozharness.mozilla.testing.unittest import DesktopUnittestOutputParser from mozharness.mozilla.testing.try_tools import TryToolsMixin, try_config_options -from mozharness.mozilla.testing.verify_tools import VerifyToolsMixin, verify_config_options +from mozharness.mozilla.testing.verify_tools import ( + VerifyToolsMixin, + verify_config_options, +) from mozharness.mozilla.tooltool import TooltoolMixin from mozharness.lib.python.authentication import get_credentials -INSTALLER_SUFFIXES = ('.apk', # Android - '.tar.bz2', '.tar.gz', # Linux - '.dmg', # Mac - '.installer-stub.exe', '.installer.exe', '.exe', '.zip', # Windows - ) +INSTALLER_SUFFIXES = ( + ".apk", # Android + ".tar.bz2", + ".tar.gz", # Linux + ".dmg", # Mac + ".installer-stub.exe", + ".installer.exe", + ".exe", + ".zip", # Windows +) # https://searchfox.org/mozilla-central/source/testing/config/tooltool-manifests TOOLTOOL_PLATFORM_DIR = { - 'linux': 'linux32', - 'linux64': 'linux64', - 'win32': 'win32', - 'win64': 'win32', - 'macosx': 'macosx64', + "linux": "linux32", + "linux64": "linux64", + "win32": "win32", + "win64": "win32", + "macosx": "macosx64", } -testing_config_options = [ - [["--installer-url"], - {"action": "store", - "dest": "installer_url", - "default": None, - "help": "URL to the installer to install", - }], - [["--installer-path"], - {"action": "store", - "dest": 
"installer_path", - "default": None, - "help": "Path to the installer to install. " - "This is set automatically if run with --download-and-extract.", - }], - [["--binary-path"], - {"action": "store", - "dest": "binary_path", - "default": None, - "help": "Path to installed binary. This is set automatically if run with --install.", - }], - [["--exe-suffix"], - {"action": "store", - "dest": "exe_suffix", - "default": None, - "help": "Executable suffix for binaries on this platform", - }], - [["--test-url"], - {"action": "store", - "dest": "test_url", - "default": None, - "help": "URL to the zip file containing the actual tests", - }], - [["--test-packages-url"], - {"action": "store", - "dest": "test_packages_url", - "default": None, - "help": "URL to a json file describing which tests archives to download", - }], - [["--jsshell-url"], - {"action": "store", - "dest": "jsshell_url", - "default": None, - "help": "URL to the jsshell to install", - }], - [["--download-symbols"], - {"action": "store", - "dest": "download_symbols", - "type": "choice", - "choices": ['ondemand', 'true'], - "help": "Download and extract crash reporter symbols.", - }], -] + copy.deepcopy(virtualenv_config_options) \ - + copy.deepcopy(try_config_options) \ - + copy.deepcopy(verify_config_options) +testing_config_options = ( + [ + [ + ["--installer-url"], + { + "action": "store", + "dest": "installer_url", + "default": None, + "help": "URL to the installer to install", + }, + ], + [ + ["--installer-path"], + { + "action": "store", + "dest": "installer_path", + "default": None, + "help": "Path to the installer to install. " + "This is set automatically if run with --download-and-extract.", + }, + ], + [ + ["--binary-path"], + { + "action": "store", + "dest": "binary_path", + "default": None, + "help": "Path to installed binary. This is set automatically if run with --install.", # NOQA: E501 + }, + ], + [ + ["--exe-suffix"], + { + "action": "store", + "dest": "exe_suffix", + "default": None, + "help": "Executable suffix for binaries on this platform", + }, + ], + [ + ["--test-url"], + { + "action": "store", + "dest": "test_url", + "default": None, + "help": "URL to the zip file containing the actual tests", + }, + ], + [ + ["--test-packages-url"], + { + "action": "store", + "dest": "test_packages_url", + "default": None, + "help": "URL to a json file describing which tests archives to download", + }, + ], + [ + ["--jsshell-url"], + { + "action": "store", + "dest": "jsshell_url", + "default": None, + "help": "URL to the jsshell to install", + }, + ], + [ + ["--download-symbols"], + { + "action": "store", + "dest": "download_symbols", + "type": "choice", + "choices": ["ondemand", "true"], + "help": "Download and extract crash reporter symbols.", + }, + ], + ] + + copy.deepcopy(virtualenv_config_options) + + copy.deepcopy(try_config_options) + + copy.deepcopy(verify_config_options) +) # TestingMixin {{{1 -class TestingMixin(VirtualenvMixin, AutomationMixin, ResourceMonitoringMixin, - TooltoolMixin, TryToolsMixin, VerifyToolsMixin): +class TestingMixin( + VirtualenvMixin, + AutomationMixin, + ResourceMonitoringMixin, + TooltoolMixin, + TryToolsMixin, + VerifyToolsMixin, +): """ The steps to identify + download the proper bits for [browser] unit tests and Talos. 
@@ -130,14 +171,16 @@ class TestingMixin(VirtualenvMixin, AutomationMixin, ResourceMonitoringMixin, elif self.installer_url: reference_url = self.installer_url else: - self.fatal("Can't figure out build directory urls without an installer_url " - "or test_packages_url!") + self.fatal( + "Can't figure out build directory urls without an installer_url " + "or test_packages_url!" + ) reference_url = urllib.parse.unquote(reference_url) parts = list(urlparse(reference_url)) - last_slash = parts[2].rfind('/') - parts[2] = '/'.join([parts[2][:last_slash], file_name]) + last_slash = parts[2].rfind("/") + parts[2] = "/".join([parts[2][:last_slash], file_name]) url = ParseResult(*parts).geturl() @@ -148,19 +191,21 @@ class TestingMixin(VirtualenvMixin, AutomationMixin, ResourceMonitoringMixin, in the build upload directory where that file can be found. """ if self.test_packages_url: - reference_suffixes = ['.test_packages.json'] + reference_suffixes = [".test_packages.json"] reference_url = self.test_packages_url elif self.installer_url: reference_suffixes = INSTALLER_SUFFIXES reference_url = self.installer_url else: - self.fatal("Can't figure out build directory urls without an installer_url " - "or test_packages_url!") + self.fatal( + "Can't figure out build directory urls without an installer_url " + "or test_packages_url!" + ) url = None for reference_suffix in reference_suffixes: if reference_url.endswith(reference_suffix): - url = reference_url[:-len(reference_suffix)] + suffix + url = reference_url[: -len(reference_suffix)] + suffix break return url @@ -170,7 +215,9 @@ class TestingMixin(VirtualenvMixin, AutomationMixin, ResourceMonitoringMixin, return self.symbols_url elif self.installer_url: - symbols_url = self.query_prefixed_build_dir_url('.crashreporter-symbols.zip') + symbols_url = self.query_prefixed_build_dir_url( + ".crashreporter-symbols.zip" + ) # Check if the URL exists. If not, use none to allow mozcrash to auto-check for symbols try: @@ -178,33 +225,39 @@ class TestingMixin(VirtualenvMixin, AutomationMixin, ResourceMonitoringMixin, self._urlopen(symbols_url, timeout=120) self.symbols_url = symbols_url except Exception as ex: - self.warning("Cannot open symbols url %s (installer url: %s): %s" % - (symbols_url, self.installer_url, ex)) + self.warning( + "Cannot open symbols url %s (installer url: %s): %s" + % (symbols_url, self.installer_url, ex) + ) if raise_on_failure: raise # If no symbols URL can be determined let minidump_stackwalk query the symbols. # As of now this only works for Nightly and release builds. if not self.symbols_url: - self.warning("No symbols_url found. Let minidump_stackwalk query for symbols.") + self.warning( + "No symbols_url found. Let minidump_stackwalk query for symbols." + ) return self.symbols_url def _pre_config_lock(self, rw_config): - for i, (target_file, target_dict) in enumerate(rw_config.all_cfg_files_and_dicts): - if 'developer_config' in target_file: + for i, (target_file, target_dict) in enumerate( + rw_config.all_cfg_files_and_dicts + ): + if "developer_config" in target_file: self._developer_mode_changes(rw_config) def _developer_mode_changes(self, rw_config): - """ This function is called when you append the config called - developer_config.py. This allows you to run a job - outside of the Release Engineering infrastructure. + """This function is called when you append the config called + developer_config.py. This allows you to run a job + outside of the Release Engineering infrastructure. 
- What this functions accomplishes is: - * --installer-url is set - * --test-url is set if needed - * every url is substituted by another external to the - Release Engineering network + What this functions accomplishes is: + * --installer-url is set + * --test-url is set if needed + * every url is substituted by another external to the + Release Engineering network """ c = self.config orig_config = copy.deepcopy(c) @@ -221,15 +274,19 @@ class TestingMixin(VirtualenvMixin, AutomationMixin, ResourceMonitoringMixin, if c.get("installer_url") is None: self.exception("You must use --installer-url with developer_config.py") if c.get("require_test_zip"): - if not c.get('test_url') and not c.get('test_packages_url'): - self.exception("You must use --test-url or --test-packages-url with " - "developer_config.py") + if not c.get("test_url") and not c.get("test_packages_url"): + self.exception( + "You must use --test-url or --test-packages-url with " + "developer_config.py" + ) c["installer_url"] = _replace_url(c["installer_url"], c["replace_urls"]) if c.get("test_url"): c["test_url"] = _replace_url(c["test_url"], c["replace_urls"]) if c.get("test_packages_url"): - c["test_packages_url"] = _replace_url(c["test_packages_url"], c["replace_urls"]) + c["test_packages_url"] = _replace_url( + c["test_packages_url"], c["replace_urls"] + ) for key, value in self.config.items(): if type(value) == str and value.startswith("http"): @@ -240,17 +297,19 @@ class TestingMixin(VirtualenvMixin, AutomationMixin, ResourceMonitoringMixin, get_credentials() def _urlopen(self, url, **kwargs): - ''' + """ This function helps dealing with downloading files while outside of the releng network. - ''' + """ # Code based on http://code.activestate.com/recipes/305288-http-basic-authentication def _urlopen_basic_auth(url, **kwargs): self.info("We want to download this file %s" % url) if not hasattr(self, "https_username"): - self.info("NOTICE: Files downloaded from outside of " - "Release Engineering network require LDAP " - "credentials.") + self.info( + "NOTICE: Files downloaded from outside of " + "Release Engineering network require LDAP " + "credentials." + ) self.https_username, self.https_password = get_credentials() # This creates a password manager @@ -268,7 +327,9 @@ class TestingMixin(VirtualenvMixin, AutomationMixin, ResourceMonitoringMixin, return _urlopen_basic_auth(url, **kwargs) else: # windows certificates need to be refreshed (https://bugs.python.org/issue36011) - if self.platform_name() in ('win64',) and platform.architecture()[0] in ('x64',): + if self.platform_name() in ("win64",) and platform.architecture()[0] in ( + "x64", + ): if self.ssl_context is None: self.ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS) self.ssl_context.load_default_certs() @@ -287,9 +348,11 @@ class TestingMixin(VirtualenvMixin, AutomationMixin, ResourceMonitoringMixin, You can set this by specifying --installer-url URL """ - if (self.config.get("require_test_zip") and - not self.test_url and - not self.test_packages_url): + if ( + self.config.get("require_test_zip") + and not self.test_url + and not self.test_packages_url + ): message += """test_url isn't set! 
You can set this by specifying --test-url URL @@ -299,17 +362,18 @@ You can set this by specifying --test-url URL def _read_packages_manifest(self): dirs = self.query_abs_dirs() - source = self.download_file(self.test_packages_url, - parent_dir=dirs['abs_work_dir'], - error_level=FATAL) + source = self.download_file( + self.test_packages_url, parent_dir=dirs["abs_work_dir"], error_level=FATAL + ) with self.opened(os.path.realpath(source)) as (fh, err): package_requirements = json.load(fh) if not package_requirements or err: - self.fatal("There was an error reading test package requirements from %s " - "requirements: `%s` - error: `%s`" % (source, - package_requirements or 'None', - err or 'No error')) + self.fatal( + "There was an error reading test package requirements from %s " + "requirements: `%s` - error: `%s`" + % (source, package_requirements or "None", err or "No error") + ) return package_requirements def _download_test_packages(self, suite_categories, extract_dirs): @@ -317,29 +381,30 @@ You can set this by specifying --test-url URL # This is a difference in the convention of the configs more than # to how these tests are run, so we pave over these differences here. aliases = { - 'mochitest-chrome': 'mochitest', - 'mochitest-media': 'mochitest', - 'mochitest-plain': 'mochitest', - 'mochitest-plain-gpu': 'mochitest', - 'mochitest-webgl1-core': 'mochitest', - 'mochitest-webgl1-ext': 'mochitest', - 'mochitest-webgl2-core': 'mochitest', - 'mochitest-webgl2-ext': 'mochitest', - 'mochitest-webgl2-deqp': 'mochitest', - 'mochitest-webgpu': 'mochitest', - 'geckoview': 'mochitest', - 'geckoview-junit': 'mochitest', - 'reftest-qr': 'reftest', - 'crashtest': 'reftest', - 'crashtest-qr': 'reftest', - 'reftest-debug': 'reftest', - 'crashtest-debug': 'reftest', + "mochitest-chrome": "mochitest", + "mochitest-media": "mochitest", + "mochitest-plain": "mochitest", + "mochitest-plain-gpu": "mochitest", + "mochitest-webgl1-core": "mochitest", + "mochitest-webgl1-ext": "mochitest", + "mochitest-webgl2-core": "mochitest", + "mochitest-webgl2-ext": "mochitest", + "mochitest-webgl2-deqp": "mochitest", + "mochitest-webgpu": "mochitest", + "geckoview": "mochitest", + "geckoview-junit": "mochitest", + "reftest-qr": "reftest", + "crashtest": "reftest", + "crashtest-qr": "reftest", + "reftest-debug": "reftest", + "crashtest-debug": "reftest", } suite_categories = [aliases.get(name, name) for name in suite_categories] dirs = self.query_abs_dirs() - test_install_dir = dirs.get('abs_test_install_dir', - os.path.join(dirs['abs_work_dir'], 'tests')) + test_install_dir = dirs.get( + "abs_test_install_dir", os.path.join(dirs["abs_work_dir"], "tests") + ) self.mkdir_p(test_install_dir) package_requirements = self._read_packages_manifest() target_packages = [] @@ -360,83 +425,100 @@ You can set this by specifying --test-url URL else: # If we don't harness specific requirements, assume the common zip # has everything we need to run tests for this suite. 
- target_packages.extend(package_requirements['common']) + target_packages.extend(package_requirements["common"]) # eliminate duplicates -- no need to download anything twice target_packages = list(set(target_packages)) - self.info("Downloading packages: %s for test suite categories: %s" % - (target_packages, suite_categories)) + self.info( + "Downloading packages: %s for test suite categories: %s" + % (target_packages, suite_categories) + ) for file_name in target_packages: target_dir = test_install_dir unpack_dirs = extract_dirs if "common.tests" in file_name and isinstance(unpack_dirs, list): # Ensure that the following files are always getting extracted - required_files = ["mach", - "mozinfo.json", - ] + required_files = [ + "mach", + "mozinfo.json", + ] for req_file in required_files: if req_file not in unpack_dirs: - self.info("Adding '{}' for extraction from common.tests archive" - .format(req_file)) + self.info( + "Adding '{}' for extraction from common.tests archive".format( + req_file + ) + ) unpack_dirs.append(req_file) if "jsshell-" in file_name or file_name == "target.jsshell.zip": self.info("Special-casing the jsshell zip file") unpack_dirs = None - target_dir = dirs['abs_test_bin_dir'] + target_dir = dirs["abs_test_bin_dir"] if "web-platform" in file_name: self.info("Extracting everything from web-platform archive") unpack_dirs = None url = self.query_build_dir_url(file_name) - self.download_unpack(url, target_dir, - extract_dirs=unpack_dirs) + self.download_unpack(url, target_dir, extract_dirs=unpack_dirs) def _download_test_zip(self, extract_dirs=None): dirs = self.query_abs_dirs() - test_install_dir = dirs.get('abs_test_install_dir', - os.path.join(dirs['abs_work_dir'], 'tests')) - self.download_unpack(self.test_url, test_install_dir, - extract_dirs=extract_dirs) + test_install_dir = dirs.get( + "abs_test_install_dir", os.path.join(dirs["abs_work_dir"], "tests") + ) + self.download_unpack(self.test_url, test_install_dir, extract_dirs=extract_dirs) def structured_output(self, suite_category): """Defines whether structured logging is in use in this configuration. This may need to be replaced with data from a different config at the resolution of bug 1070041 and related bugs. """ - return ('structured_suites' in self.config and - suite_category in self.config['structured_suites']) + return ( + "structured_suites" in self.config + and suite_category in self.config["structured_suites"] + ) - def get_test_output_parser(self, suite_category, strict=False, - fallback_parser_class=DesktopUnittestOutputParser, - **kwargs): + def get_test_output_parser( + self, + suite_category, + strict=False, + fallback_parser_class=DesktopUnittestOutputParser, + **kwargs + ): """Derive and return an appropriate output parser, either the structured output parser or a fallback based on the type of logging in use as determined by configuration. """ if not self.structured_output(suite_category): if fallback_parser_class is DesktopUnittestOutputParser: - return DesktopUnittestOutputParser(suite_category=suite_category, **kwargs) + return DesktopUnittestOutputParser( + suite_category=suite_category, **kwargs + ) return fallback_parser_class(**kwargs) self.info("Structured output parser in use for %s." 
% suite_category) - return StructuredOutputParser(suite_category=suite_category, strict=strict, **kwargs) + return StructuredOutputParser( + suite_category=suite_category, strict=strict, **kwargs + ) def _download_installer(self): file_name = None if self.installer_path: file_name = self.installer_path dirs = self.query_abs_dirs() - source = self.download_file(self.installer_url, - file_name=file_name, - parent_dir=dirs['abs_work_dir'], - error_level=FATAL) + source = self.download_file( + self.installer_url, + file_name=file_name, + parent_dir=dirs["abs_work_dir"], + error_level=FATAL, + ) self.installer_path = os.path.realpath(source) def _download_and_extract_symbols(self): dirs = self.query_abs_dirs() - if self.config.get('download_symbols') == 'ondemand': + if self.config.get("download_symbols") == "ondemand": self.symbols_url = self.query_symbols_url() self.symbols_path = self.symbols_url return @@ -447,13 +529,13 @@ You can set this by specifying --test-url URL # before being unable to proceed (e.g. debug tests need symbols) self.symbols_url = self.retry( action=self.query_symbols_url, - kwargs={'raise_on_failure': True}, + kwargs={"raise_on_failure": True}, sleeptime=20, error_level=FATAL, error_message="We can't proceed without downloading symbols.", ) if not self.symbols_path: - self.symbols_path = os.path.join(dirs['abs_work_dir'], 'symbols') + self.symbols_path = os.path.join(dirs["abs_work_dir"], "symbols") if self.symbols_url: self.download_unpack(self.symbols_url, self.symbols_path) @@ -466,20 +548,22 @@ You can set this by specifying --test-url URL # See bug 957502 and friends from_ = "http://ftp.mozilla.org" to_ = "https://ftp-ssl.mozilla.org" - for attr in 'symbols_url', 'installer_url', 'test_packages_url', 'test_url': + for attr in "symbols_url", "installer_url", "test_packages_url", "test_url": url = getattr(self, attr) if url and url.startswith(from_): new_url = url.replace(from_, to_) self.info("Replacing url %s -> %s" % (url, new_url)) setattr(self, attr, new_url) - if 'test_url' in self.config: + if "test_url" in self.config: # A user has specified a test_url directly, any test_packages_url will # be ignored. if self.test_packages_url: - self.error('Test data will be downloaded from "%s", the specified test ' - ' package data at "%s" will be ignored.' % - (self.config.get('test_url'), self.test_packages_url)) + self.error( + 'Test data will be downloaded from "%s", the specified test ' + ' package data at "%s" will be ignored.' + % (self.config.get("test_url"), self.test_packages_url) + ) self._download_test_zip(extract_dirs) else: @@ -488,77 +572,84 @@ You can set this by specifying --test-url URL # where the packages manifest is located. This is the case when the # test package manifest isn't set as a property, which is true # for some self-serve jobs and platforms using parse_make_upload. - self.test_packages_url = self.query_prefixed_build_dir_url('.test_packages.json') + self.test_packages_url = self.query_prefixed_build_dir_url( + ".test_packages.json" + ) - suite_categories = suite_categories or ['common'] + suite_categories = suite_categories or ["common"] self._download_test_packages(suite_categories, extract_dirs) self._download_installer() - if self.config.get('download_symbols'): + if self.config.get("download_symbols"): self._download_and_extract_symbols() # create_virtualenv is in VirtualenvMixin. 
def preflight_install(self): if not self.installer_path: - if self.config.get('installer_path'): - self.installer_path = self.config['installer_path'] + if self.config.get("installer_path"): + self.installer_path = self.config["installer_path"] else: - self.fatal("""installer_path isn't set! + self.fatal( + """installer_path isn't set! You can set this by: 1. specifying --installer-path PATH, or 2. running the download-and-extract action -""") +""" + ) if not self.is_python_package_installed("mozInstall"): - self.fatal("""Can't call install() without mozinstall! -Did you run with --create-virtualenv? Is mozinstall in virtualenv_modules?""") + self.fatal( + """Can't call install() without mozinstall! +Did you run with --create-virtualenv? Is mozinstall in virtualenv_modules?""" + ) def install_app(self, app=None, target_dir=None, installer_path=None): """ Dependent on mozinstall """ # install the application cmd = [self.query_python_path("mozinstall")] if app: - cmd.extend(['--app', app]) + cmd.extend(["--app", app]) # Remove the below when we no longer need to support mozinstall 0.3 self.info("Detecting whether we're running mozinstall >=1.0...") - output = self.get_output_from_command(cmd + ['-h']) - if '--source' in output: - cmd.append('--source') + output = self.get_output_from_command(cmd + ["-h"]) + if "--source" in output: + cmd.append("--source") # End remove dirs = self.query_abs_dirs() if not target_dir: - target_dir = dirs.get('abs_app_install_dir', - os.path.join(dirs['abs_work_dir'], - 'application')) + target_dir = dirs.get( + "abs_app_install_dir", os.path.join(dirs["abs_work_dir"], "application") + ) self.mkdir_p(target_dir) if not installer_path: installer_path = self.installer_path - cmd.extend([installer_path, - '--destination', target_dir]) + cmd.extend([installer_path, "--destination", target_dir]) # TODO we'll need some error checking here - return self.get_output_from_command(cmd, halt_on_failure=True, - fatal_exit_code=3) + return self.get_output_from_command( + cmd, halt_on_failure=True, fatal_exit_code=3 + ) def install(self): - self.binary_path = self.install_app(app=self.config.get('application')) + self.binary_path = self.install_app(app=self.config.get("application")) def uninstall_app(self, install_dir=None): """ Dependent on mozinstall """ # uninstall the application - cmd = self.query_exe("mozuninstall", - default=self.query_python_path("mozuninstall"), - return_type="list") + cmd = self.query_exe( + "mozuninstall", + default=self.query_python_path("mozuninstall"), + return_type="list", + ) dirs = self.query_abs_dirs() if not install_dir: - install_dir = dirs.get('abs_app_install_dir', - os.path.join(dirs['abs_work_dir'], - 'application')) + install_dir = dirs.get( + "abs_app_install_dir", os.path.join(dirs["abs_work_dir"], "application") + ) cmd.append(install_dir) # TODO we'll need some error checking here - self.get_output_from_command(cmd, halt_on_failure=True, - fatal_exit_code=3) + self.get_output_from_command(cmd, halt_on_failure=True, fatal_exit_code=3) def uninstall(self): self.uninstall_app() @@ -569,14 +660,15 @@ Did you run with --create-virtualenv? 
Is mozinstall in virtualenv_modules?""") minidump_stackwalk_path = None - if 'MOZ_FETCHES_DIR' in os.environ: + if "MOZ_FETCHES_DIR" in os.environ: minidump_stackwalk_path = os.path.join( - os.environ['MOZ_FETCHES_DIR'], - 'minidump_stackwalk', - 'minidump_stackwalk') + os.environ["MOZ_FETCHES_DIR"], + "minidump_stackwalk", + "minidump_stackwalk", + ) - if self.platform_name() in ('win32', 'win64'): - minidump_stackwalk_path += '.exe' + if self.platform_name() in ("win32", "win64"): + minidump_stackwalk_path += ".exe" if not minidump_stackwalk_path or not os.path.isfile(minidump_stackwalk_path): self.error("minidump_stackwalk path was not fetched?") @@ -626,40 +718,46 @@ Did you run with --create-virtualenv? Is mozinstall in virtualenv_modules?""") # platforms like mac as excutable files may be universal # files containing multiple architectures # NOTE 'enabled' is only here while we have unconsolidated configs - if not suite['enabled']: + if not suite["enabled"]: continue - if suite.get('architectures'): + if suite.get("architectures"): arch = platform.architecture()[0] - if arch not in suite['architectures']: + if arch not in suite["architectures"]: continue - cmd = suite['cmd'] - name = suite['name'] - self.info("Running pre test command %(name)s with '%(cmd)s'" - % {'name': name, 'cmd': ' '.join(cmd)}) - self.run_command(cmd, - cwd=dirs['abs_work_dir'], - error_list=BaseErrorList, - halt_on_failure=suite['halt_on_failure'], - fatal_exit_code=suite.get('fatal_exit_code', 3)) + cmd = suite["cmd"] + name = suite["name"] + self.info( + "Running pre test command %(name)s with '%(cmd)s'" + % {"name": name, "cmd": " ".join(cmd)} + ) + self.run_command( + cmd, + cwd=dirs["abs_work_dir"], + error_list=BaseErrorList, + halt_on_failure=suite["halt_on_failure"], + fatal_exit_code=suite.get("fatal_exit_code", 3), + ) def preflight_run_tests(self): """preflight commands for all tests""" c = self.config - if c.get('run_cmd_checks_enabled'): - self._run_cmd_checks(c.get('preflight_run_cmd_suites', [])) - elif c.get('preflight_run_cmd_suites'): - self.warning("Proceeding without running prerun test commands." - " These are often OS specific and disabling them may" - " result in spurious test results!") + if c.get("run_cmd_checks_enabled"): + self._run_cmd_checks(c.get("preflight_run_cmd_suites", [])) + elif c.get("preflight_run_cmd_suites"): + self.warning( + "Proceeding without running prerun test commands." + " These are often OS specific and disabling them may" + " result in spurious test results!" 
+ ) def postflight_run_tests(self): """preflight commands for all tests""" c = self.config - if c.get('run_cmd_checks_enabled'): - self._run_cmd_checks(c.get('postflight_run_cmd_suites', [])) + if c.get("run_cmd_checks_enabled"): + self._run_cmd_checks(c.get("postflight_run_cmd_suites", [])) def query_abs_dirs(self): abs_dirs = super(TestingMixin, self).query_abs_dirs() - if 'MOZ_FETCHES_DIR' in os.environ: - abs_dirs['abs_fetches_dir'] = os.environ['MOZ_FETCHES_DIR'] + if "MOZ_FETCHES_DIR" in os.environ: + abs_dirs["abs_fetches_dir"] = os.environ["MOZ_FETCHES_DIR"] return abs_dirs diff --git a/testing/mozharness/scripts/desktop_unittest.py b/testing/mozharness/scripts/desktop_unittest.py index b4cc26924edd..b9715ad90e61 100755 --- a/testing/mozharness/scripts/desktop_unittest.py +++ b/testing/mozharness/scripts/desktop_unittest.py @@ -37,146 +37,231 @@ from mozharness.mozilla.testing.errors import HarnessErrorList from mozharness.mozilla.testing.unittest import DesktopUnittestOutputParser from mozharness.mozilla.testing.codecoverage import ( CodeCoverageMixin, - code_coverage_config_options + code_coverage_config_options, ) from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_options -SUITE_CATEGORIES = ['gtest', 'cppunittest', 'jittest', 'mochitest', 'reftest', 'xpcshell'] -SUITE_DEFAULT_E10S = ['mochitest', 'reftest'] -SUITE_NO_E10S = ['xpcshell'] -SUITE_REPEATABLE = ['mochitest', 'reftest'] +SUITE_CATEGORIES = [ + "gtest", + "cppunittest", + "jittest", + "mochitest", + "reftest", + "xpcshell", +] +SUITE_DEFAULT_E10S = ["mochitest", "reftest"] +SUITE_NO_E10S = ["xpcshell"] +SUITE_REPEATABLE = ["mochitest", "reftest"] # DesktopUnittest {{{1 -class DesktopUnittest(TestingMixin, MercurialScript, MozbaseMixin, - CodeCoverageMixin): - config_options = [ - [['--mochitest-suite', ], { - "action": "extend", - "dest": "specified_mochitest_suites", - "type": "string", - "help": "Specify which mochi suite to run. " +class DesktopUnittest(TestingMixin, MercurialScript, MozbaseMixin, CodeCoverageMixin): + config_options = ( + [ + [ + [ + "--mochitest-suite", + ], + { + "action": "extend", + "dest": "specified_mochitest_suites", + "type": "string", + "help": "Specify which mochi suite to run. " "Suites are defined in the config file.\n" - "Examples: 'all', 'plain1', 'plain5', 'chrome', or 'a11y'"} - ], - [['--reftest-suite', ], { - "action": "extend", - "dest": "specified_reftest_suites", - "type": "string", - "help": "Specify which reftest suite to run. " + "Examples: 'all', 'plain1', 'plain5', 'chrome', or 'a11y'", + }, + ], + [ + [ + "--reftest-suite", + ], + { + "action": "extend", + "dest": "specified_reftest_suites", + "type": "string", + "help": "Specify which reftest suite to run. " "Suites are defined in the config file.\n" - "Examples: 'all', 'crashplan', or 'jsreftest'"} - ], - [['--xpcshell-suite', ], { - "action": "extend", - "dest": "specified_xpcshell_suites", - "type": "string", - "help": "Specify which xpcshell suite to run. " + "Examples: 'all', 'crashplan', or 'jsreftest'", + }, + ], + [ + [ + "--xpcshell-suite", + ], + { + "action": "extend", + "dest": "specified_xpcshell_suites", + "type": "string", + "help": "Specify which xpcshell suite to run. " "Suites are defined in the config file\n." - "Examples: 'xpcshell'"} - ], - [['--cppunittest-suite', ], { - "action": "extend", - "dest": "specified_cppunittest_suites", - "type": "string", - "help": "Specify which cpp unittest suite to run. 
" + "Examples: 'xpcshell'", + }, + ], + [ + [ + "--cppunittest-suite", + ], + { + "action": "extend", + "dest": "specified_cppunittest_suites", + "type": "string", + "help": "Specify which cpp unittest suite to run. " "Suites are defined in the config file\n." - "Examples: 'cppunittest'"} - ], - [['--gtest-suite', ], { - "action": "extend", - "dest": "specified_gtest_suites", - "type": "string", - "help": "Specify which gtest suite to run. " + "Examples: 'cppunittest'", + }, + ], + [ + [ + "--gtest-suite", + ], + { + "action": "extend", + "dest": "specified_gtest_suites", + "type": "string", + "help": "Specify which gtest suite to run. " "Suites are defined in the config file\n." - "Examples: 'gtest'"} - ], - [['--jittest-suite', ], { - "action": "extend", - "dest": "specified_jittest_suites", - "type": "string", - "help": "Specify which jit-test suite to run. " + "Examples: 'gtest'", + }, + ], + [ + [ + "--jittest-suite", + ], + { + "action": "extend", + "dest": "specified_jittest_suites", + "type": "string", + "help": "Specify which jit-test suite to run. " "Suites are defined in the config file\n." - "Examples: 'jittest'"} - ], - [['--run-all-suites', ], { - "action": "store_true", - "dest": "run_all_suites", - "default": False, - "help": "This will run all suites that are specified " + "Examples: 'jittest'", + }, + ], + [ + [ + "--run-all-suites", + ], + { + "action": "store_true", + "dest": "run_all_suites", + "default": False, + "help": "This will run all suites that are specified " "in the config file. You do not need to specify " - "any other suites.\nBeware, this may take a while ;)"} - ], - [['--disable-e10s', ], { - "action": "store_false", - "dest": "e10s", - "default": True, - "help": "Run tests without multiple processes (e10s)."} - ], - [['--headless', ], { - "action": "store_true", - "dest": "headless", - "default": False, - "help": "Run tests in headless mode."} - ], - [['--no-random', ], { - "action": "store_true", - "dest": "no_random", - "default": False, - "help": "Run tests with no random intermittents and bisect in case of real failure."} - ], - [["--total-chunks"], { - "action": "store", - "dest": "total_chunks", - "help": "Number of total chunks"} - ], - [["--this-chunk"], { - "action": "store", - "dest": "this_chunk", - "help": "Number of this chunk"} - ], - [["--allow-software-gl-layers"], { - "action": "store_true", - "dest": "allow_software_gl_layers", - "default": False, - "help": "Permits a software GL implementation (such as LLVMPipe) to use " - "the GL compositor."} - ], - [["--enable-webrender"], { - "action": "store_true", - "dest": "enable_webrender", - "default": False, - "help": "Enable the WebRender compositor in Gecko."} - ], - [["--gpu-required"], { - "action": "store_true", - "dest": "gpu_required", - "default": False, - "help": "Run additional verification on modified tests using gpu instances."} - ], - [["--setpref"], { - "action": "append", - "metavar": "PREF=VALUE", - "dest": "extra_prefs", - "default": [], - "help": "Defines an extra user preference."} - ], - [['--repeat', ], { - "action": "store", - "type": "int", - "dest": "repeat", - "default": 0, - "help": "Repeat the tests the given number of times. 
Supported " - "by mochitest, reftest, crashtest, ignored otherwise."} - ], - [["--enable-xorigin-tests"], { - "action": "store_true", - "dest": "enable_xorigin_tests", - "default": False, - "help": "Run tests in a cross origin iframe."} - ], - ] + copy.deepcopy(testing_config_options) + \ - copy.deepcopy(code_coverage_config_options) + "any other suites.\nBeware, this may take a while ;)", + }, + ], + [ + [ + "--disable-e10s", + ], + { + "action": "store_false", + "dest": "e10s", + "default": True, + "help": "Run tests without multiple processes (e10s).", + }, + ], + [ + [ + "--headless", + ], + { + "action": "store_true", + "dest": "headless", + "default": False, + "help": "Run tests in headless mode.", + }, + ], + [ + [ + "--no-random", + ], + { + "action": "store_true", + "dest": "no_random", + "default": False, + "help": "Run tests with no random intermittents and bisect in case of real failure.", # NOQA: E501 + }, + ], + [ + ["--total-chunks"], + { + "action": "store", + "dest": "total_chunks", + "help": "Number of total chunks", + }, + ], + [ + ["--this-chunk"], + { + "action": "store", + "dest": "this_chunk", + "help": "Number of this chunk", + }, + ], + [ + ["--allow-software-gl-layers"], + { + "action": "store_true", + "dest": "allow_software_gl_layers", + "default": False, + "help": "Permits a software GL implementation (such as LLVMPipe) to use " + "the GL compositor.", + }, + ], + [ + ["--enable-webrender"], + { + "action": "store_true", + "dest": "enable_webrender", + "default": False, + "help": "Enable the WebRender compositor in Gecko.", + }, + ], + [ + ["--gpu-required"], + { + "action": "store_true", + "dest": "gpu_required", + "default": False, + "help": "Run additional verification on modified tests using gpu instances.", + }, + ], + [ + ["--setpref"], + { + "action": "append", + "metavar": "PREF=VALUE", + "dest": "extra_prefs", + "default": [], + "help": "Defines an extra user preference.", + }, + ], + [ + [ + "--repeat", + ], + { + "action": "store", + "type": "int", + "dest": "repeat", + "default": 0, + "help": "Repeat the tests the given number of times. 
Supported " + "by mochitest, reftest, crashtest, ignored otherwise.", + }, + ], + [ + ["--enable-xorigin-tests"], + { + "action": "store_true", + "dest": "enable_xorigin_tests", + "default": False, + "help": "Run tests in a cross origin iframe.", + }, + ], + ] + + copy.deepcopy(testing_config_options) + + copy.deepcopy(code_coverage_config_options) + ) def __init__(self, require_config_file=True): # abs_dirs defined already in BaseScript but is here to make pylint happy @@ -184,27 +269,28 @@ class DesktopUnittest(TestingMixin, MercurialScript, MozbaseMixin, super(DesktopUnittest, self).__init__( config_options=self.config_options, all_actions=[ - 'clobber', - 'download-and-extract', - 'create-virtualenv', - 'start-pulseaudio', - 'install', - 'stage-files', - 'run-tests', + "clobber", + "download-and-extract", + "create-virtualenv", + "start-pulseaudio", + "install", + "stage-files", + "run-tests", ], require_config_file=require_config_file, - config={'require_test_zip': True}) + config={"require_test_zip": True}, + ) c = self.config self.global_test_options = [] - self.installer_url = c.get('installer_url') - self.test_url = c.get('test_url') - self.test_packages_url = c.get('test_packages_url') - self.symbols_url = c.get('symbols_url') + self.installer_url = c.get("installer_url") + self.test_url = c.get("test_url") + self.test_packages_url = c.get("test_packages_url") + self.symbols_url = c.get("symbols_url") # this is so mozinstall in install() doesn't bug out if we don't run # the download_and_extract action - self.installer_path = c.get('installer_path') - self.binary_path = c.get('binary_path') + self.installer_path = c.get("installer_path") + self.binary_path = c.get("binary_path") self.abs_app_dir = None self.abs_res_dir = None @@ -214,41 +300,45 @@ class DesktopUnittest(TestingMixin, MercurialScript, MozbaseMixin, perfherder_parts = [] perfherder_options = [] suites = ( - ('specified_mochitest_suites', 'mochitest'), - ('specified_reftest_suites', 'reftest'), - ('specified_xpcshell_suites', 'xpcshell'), - ('specified_cppunittest_suites', 'cppunit'), - ('specified_gtest_suites', 'gtest'), - ('specified_jittest_suites', 'jittest'), + ("specified_mochitest_suites", "mochitest"), + ("specified_reftest_suites", "reftest"), + ("specified_xpcshell_suites", "xpcshell"), + ("specified_cppunittest_suites", "cppunit"), + ("specified_gtest_suites", "gtest"), + ("specified_jittest_suites", "jittest"), ) for s, prefix in suites: if s in c: perfherder_parts.append(prefix) perfherder_parts.extend(c[s]) - if 'this_chunk' in c: - perfherder_parts.append(c['this_chunk']) + if "this_chunk" in c: + perfherder_parts.append(c["this_chunk"]) - if c['e10s']: - perfherder_options.append('e10s') + if c["e10s"]: + perfherder_options.append("e10s") - self.resource_monitor_perfherder_id = ('.'.join(perfherder_parts), - perfherder_options) + self.resource_monitor_perfherder_id = ( + ".".join(perfherder_parts), + perfherder_options, + ) # helper methods {{{2 def _pre_config_lock(self, rw_config): super(DesktopUnittest, self)._pre_config_lock(rw_config) c = self.config - if not c.get('run_all_suites'): + if not c.get("run_all_suites"): return # configs are valid for category in SUITE_CATEGORIES: - specific_suites = c.get('specified_%s_suites' % (category)) + specific_suites = c.get("specified_%s_suites" % (category)) if specific_suites: - if specific_suites != 'all': - self.fatal("Config options are not valid. 
Please ensure" - " that if the '--run-all-suites' flag was enabled," - " then do not specify to run only specific suites " - "like:\n '--mochitest-suite browser-chrome'") + if specific_suites != "all": + self.fatal( + "Config options are not valid. Please ensure" + " that if the '--run-all-suites' flag was enabled," + " then do not specify to run only specific suites " + "like:\n '--mochitest-suite browser-chrome'" + ) def query_abs_dirs(self): if self.abs_dirs: @@ -257,30 +347,45 @@ class DesktopUnittest(TestingMixin, MercurialScript, MozbaseMixin, c = self.config dirs = {} - dirs['abs_work_dir'] = abs_dirs['abs_work_dir'] - dirs['abs_app_install_dir'] = os.path.join(abs_dirs['abs_work_dir'], 'application') - dirs['abs_test_install_dir'] = os.path.join(abs_dirs['abs_work_dir'], 'tests') - dirs['abs_test_extensions_dir'] = os.path.join(dirs['abs_test_install_dir'], 'extensions') - dirs['abs_test_bin_dir'] = os.path.join(dirs['abs_test_install_dir'], 'bin') - dirs['abs_test_bin_plugins_dir'] = os.path.join(dirs['abs_test_bin_dir'], - 'plugins') - dirs['abs_test_bin_components_dir'] = os.path.join(dirs['abs_test_bin_dir'], - 'components') - dirs['abs_mochitest_dir'] = os.path.join(dirs['abs_test_install_dir'], "mochitest") - dirs['abs_reftest_dir'] = os.path.join(dirs['abs_test_install_dir'], "reftest") - dirs['abs_xpcshell_dir'] = os.path.join(dirs['abs_test_install_dir'], "xpcshell") - dirs['abs_cppunittest_dir'] = os.path.join(dirs['abs_test_install_dir'], "cppunittest") - dirs['abs_gtest_dir'] = os.path.join(dirs['abs_test_install_dir'], "gtest") - dirs['abs_blob_upload_dir'] = os.path.join(abs_dirs['abs_work_dir'], - 'blobber_upload_dir') - dirs['abs_jittest_dir'] = os.path.join(dirs['abs_test_install_dir'], - "jit-test", "jit-test") + dirs["abs_work_dir"] = abs_dirs["abs_work_dir"] + dirs["abs_app_install_dir"] = os.path.join( + abs_dirs["abs_work_dir"], "application" + ) + dirs["abs_test_install_dir"] = os.path.join(abs_dirs["abs_work_dir"], "tests") + dirs["abs_test_extensions_dir"] = os.path.join( + dirs["abs_test_install_dir"], "extensions" + ) + dirs["abs_test_bin_dir"] = os.path.join(dirs["abs_test_install_dir"], "bin") + dirs["abs_test_bin_plugins_dir"] = os.path.join( + dirs["abs_test_bin_dir"], "plugins" + ) + dirs["abs_test_bin_components_dir"] = os.path.join( + dirs["abs_test_bin_dir"], "components" + ) + dirs["abs_mochitest_dir"] = os.path.join( + dirs["abs_test_install_dir"], "mochitest" + ) + dirs["abs_reftest_dir"] = os.path.join(dirs["abs_test_install_dir"], "reftest") + dirs["abs_xpcshell_dir"] = os.path.join( + dirs["abs_test_install_dir"], "xpcshell" + ) + dirs["abs_cppunittest_dir"] = os.path.join( + dirs["abs_test_install_dir"], "cppunittest" + ) + dirs["abs_gtest_dir"] = os.path.join(dirs["abs_test_install_dir"], "gtest") + dirs["abs_blob_upload_dir"] = os.path.join( + abs_dirs["abs_work_dir"], "blobber_upload_dir" + ) + dirs["abs_jittest_dir"] = os.path.join( + dirs["abs_test_install_dir"], "jit-test", "jit-test" + ) - if os.path.isabs(c['virtualenv_path']): - dirs['abs_virtualenv_dir'] = c['virtualenv_path'] + if os.path.isabs(c["virtualenv_path"]): + dirs["abs_virtualenv_dir"] = c["virtualenv_path"] else: - dirs['abs_virtualenv_dir'] = os.path.join(abs_dirs['abs_work_dir'], - c['virtualenv_path']) + dirs["abs_virtualenv_dir"] = os.path.join( + abs_dirs["abs_work_dir"], c["virtualenv_path"] + ) abs_dirs.update(dirs) self.abs_dirs = abs_dirs @@ -316,26 +421,33 @@ class DesktopUnittest(TestingMixin, MercurialScript, MozbaseMixin, self.abs_res_dir = abs_app_dir 
return self.abs_res_dir - @PreScriptAction('create-virtualenv') + @PreScriptAction("create-virtualenv") def _pre_create_virtualenv(self, action): dirs = self.query_abs_dirs() - self.register_virtualenv_module(name='mock') - self.register_virtualenv_module(name='simplejson') + self.register_virtualenv_module(name="mock") + self.register_virtualenv_module(name="simplejson") - requirements_files = [os.path.join(dirs['abs_test_install_dir'], - 'config', 'marionette_requirements.txt')] + requirements_files = [ + os.path.join( + dirs["abs_test_install_dir"], "config", "marionette_requirements.txt" + ) + ] - if self._query_specified_suites('mochitest') is not None: + if self._query_specified_suites("mochitest") is not None: # mochitest is the only thing that needs this requirements_files.append( - os.path.join(dirs['abs_mochitest_dir'], - 'websocketprocessbridge', - 'websocketprocessbridge_requirements.txt')) + os.path.join( + dirs["abs_mochitest_dir"], + "websocketprocessbridge", + "websocketprocessbridge_requirements.txt", + ) + ) for requirements_file in requirements_files: - self.register_virtualenv_module(requirements=[requirements_file], - two_pass=True) + self.register_virtualenv_module( + requirements=[requirements_file], two_pass=True + ) def _query_symbols_url(self): """query the full symbols URL based upon binary URL""" @@ -346,13 +458,16 @@ class DesktopUnittest(TestingMixin, MercurialScript, MozbaseMixin, symbols_url = None self.info("finding symbols_url based upon self.installer_url") if self.installer_url: - for ext in ['.zip', '.dmg', '.tar.bz2']: + for ext in [".zip", ".dmg", ".tar.bz2"]: if ext in self.installer_url: symbols_url = self.installer_url.replace( - ext, '.crashreporter-symbols.zip') + ext, ".crashreporter-symbols.zip" + ) if not symbols_url: - self.fatal("self.installer_url was found but symbols_url could \ - not be determined") + self.fatal( + "self.installer_url was found but symbols_url could \ + not be determined" + ) else: self.fatal("self.installer_url was not found in self.config") self.info("setting symbols_url as %s" % (symbols_url)) @@ -360,20 +475,22 @@ class DesktopUnittest(TestingMixin, MercurialScript, MozbaseMixin, return self.symbols_url def _get_mozharness_test_paths(self, suite_category, suite): - test_paths = json.loads(os.environ.get('MOZHARNESS_TEST_PATHS', '""')) + test_paths = json.loads(os.environ.get("MOZHARNESS_TEST_PATHS", '""')) - if '-coverage' in suite: - suite = suite[:suite.index('-coverage')] + if "-coverage" in suite: + suite = suite[: suite.index("-coverage")] if not test_paths or suite not in test_paths: return None suite_test_paths = test_paths[suite] - if suite_category == 'reftest': + if suite_category == "reftest": dirs = self.query_abs_dirs() - suite_test_paths = [os.path.join(dirs['abs_reftest_dir'], 'tests', p) - for p in suite_test_paths] + suite_test_paths = [ + os.path.join(dirs["abs_reftest_dir"], "tests", p) + for p in suite_test_paths + ] return suite_test_paths @@ -381,111 +498,127 @@ class DesktopUnittest(TestingMixin, MercurialScript, MozbaseMixin, if self.binary_path: c = self.config dirs = self.query_abs_dirs() - run_file = c['run_file_names'][suite_category] - base_cmd = [self.query_python_path('python'), '-u'] + run_file = c["run_file_names"][suite_category] + base_cmd = [self.query_python_path("python"), "-u"] base_cmd.append(os.path.join(dirs["abs_%s_dir" % suite_category], run_file)) abs_app_dir = self.query_abs_app_dir() abs_res_dir = self.query_abs_res_dir() raw_log_file, error_summary_file = 
self.get_indexed_logs( - dirs['abs_blob_upload_dir'], suite) + dirs["abs_blob_upload_dir"], suite + ) str_format_values = { - 'binary_path': self.binary_path, - 'symbols_path': self._query_symbols_url(), - 'abs_work_dir': dirs['abs_work_dir'], - 'abs_app_dir': abs_app_dir, - 'abs_res_dir': abs_res_dir, - 'raw_log_file': raw_log_file, - 'error_summary_file': error_summary_file, - 'gtest_dir': os.path.join(dirs['abs_test_install_dir'], - 'gtest'), + "binary_path": self.binary_path, + "symbols_path": self._query_symbols_url(), + "abs_work_dir": dirs["abs_work_dir"], + "abs_app_dir": abs_app_dir, + "abs_res_dir": abs_res_dir, + "raw_log_file": raw_log_file, + "error_summary_file": error_summary_file, + "gtest_dir": os.path.join(dirs["abs_test_install_dir"], "gtest"), } # TestingMixin._download_and_extract_symbols() will set # self.symbols_path when downloading/extracting. if self.symbols_path: - str_format_values['symbols_path'] = self.symbols_path + str_format_values["symbols_path"] = self.symbols_path if suite_category not in SUITE_NO_E10S: - if suite_category in SUITE_DEFAULT_E10S and not c['e10s']: - base_cmd.append('--disable-e10s') - elif suite_category not in SUITE_DEFAULT_E10S and c['e10s']: - base_cmd.append('--e10s') - if c.get('repeat'): + if suite_category in SUITE_DEFAULT_E10S and not c["e10s"]: + base_cmd.append("--disable-e10s") + elif suite_category not in SUITE_DEFAULT_E10S and c["e10s"]: + base_cmd.append("--e10s") + if c.get("repeat"): if suite_category in SUITE_REPEATABLE: - base_cmd.extend(["--repeat=%s" % c.get('repeat')]) + base_cmd.extend(["--repeat=%s" % c.get("repeat")]) else: - self.log("--repeat not supported in {}".format(suite_category), level=WARNING) + self.log( + "--repeat not supported in {}".format(suite_category), + level=WARNING, + ) # Ignore chunking if we have user specified test paths if not (self.verify_enabled or self.per_test_coverage): test_paths = self._get_mozharness_test_paths(suite_category, suite) if test_paths: base_cmd.extend(test_paths) - elif c.get('total_chunks') and c.get('this_chunk'): - base_cmd.extend(['--total-chunks', c['total_chunks'], - '--this-chunk', c['this_chunk']]) + elif c.get("total_chunks") and c.get("this_chunk"): + base_cmd.extend( + [ + "--total-chunks", + c["total_chunks"], + "--this-chunk", + c["this_chunk"], + ] + ) - if c['no_random']: + if c["no_random"]: if suite_category == "mochitest": - base_cmd.append('--bisect-chunk=default') + base_cmd.append("--bisect-chunk=default") else: - self.warning("--no-random does not currently work with suites other than " - "mochitest.") + self.warning( + "--no-random does not currently work with suites other than " + "mochitest." 
+ ) - if c['headless']: - base_cmd.append('--headless') + if c["headless"]: + base_cmd.append("--headless") - if c['enable_webrender']: - base_cmd.append('--enable-webrender') + if c["enable_webrender"]: + base_cmd.append("--enable-webrender") - if c['enable_xorigin_tests']: - base_cmd.append('--enable-xorigin-tests') + if c["enable_xorigin_tests"]: + base_cmd.append("--enable-xorigin-tests") - if c['extra_prefs']: - base_cmd.extend(['--setpref={}'.format(p) for p in c['extra_prefs']]) + if c["extra_prefs"]: + base_cmd.extend(["--setpref={}".format(p) for p in c["extra_prefs"]]) # set pluginsPath - abs_res_plugins_dir = os.path.join(abs_res_dir, 'plugins') - str_format_values['test_plugin_path'] = abs_res_plugins_dir + abs_res_plugins_dir = os.path.join(abs_res_dir, "plugins") + str_format_values["test_plugin_path"] = abs_res_plugins_dir if suite_category not in c["suite_definitions"]: self.fatal("'%s' not defined in the config!") - if suite in ('browser-chrome-coverage', 'xpcshell-coverage', - 'mochitest-devtools-chrome-coverage', 'plain-coverage'): - base_cmd.append('--jscov-dir-prefix=%s' % - dirs['abs_blob_upload_dir']) + if suite in ( + "browser-chrome-coverage", + "xpcshell-coverage", + "mochitest-devtools-chrome-coverage", + "plain-coverage", + ): + base_cmd.append("--jscov-dir-prefix=%s" % dirs["abs_blob_upload_dir"]) options = c["suite_definitions"][suite_category]["options"] if options: for option in options: option = option % str_format_values - if not option.endswith('None'): + if not option.endswith("None"): base_cmd.append(option) if self.structured_output( - suite_category, - self._query_try_flavor(suite_category, suite) + suite_category, self._query_try_flavor(suite_category, suite) ): base_cmd.append("--log-raw=-") return base_cmd else: - self.warning("Suite options for %s could not be determined." - "\nIf you meant to have options for this suite, " - "please make sure they are specified in your " - "config under %s_options" % - (suite_category, suite_category)) + self.warning( + "Suite options for %s could not be determined." + "\nIf you meant to have options for this suite, " + "please make sure they are specified in your " + "config under %s_options" % (suite_category, suite_category) + ) return base_cmd else: - self.fatal("'binary_path' could not be determined.\n This should " - "be like '/path/build/application/firefox/firefox'" - "\nIf you are running this script without the 'install' " - "action (where binary_path is set), please ensure you are" - " either:\n(1) specifying it in the config file under " - "binary_path\n(2) specifying it on command line with the" - " '--binary-path' flag") + self.fatal( + "'binary_path' could not be determined.\n This should " + "be like '/path/build/application/firefox/firefox'" + "\nIf you are running this script without the 'install' " + "action (where binary_path is set), please ensure you are" + " either:\n(1) specifying it in the config file under " + "binary_path\n(2) specifying it on command line with the" + " '--binary-path' flag" + ) def _query_specified_suites(self, category): """Checks if the provided suite does indeed exist. @@ -496,16 +629,18 @@ class DesktopUnittest(TestingMixin, MercurialScript, MozbaseMixin, Otherwise, do not run any suites and return a fatal error. 
""" c = self.config - all_suites = c.get('all_{}_suites'.format(category), None) - specified_suites = c.get('specified_{}_suites'.format(category), None) + all_suites = c.get("all_{}_suites".format(category), None) + specified_suites = c.get("specified_{}_suites".format(category), None) # Bug 1603842 - disallow selection of more than 1 suite at at time if specified_suites is None: # Path taken by test-verify return self.query_per_test_category_suites(category, all_suites) if specified_suites and len(specified_suites) > 1: - self.fatal("""Selection of multiple suites is not permitted. \ - Please select at most 1 test suite.""") + self.fatal( + """Selection of multiple suites is not permitted. \ + Please select at most 1 test suite.""" + ) return # Normal path taken by most test suites as only one suite is specified @@ -516,35 +651,40 @@ class DesktopUnittest(TestingMixin, MercurialScript, MozbaseMixin, def _query_try_flavor(self, category, suite): flavors = { - "mochitest": [("plain.*", "mochitest"), - ("browser-chrome.*", "browser-chrome"), - ("mochitest-devtools-chrome.*", "devtools-chrome"), - ("chrome", "chrome")], + "mochitest": [ + ("plain.*", "mochitest"), + ("browser-chrome.*", "browser-chrome"), + ("mochitest-devtools-chrome.*", "devtools-chrome"), + ("chrome", "chrome"), + ], "xpcshell": [("xpcshell", "xpcshell")], - "reftest": [("reftest", "reftest"), - ("crashtest", "crashtest")] + "reftest": [("reftest", "reftest"), ("crashtest", "crashtest")], } for suite_pattern, flavor in flavors.get(category, []): if re.compile(suite_pattern).match(suite): return flavor def structured_output(self, suite_category, flavor=None): - unstructured_flavors = self.config.get('unstructured_flavors') + unstructured_flavors = self.config.get("unstructured_flavors") if not unstructured_flavors: return True if suite_category not in unstructured_flavors: return True - if not unstructured_flavors.get(suite_category) or \ - flavor in unstructured_flavors.get(suite_category): + if not unstructured_flavors.get( + suite_category + ) or flavor in unstructured_flavors.get(suite_category): return False return True - def get_test_output_parser(self, suite_category, flavor=None, strict=False, - **kwargs): + def get_test_output_parser( + self, suite_category, flavor=None, strict=False, **kwargs + ): if not self.structured_output(suite_category, flavor): return DesktopUnittestOutputParser(suite_category=suite_category, **kwargs) self.info("Structured output parser in use for %s." % suite_category) - return StructuredOutputParser(suite_category=suite_category, strict=strict, **kwargs) + return StructuredOutputParser( + suite_category=suite_category, strict=strict, **kwargs + ) # Actions {{{2 @@ -554,15 +694,15 @@ class DesktopUnittest(TestingMixin, MercurialScript, MozbaseMixin, # preflight_install is in TestingMixin. # install is in TestingMixin. 
- @PreScriptAction('download-and-extract') + @PreScriptAction("download-and-extract") def _pre_download_and_extract(self, action): """Abort if --artifact try syntax is used with compiled-code tests""" - dir = self.query_abs_dirs()['abs_blob_upload_dir'] + dir = self.query_abs_dirs()["abs_blob_upload_dir"] self.mkdir_p(dir) - if not self.try_message_has_flag('artifact'): + if not self.try_message_has_flag("artifact"): return - self.info('Artifact build requested in try syntax.') + self.info("Artifact build requested in try syntax.") rejected = [] compiled_code_suites = [ "cppunit", @@ -577,9 +717,11 @@ class DesktopUnittest(TestingMixin, MercurialScript, MozbaseMixin, break if rejected: self.record_status(TBPL_EXCEPTION) - self.fatal("There are specified suites that are incompatible with " - "--artifact try syntax flag: {}".format(', '.join(rejected)), - exit_code=self.return_code) + self.fatal( + "There are specified suites that are incompatible with " + "--artifact try syntax flag: {}".format(", ".join(rejected)), + exit_code=self.return_code, + ) def download_and_extract(self): """ @@ -590,42 +732,48 @@ class DesktopUnittest(TestingMixin, MercurialScript, MozbaseMixin, extract_dirs = None - if c.get('run_all_suites'): + if c.get("run_all_suites"): target_categories = SUITE_CATEGORIES else: - target_categories = [cat for cat in SUITE_CATEGORIES - if self._query_specified_suites(cat) is not None] - super(DesktopUnittest, self).download_and_extract(extract_dirs=extract_dirs, - suite_categories=target_categories) + target_categories = [ + cat + for cat in SUITE_CATEGORIES + if self._query_specified_suites(cat) is not None + ] + super(DesktopUnittest, self).download_and_extract( + extract_dirs=extract_dirs, suite_categories=target_categories + ) def start_pulseaudio(self): command = [] # Implies that underlying system is Linux. - if (os.environ.get('NEED_PULSEAUDIO') == 'true'): - command.extend([ - 'pulseaudio', - '--daemonize', - '--log-level=4', - '--log-time=1', - '-vvvvv', - '--exit-idle-time=-1' - ]) + if os.environ.get("NEED_PULSEAUDIO") == "true": + command.extend( + [ + "pulseaudio", + "--daemonize", + "--log-level=4", + "--log-time=1", + "-vvvvv", + "--exit-idle-time=-1", + ] + ) # Only run the initialization for Debian. # Ubuntu appears to have an alternate method of starting pulseaudio. if self._is_debian(): - self._kill_named_proc('pulseaudio') + self._kill_named_proc("pulseaudio") self.run_command(command) # All Linux systems need module-null-sink to be loaded, otherwise # media tests fail. - self.run_command('pactl load-module module-null-sink') - self.run_command('pactl list modules short') + self.run_command("pactl load-module module-null-sink") + self.run_command("pactl list modules short") def stage_files(self): for category in SUITE_CATEGORIES: suites = self._query_specified_suites(category) - stage = getattr(self, '_stage_{}'.format(category), None) + stage = getattr(self, "_stage_{}".format(category), None) if suites and stage: stage(suites) @@ -636,47 +784,54 @@ class DesktopUnittest(TestingMixin, MercurialScript, MozbaseMixin, # For mac these directories are in Contents/Resources, on other # platforms abs_res_dir will point to abs_app_dir. 
abs_res_dir = self.query_abs_res_dir() - abs_res_components_dir = os.path.join(abs_res_dir, 'components') - abs_res_plugins_dir = os.path.join(abs_res_dir, 'plugins') - abs_res_extensions_dir = os.path.join(abs_res_dir, 'extensions') + abs_res_components_dir = os.path.join(abs_res_dir, "components") + abs_res_plugins_dir = os.path.join(abs_res_dir, "plugins") + abs_res_extensions_dir = os.path.join(abs_res_dir, "extensions") if bin_name: - src = os.path.join(dirs['abs_test_bin_dir'], bin_name) + src = os.path.join(dirs["abs_test_bin_dir"], bin_name) if os.path.exists(src): - self.info('copying %s to %s' % (src, os.path.join(abs_app_dir, bin_name))) + self.info( + "copying %s to %s" % (src, os.path.join(abs_app_dir, bin_name)) + ) shutil.copy2(src, os.path.join(abs_app_dir, bin_name)) elif fail_if_not_exists: - raise OSError('File %s not found' % src) - self.copytree(dirs['abs_test_bin_components_dir'], - abs_res_components_dir, - overwrite='overwrite_if_exists') + raise OSError("File %s not found" % src) + self.copytree( + dirs["abs_test_bin_components_dir"], + abs_res_components_dir, + overwrite="overwrite_if_exists", + ) self.mkdir_p(abs_res_plugins_dir) - self.copytree(dirs['abs_test_bin_plugins_dir'], - abs_res_plugins_dir, - overwrite='overwrite_if_exists') - if os.path.isdir(dirs['abs_test_extensions_dir']): + self.copytree( + dirs["abs_test_bin_plugins_dir"], + abs_res_plugins_dir, + overwrite="overwrite_if_exists", + ) + if os.path.isdir(dirs["abs_test_extensions_dir"]): self.mkdir_p(abs_res_extensions_dir) - self.copytree(dirs['abs_test_extensions_dir'], - abs_res_extensions_dir, - overwrite='overwrite_if_exists') + self.copytree( + dirs["abs_test_extensions_dir"], + abs_res_extensions_dir, + overwrite="overwrite_if_exists", + ) def _stage_xpcshell(self, suites): - self._stage_files(self.config['xpcshell_name']) + self._stage_files(self.config["xpcshell_name"]) # http3server isn't built for Windows tests or Linux asan/tsan # builds. Only stage if the `http3server_name` config is set and if # the file actually exists. 
- if self.config.get('http3server_name'): - self._stage_files(self.config['http3server_name'], - fail_if_not_exists=False) + if self.config.get("http3server_name"): + self._stage_files(self.config["http3server_name"], fail_if_not_exists=False) def _stage_cppunittest(self, suites): abs_res_dir = self.query_abs_res_dir() dirs = self.query_abs_dirs() - abs_cppunittest_dir = dirs['abs_cppunittest_dir'] + abs_cppunittest_dir = dirs["abs_cppunittest_dir"] # move manifest and js fils to resources dir, where tests expect them - files = glob.glob(os.path.join(abs_cppunittest_dir, '*.js')) - files.extend(glob.glob(os.path.join(abs_cppunittest_dir, '*.manifest'))) + files = glob.glob(os.path.join(abs_cppunittest_dir, "*.js")) + files.extend(glob.glob(os.path.join(abs_cppunittest_dir, "*.manifest"))) for f in files: self.move(f, abs_res_dir) @@ -684,22 +839,24 @@ class DesktopUnittest(TestingMixin, MercurialScript, MozbaseMixin, abs_res_dir = self.query_abs_res_dir() abs_app_dir = self.query_abs_app_dir() dirs = self.query_abs_dirs() - abs_gtest_dir = dirs['abs_gtest_dir'] - dirs['abs_test_bin_dir'] = os.path.join(dirs['abs_test_install_dir'], 'bin') + abs_gtest_dir = dirs["abs_gtest_dir"] + dirs["abs_test_bin_dir"] = os.path.join(dirs["abs_test_install_dir"], "bin") - files = glob.glob(os.path.join(dirs['abs_test_bin_plugins_dir'], 'gmp-*')) - files.append(os.path.join(abs_gtest_dir, 'dependentlibs.list.gtest')) + files = glob.glob(os.path.join(dirs["abs_test_bin_plugins_dir"], "gmp-*")) + files.append(os.path.join(abs_gtest_dir, "dependentlibs.list.gtest")) for f in files: self.move(f, abs_res_dir) - self.copytree(os.path.join(abs_gtest_dir, 'gtest_bin'), - os.path.join(abs_app_dir)) + self.copytree( + os.path.join(abs_gtest_dir, "gtest_bin"), os.path.join(abs_app_dir) + ) def _kill_proc_tree(self, pid): # Kill a process tree (including grandchildren) with signal.SIGTERM try: import signal import psutil + if pid == os.getpid(): return (None, None) @@ -714,25 +871,27 @@ class DesktopUnittest(TestingMixin, MercurialScript, MozbaseMixin, timeout = 60 gone, alive = psutil.wait_procs(children, timeout=timeout) for p in gone: - self.info('psutil found pid %s dead' % p.pid) + self.info("psutil found pid %s dead" % p.pid) for p in alive: - self.error('failed to kill pid %d after %d' % (p.pid, timeout)) + self.error("failed to kill pid %d after %d" % (p.pid, timeout)) return (gone, alive) except Exception as e: - self.error('Exception while trying to kill process tree: %s' % str(e)) + self.error("Exception while trying to kill process tree: %s" % str(e)) def _kill_named_proc(self, pname): try: import psutil except Exception as e: - self.info("Error importing psutil, not killing process %s: %s" % pname, str(e)) + self.info( + "Error importing psutil, not killing process %s: %s" % pname, str(e) + ) return for proc in psutil.process_iter(): try: if proc.name() == pname: - procd = proc.as_dict(attrs=['pid', 'ppid', 'name', 'username']) + procd = proc.as_dict(attrs=["pid", "ppid", "name", "username"]) self.info("in _kill_named_proc, killing %s" % procd) self._kill_proc_tree(proc.pid) except Exception as e: @@ -742,18 +901,17 @@ class DesktopUnittest(TestingMixin, MercurialScript, MozbaseMixin, def _remove_xen_clipboard(self): """ - When running on a Windows 7 VM, we have XenDPriv.exe running which - interferes with the clipboard, lets terminate this process and remove - the binary so it doesn't restart + When running on a Windows 7 VM, we have XenDPriv.exe running which + interferes with the clipboard, lets 
terminate this process and remove + the binary so it doesn't restart """ if not self._is_windows(): return - self._kill_named_proc('XenDPriv.exe') - xenpath = os.path.join(os.environ['ProgramFiles'], - 'Citrix', - 'XenTools', - 'XenDPriv.exe') + self._kill_named_proc("XenDPriv.exe") + xenpath = os.path.join( + os.environ["ProgramFiles"], "Citrix", "XenTools", "XenDPriv.exe" + ) try: if os.path.isfile(xenpath): os.remove(xenpath) @@ -762,11 +920,12 @@ class DesktopUnittest(TestingMixin, MercurialScript, MozbaseMixin, def _report_system_info(self): """ - Create the system-info.log artifact file, containing a variety of - system information that might be useful in diagnosing test failures. + Create the system-info.log artifact file, containing a variety of + system information that might be useful in diagnosing test failures. """ try: import psutil + path = os.path.join(dir, "system-info.log") with open(path, "w") as f: f.write("System info collected at %s\n\n" % datetime.now()) @@ -787,8 +946,10 @@ class DesktopUnittest(TestingMixin, MercurialScript, MozbaseMixin, try: for p in psutil.process_iter(): ctime = str(datetime.fromtimestamp(p.create_time())) - f.write(" PID %d %s %s created at %s\n" % - (p.pid, p.name(), str(p.cmdline()), ctime)) + f.write( + " PID %d %s %s created at %s\n" + % (p.pid, p.name(), str(p.cmdline()), ctime) + ) except Exception: f.write("Exception getting process info: %s\n" % sys.exc_info()[0]) except Exception: @@ -807,9 +968,9 @@ class DesktopUnittest(TestingMixin, MercurialScript, MozbaseMixin, break def get_timeout_for_category(self, suite_category): - if suite_category == 'cppunittest': + if suite_category == "cppunittest": return 2500 - return self.config["suite_definitions"][suite_category].get('run_timeout', 1000) + return self.config["suite_definitions"][suite_category].get("run_timeout", 1000) def _run_category_suites(self, suite_category): """run suite(s) to a specific category""" @@ -826,31 +987,31 @@ class DesktopUnittest(TestingMixin, MercurialScript, MozbaseMixin, executed_too_many_tests = False if suites: - self.info('#### Running %s suites' % suite_category) + self.info("#### Running %s suites" % suite_category) for suite in suites: if executed_too_many_tests and not self.per_test_coverage: return False replace_dict = { - 'abs_app_dir': abs_app_dir, - + "abs_app_dir": abs_app_dir, # Mac specific, but points to abs_app_dir on other # platforms. - 'abs_res_dir': abs_res_dir, + "abs_res_dir": abs_res_dir, } options_list = [] - env = { - 'TEST_SUITE': suite - } + env = {"TEST_SUITE": suite} if isinstance(suites[suite], dict): - options_list = suites[suite].get('options', []) - if (self.verify_enabled or self.per_test_coverage or - self._get_mozharness_test_paths(suite_category, suite)): + options_list = suites[suite].get("options", []) + if ( + self.verify_enabled + or self.per_test_coverage + or self._get_mozharness_test_paths(suite_category, suite) + ): # Ignore tests list in modes where we are running specific tests. 
tests_list = [] else: - tests_list = suites[suite].get('tests', []) - env = copy.deepcopy(suites[suite].get('env', {})) + tests_list = suites[suite].get("tests", []) + env = copy.deepcopy(suites[suite].get("env", {})) else: options_list = suites[suite] tests_list = [] @@ -858,36 +1019,40 @@ class DesktopUnittest(TestingMixin, MercurialScript, MozbaseMixin, flavor = self._query_try_flavor(suite_category, suite) try_options, try_tests = self.try_args(flavor) - suite_name = suite_category + '-' + suite + suite_name = suite_category + "-" + suite tbpl_status, log_level = None, None error_list = BaseErrorList + HarnessErrorList - parser = self.get_test_output_parser(suite_category, - flavor=flavor, - config=self.config, - error_list=error_list, - log_obj=self.log_obj) + parser = self.get_test_output_parser( + suite_category, + flavor=flavor, + config=self.config, + error_list=error_list, + log_obj=self.log_obj, + ) if suite_category == "reftest": ref_formatter = imp.load_source( "ReftestFormatter", os.path.abspath( - os.path.join(dirs["abs_reftest_dir"], "output.py"))) + os.path.join(dirs["abs_reftest_dir"], "output.py") + ), + ) parser.formatter = ref_formatter.ReftestFormatter() if self.query_minidump_stackwalk(): - env['MINIDUMP_STACKWALK'] = self.minidump_stackwalk_path - if self.config['nodejs_path']: - env['MOZ_NODE_PATH'] = self.config['nodejs_path'] - env['MOZ_UPLOAD_DIR'] = self.query_abs_dirs()['abs_blob_upload_dir'] - env['MINIDUMP_SAVE_PATH'] = self.query_abs_dirs()['abs_blob_upload_dir'] - env['RUST_BACKTRACE'] = 'full' - if not os.path.isdir(env['MOZ_UPLOAD_DIR']): - self.mkdir_p(env['MOZ_UPLOAD_DIR']) + env["MINIDUMP_STACKWALK"] = self.minidump_stackwalk_path + if self.config["nodejs_path"]: + env["MOZ_NODE_PATH"] = self.config["nodejs_path"] + env["MOZ_UPLOAD_DIR"] = self.query_abs_dirs()["abs_blob_upload_dir"] + env["MINIDUMP_SAVE_PATH"] = self.query_abs_dirs()["abs_blob_upload_dir"] + env["RUST_BACKTRACE"] = "full" + if not os.path.isdir(env["MOZ_UPLOAD_DIR"]): + self.mkdir_p(env["MOZ_UPLOAD_DIR"]) - if self.config['allow_software_gl_layers']: - env['MOZ_LAYERS_ALLOW_SOFTWARE_GL'] = '1' + if self.config["allow_software_gl_layers"]: + env["MOZ_LAYERS_ALLOW_SOFTWARE_GL"] = "1" - env['STYLO_THREADS'] = '4' + env["STYLO_THREADS"] = "4" env = self.query_env(partial_env=env, log_level=INFO) cmd_timeout = self.get_timeout_for_category(suite_category) @@ -897,8 +1062,11 @@ class DesktopUnittest(TestingMixin, MercurialScript, MozbaseMixin, # Make sure baseline code coverage tests are never # skipped and that having them run has no influence # on the max number of actual tests that are to be run. - is_baseline_test = 'baselinecoverage' in per_test_args[-1] \ - if self.per_test_coverage else False + is_baseline_test = ( + "baselinecoverage" in per_test_args[-1] + if self.per_test_coverage + else False + ) if executed_too_many_tests and not is_baseline_test: continue @@ -907,8 +1075,10 @@ class DesktopUnittest(TestingMixin, MercurialScript, MozbaseMixin, # Running tests has run out of time. That is okay! Stop running # them so that a task timeout is not triggered, and so that # (partial) results are made available in a timely manner. - self.info("TinderboxPrint: Running tests took too long: Not all tests " - "were executed.<br/>") + self.info( + "TinderboxPrint: Running tests took too long: Not all tests " + "were executed.<br/>" + ) # Signal per-test time exceeded, to break out of suites and # suite categories loops also. 
return False @@ -917,20 +1087,26 @@ class DesktopUnittest(TestingMixin, MercurialScript, MozbaseMixin, # otherwise updated at once, there probably is not enough time # to run all tests, and attempting to do so may cause other # problems, such as generating too much log output. - self.info("TinderboxPrint: Too many modified tests: Not all tests " - "were executed.<br/>") + self.info( + "TinderboxPrint: Too many modified tests: Not all tests " + "were executed.<br/>" + ) executed_too_many_tests = True executed_tests = executed_tests + 1 abs_base_cmd = self._query_abs_base_cmd(suite_category, suite) cmd = abs_base_cmd[:] - cmd.extend(self.query_options(options_list, - try_options, - str_format_values=replace_dict)) - cmd.extend(self.query_tests_args(tests_list, - try_tests, - str_format_values=replace_dict)) + cmd.extend( + self.query_options( + options_list, try_options, str_format_values=replace_dict + ) + ) + cmd.extend( + self.query_tests_args( + tests_list, try_tests, str_format_values=replace_dict + ) + ) final_cmd = copy.copy(cmd) final_cmd.extend(per_test_args) @@ -940,13 +1116,18 @@ class DesktopUnittest(TestingMixin, MercurialScript, MozbaseMixin, if self.per_test_coverage: self.set_coverage_env(final_env) - return_code = self.run_command(final_cmd, cwd=dirs['abs_work_dir'], - output_timeout=cmd_timeout, - output_parser=parser, - env=final_env) + return_code = self.run_command( + final_cmd, + cwd=dirs["abs_work_dir"], + output_timeout=cmd_timeout, + output_parser=parser, + env=final_env, + ) if self.per_test_coverage: - self.add_per_test_coverage_report(final_env, suite, per_test_args[-1]) + self.add_per_test_coverage_report( + final_env, suite, per_test_args[-1] + ) # mochitest, reftest, and xpcshell suites do not return # appropriate return codes. 
Therefore, we must parse the output @@ -959,35 +1140,42 @@ class DesktopUnittest(TestingMixin, MercurialScript, MozbaseMixin, # 3) checking to see if the return code is in success_codes success_codes = None - if (suite_category == 'reftest' - and '32bit' in platform.architecture() - and platform.system() == "Windows"): + if ( + suite_category == "reftest" + and "32bit" in platform.architecture() + and platform.system() == "Windows" + ): # see bug 1120644, 1526777, 1531499 success_codes = [1] - tbpl_status, log_level, summary = parser.evaluate_parser(return_code, - success_codes, - summary) + tbpl_status, log_level, summary = parser.evaluate_parser( + return_code, success_codes, summary + ) parser.append_tinderboxprint_line(suite_name) self.record_status(tbpl_status, level=log_level) if len(per_test_args) > 0: - self.log_per_test_status(per_test_args[-1], tbpl_status, log_level) + self.log_per_test_status( + per_test_args[-1], tbpl_status, log_level + ) if tbpl_status == TBPL_RETRY: self.info("Per-test run abandoned due to RETRY status") return False else: - self.log("The %s suite: %s ran with return status: %s" % - (suite_category, suite, tbpl_status), level=log_level) + self.log( + "The %s suite: %s ran with return status: %s" + % (suite_category, suite, tbpl_status), + level=log_level, + ) if executed_too_many_tests: return False else: - self.debug('There were no suites to run for %s' % suite_category) + self.debug("There were no suites to run for %s" % suite_category) return True # main {{{1 -if __name__ == '__main__': +if __name__ == "__main__": desktop_unittest = DesktopUnittest() desktop_unittest.run_and_exit() diff --git a/testing/mozharness/scripts/marionette.py b/testing/mozharness/scripts/marionette.py index 8fcb30d0f7c5..5b051fa6f4ee 100755 --- a/testing/mozharness/scripts/marionette.py +++ b/testing/mozharness/scripts/marionette.py @@ -23,179 +23,224 @@ from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_opt from mozharness.mozilla.testing.unittest import TestSummaryOutputParserHelper from mozharness.mozilla.testing.codecoverage import ( CodeCoverageMixin, - code_coverage_config_options + code_coverage_config_options, ) from mozharness.mozilla.testing.errors import HarnessErrorList from mozharness.mozilla.structuredlog import StructuredOutputParser -class MarionetteTest(TestingMixin, MercurialScript, TransferMixin, - CodeCoverageMixin): - config_options = [[ - ["--application"], - {"action": "store", - "dest": "application", - "default": None, - "help": "application name of binary" - } - ], [ - ["--app-arg"], - {"action": "store", - "dest": "app_arg", - "default": None, - "help": "Optional command-line argument to pass to the browser" - } - ], [ - ["--marionette-address"], - {"action": "store", - "dest": "marionette_address", - "default": None, - "help": "The host:port of the Marionette server running inside Gecko. 
" - "Unused for emulator testing", - } - ], [ - ["--emulator"], - {"action": "store", - "type": "choice", - "choices": ['arm', 'x86'], - "dest": "emulator", - "default": None, - "help": "Use an emulator for testing", - } - ], [ - ["--test-manifest"], - {"action": "store", - "dest": "test_manifest", - "default": "unit-tests.ini", - "help": "Path to test manifest to run relative to the Marionette " - "tests directory", - } - ], [ - ["--total-chunks"], - {"action": "store", - "dest": "total_chunks", - "help": "Number of total chunks", - } - ], [ - ["--this-chunk"], - {"action": "store", - "dest": "this_chunk", - "help": "Number of this chunk", - } - ], [ - ["--setpref"], - {"action": "append", - "metavar": "PREF=VALUE", - "dest": "extra_prefs", - "default": [], - "help": "Extra user prefs.", - } - ], [ - ["--headless"], - {"action": "store_true", - "dest": "headless", - "default": False, - "help": "Run tests in headless mode.", - } - ], [ - ["--headless-width"], - {"action": "store", - "dest": "headless_width", - "default": "1600", - "help": "Specify headless virtual screen width (default: 1600).", - } - ], [ - ["--headless-height"], - {"action": "store", - "dest": "headless_height", - "default": "1200", - "help": "Specify headless virtual screen height (default: 1200).", - } - ], [ - ["--allow-software-gl-layers"], - {"action": "store_true", - "dest": "allow_software_gl_layers", - "default": False, - "help": "Permits a software GL implementation (such as LLVMPipe) to use the GL compositor." - } - ], [ - ["--enable-webrender"], - {"action": "store_true", - "dest": "enable_webrender", - "default": False, - "help": "Enable the WebRender compositor in Gecko." - } - ]] + copy.deepcopy(testing_config_options) \ +class MarionetteTest(TestingMixin, MercurialScript, TransferMixin, CodeCoverageMixin): + config_options = ( + [ + [ + ["--application"], + { + "action": "store", + "dest": "application", + "default": None, + "help": "application name of binary", + }, + ], + [ + ["--app-arg"], + { + "action": "store", + "dest": "app_arg", + "default": None, + "help": "Optional command-line argument to pass to the browser", + }, + ], + [ + ["--marionette-address"], + { + "action": "store", + "dest": "marionette_address", + "default": None, + "help": "The host:port of the Marionette server running inside Gecko. 
" + "Unused for emulator testing", + }, + ], + [ + ["--emulator"], + { + "action": "store", + "type": "choice", + "choices": ["arm", "x86"], + "dest": "emulator", + "default": None, + "help": "Use an emulator for testing", + }, + ], + [ + ["--test-manifest"], + { + "action": "store", + "dest": "test_manifest", + "default": "unit-tests.ini", + "help": "Path to test manifest to run relative to the Marionette " + "tests directory", + }, + ], + [ + ["--total-chunks"], + { + "action": "store", + "dest": "total_chunks", + "help": "Number of total chunks", + }, + ], + [ + ["--this-chunk"], + { + "action": "store", + "dest": "this_chunk", + "help": "Number of this chunk", + }, + ], + [ + ["--setpref"], + { + "action": "append", + "metavar": "PREF=VALUE", + "dest": "extra_prefs", + "default": [], + "help": "Extra user prefs.", + }, + ], + [ + ["--headless"], + { + "action": "store_true", + "dest": "headless", + "default": False, + "help": "Run tests in headless mode.", + }, + ], + [ + ["--headless-width"], + { + "action": "store", + "dest": "headless_width", + "default": "1600", + "help": "Specify headless virtual screen width (default: 1600).", + }, + ], + [ + ["--headless-height"], + { + "action": "store", + "dest": "headless_height", + "default": "1200", + "help": "Specify headless virtual screen height (default: 1200).", + }, + ], + [ + ["--allow-software-gl-layers"], + { + "action": "store_true", + "dest": "allow_software_gl_layers", + "default": False, + "help": "Permits a software GL implementation (such as LLVMPipe) to use the GL compositor.", # NOQA: E501 + }, + ], + [ + ["--enable-webrender"], + { + "action": "store_true", + "dest": "enable_webrender", + "default": False, + "help": "Enable the WebRender compositor in Gecko.", + }, + ], + ] + + copy.deepcopy(testing_config_options) + copy.deepcopy(code_coverage_config_options) + ) repos = [] def __init__(self, require_config_file=False): super(MarionetteTest, self).__init__( config_options=self.config_options, - all_actions=['clobber', - 'pull', - 'download-and-extract', - 'create-virtualenv', - 'install', - 'run-tests'], - default_actions=['clobber', - 'pull', - 'download-and-extract', - 'create-virtualenv', - 'install', - 'run-tests'], + all_actions=[ + "clobber", + "pull", + "download-and-extract", + "create-virtualenv", + "install", + "run-tests", + ], + default_actions=[ + "clobber", + "pull", + "download-and-extract", + "create-virtualenv", + "install", + "run-tests", + ], require_config_file=require_config_file, - config={'require_test_zip': True}) + config={"require_test_zip": True}, + ) # these are necessary since self.config is read only c = self.config - self.installer_url = c.get('installer_url') - self.installer_path = c.get('installer_path') - self.binary_path = c.get('binary_path') - self.test_url = c.get('test_url') - self.test_packages_url = c.get('test_packages_url') + self.installer_url = c.get("installer_url") + self.installer_path = c.get("installer_path") + self.binary_path = c.get("binary_path") + self.test_url = c.get("test_url") + self.test_packages_url = c.get("test_packages_url") - self.test_suite = self._get_test_suite(c.get('emulator')) + self.test_suite = self._get_test_suite(c.get("emulator")) if self.test_suite not in self.config["suite_definitions"]: self.fatal("{} is not defined in the config!".format(self.test_suite)) - if c.get('structured_output'): + if c.get("structured_output"): self.parser_class = StructuredOutputParser else: self.parser_class = TestSummaryOutputParserHelper def 
_pre_config_lock(self, rw_config): super(MarionetteTest, self)._pre_config_lock(rw_config) - if not self.config.get('emulator') and not self.config.get('marionette_address'): - self.fatal("You need to specify a --marionette-address for non-emulator tests! " - "(Try --marionette-address localhost:2828 )") + if not self.config.get("emulator") and not self.config.get( + "marionette_address" + ): + self.fatal( + "You need to specify a --marionette-address for non-emulator tests! " + "(Try --marionette-address localhost:2828 )" + ) def _query_tests_dir(self): dirs = self.query_abs_dirs() test_dir = self.config["suite_definitions"][self.test_suite]["testsdir"] - return os.path.join(dirs['abs_test_install_dir'], test_dir) + return os.path.join(dirs["abs_test_install_dir"], test_dir) def query_abs_dirs(self): if self.abs_dirs: return self.abs_dirs abs_dirs = super(MarionetteTest, self).query_abs_dirs() dirs = {} - dirs['abs_test_install_dir'] = os.path.join( - abs_dirs['abs_work_dir'], 'tests') - dirs['abs_marionette_dir'] = os.path.join( - dirs['abs_test_install_dir'], 'marionette', 'harness', 'marionette_harness') - dirs['abs_marionette_tests_dir'] = os.path.join( - dirs['abs_test_install_dir'], 'marionette', 'tests', 'testing', - 'marionette', 'harness', 'marionette_harness', 'tests') - dirs['abs_gecko_dir'] = os.path.join( - abs_dirs['abs_work_dir'], 'gecko') - dirs['abs_emulator_dir'] = os.path.join( - abs_dirs['abs_work_dir'], 'emulator') + dirs["abs_test_install_dir"] = os.path.join(abs_dirs["abs_work_dir"], "tests") + dirs["abs_marionette_dir"] = os.path.join( + dirs["abs_test_install_dir"], "marionette", "harness", "marionette_harness" + ) + dirs["abs_marionette_tests_dir"] = os.path.join( + dirs["abs_test_install_dir"], + "marionette", + "tests", + "testing", + "marionette", + "harness", + "marionette_harness", + "tests", + ) + dirs["abs_gecko_dir"] = os.path.join(abs_dirs["abs_work_dir"], "gecko") + dirs["abs_emulator_dir"] = os.path.join(abs_dirs["abs_work_dir"], "emulator") - dirs['abs_blob_upload_dir'] = os.path.join(abs_dirs['abs_work_dir'], 'blobber_upload_dir') + dirs["abs_blob_upload_dir"] = os.path.join( + abs_dirs["abs_work_dir"], "blobber_upload_dir" + ) for key in dirs.keys(): if key not in abs_dirs: @@ -203,12 +248,12 @@ class MarionetteTest(TestingMixin, MercurialScript, TransferMixin, self.abs_dirs = abs_dirs return self.abs_dirs - @PreScriptAction('create-virtualenv') + @PreScriptAction("create-virtualenv") def _configure_marionette_virtualenv(self, action): dirs = self.query_abs_dirs() - requirements = os.path.join(dirs['abs_test_install_dir'], - 'config', - 'marionette_requirements.txt') + requirements = os.path.join( + dirs["abs_test_install_dir"], "config", "marionette_requirements.txt" + ) if not os.path.isfile(requirements): self.fatal( "Could not find marionette requirements file: {}".format(requirements) @@ -221,27 +266,30 @@ class MarionetteTest(TestingMixin, MercurialScript, TransferMixin, Determine which in tree options group to use and return the appropriate key. """ - platform = 'emulator' if is_emulator else 'desktop' + platform = "emulator" if is_emulator else "desktop" # Currently running marionette on an emulator means webapi # tests. This method will need to change if this does. 
- testsuite = 'webapi' if is_emulator else 'marionette' - return '{}_{}'.format(testsuite, platform) + testsuite = "webapi" if is_emulator else "marionette" + return "{}_{}".format(testsuite, platform) def download_and_extract(self): super(MarionetteTest, self).download_and_extract() - if self.config.get('emulator'): + if self.config.get("emulator"): dirs = self.query_abs_dirs() - self.mkdir_p(dirs['abs_emulator_dir']) - tar = self.query_exe('tar', return_type='list') - self.run_command(tar + ['zxf', self.installer_path], - cwd=dirs['abs_emulator_dir'], - error_list=TarErrorList, - halt_on_failure=True, fatal_exit_code=3) + self.mkdir_p(dirs["abs_emulator_dir"]) + tar = self.query_exe("tar", return_type="list") + self.run_command( + tar + ["zxf", self.installer_path], + cwd=dirs["abs_emulator_dir"], + error_list=TarErrorList, + halt_on_failure=True, + fatal_exit_code=3, + ) def install(self): - if self.config.get('emulator'): + if self.config.get("emulator"): self.info("Emulator tests; skipping.") else: super(MarionetteTest, self).install() @@ -252,51 +300,50 @@ class MarionetteTest(TestingMixin, MercurialScript, TransferMixin, """ dirs = self.query_abs_dirs() - raw_log_file = os.path.join(dirs['abs_blob_upload_dir'], - 'marionette_raw.log') - error_summary_file = os.path.join(dirs['abs_blob_upload_dir'], - 'marionette_errorsummary.log') - html_report_file = os.path.join(dirs['abs_blob_upload_dir'], - 'report.html') + raw_log_file = os.path.join(dirs["abs_blob_upload_dir"], "marionette_raw.log") + error_summary_file = os.path.join( + dirs["abs_blob_upload_dir"], "marionette_errorsummary.log" + ) + html_report_file = os.path.join(dirs["abs_blob_upload_dir"], "report.html") config_fmt_args = { # emulator builds require a longer timeout - 'timeout': 60000 if self.config.get('emulator') else 10000, - 'profile': os.path.join(dirs['abs_work_dir'], 'profile'), - 'xml_output': os.path.join(dirs['abs_work_dir'], 'output.xml'), - 'html_output': os.path.join(dirs['abs_blob_upload_dir'], 'output.html'), - 'logcat_dir': dirs['abs_work_dir'], - 'emulator': 'arm', - 'symbols_path': self.symbols_path, - 'binary': self.binary_path, - 'address': self.config.get('marionette_address'), - 'raw_log_file': raw_log_file, - 'error_summary_file': error_summary_file, - 'html_report_file': html_report_file, - 'gecko_log': dirs["abs_blob_upload_dir"], - 'this_chunk': self.config.get('this_chunk', 1), - 'total_chunks': self.config.get('total_chunks', 1) + "timeout": 60000 if self.config.get("emulator") else 10000, + "profile": os.path.join(dirs["abs_work_dir"], "profile"), + "xml_output": os.path.join(dirs["abs_work_dir"], "output.xml"), + "html_output": os.path.join(dirs["abs_blob_upload_dir"], "output.html"), + "logcat_dir": dirs["abs_work_dir"], + "emulator": "arm", + "symbols_path": self.symbols_path, + "binary": self.binary_path, + "address": self.config.get("marionette_address"), + "raw_log_file": raw_log_file, + "error_summary_file": error_summary_file, + "html_report_file": html_report_file, + "gecko_log": dirs["abs_blob_upload_dir"], + "this_chunk": self.config.get("this_chunk", 1), + "total_chunks": self.config.get("total_chunks", 1), } self.info("The emulator type: %s" % config_fmt_args["emulator"]) # build the marionette command arguments - python = self.query_python_path('python') + python = self.query_python_path("python") - cmd = [python, '-u', os.path.join(dirs['abs_marionette_dir'], - 'runtests.py')] + cmd = [python, "-u", os.path.join(dirs["abs_marionette_dir"], "runtests.py")] - manifest = 
os.path.join(dirs['abs_marionette_tests_dir'], - self.config['test_manifest']) + manifest = os.path.join( + dirs["abs_marionette_tests_dir"], self.config["test_manifest"] + ) - if self.config.get('app_arg'): - config_fmt_args['app_arg'] = self.config['app_arg'] + if self.config.get("app_arg"): + config_fmt_args["app_arg"] = self.config["app_arg"] - if self.config['enable_webrender']: - cmd.append('--enable-webrender') + if self.config["enable_webrender"]: + cmd.append("--enable-webrender") - cmd.extend(['--setpref={}'.format(p) for p in self.config['extra_prefs']]) + cmd.extend(["--setpref={}".format(p) for p in self.config["extra_prefs"]]) - cmd.append('--gecko-log=-') + cmd.append("--gecko-log=-") if self.config.get("structured_output"): cmd.append("--log-raw=-") @@ -308,89 +355,98 @@ class MarionetteTest(TestingMixin, MercurialScript, TransferMixin, # Make sure that the logging directory exists self.fatal("Could not create blobber upload directory") - test_paths = json.loads(os.environ.get('MOZHARNESS_TEST_PATHS', '""')) + test_paths = json.loads(os.environ.get("MOZHARNESS_TEST_PATHS", '""')) - if test_paths and 'marionette' in test_paths: - paths = [os.path.join(dirs['abs_test_install_dir'], 'marionette', 'tests', p) - for p in test_paths['marionette']] + if test_paths and "marionette" in test_paths: + paths = [ + os.path.join(dirs["abs_test_install_dir"], "marionette", "tests", p) + for p in test_paths["marionette"] + ] cmd.extend(paths) else: cmd.append(manifest) try_options, try_tests = self.try_args("marionette") - cmd.extend(self.query_tests_args(try_tests, - str_format_values=config_fmt_args)) + cmd.extend(self.query_tests_args(try_tests, str_format_values=config_fmt_args)) env = {} if self.query_minidump_stackwalk(): - env['MINIDUMP_STACKWALK'] = self.minidump_stackwalk_path - env['MOZ_UPLOAD_DIR'] = self.query_abs_dirs()['abs_blob_upload_dir'] - env['MINIDUMP_SAVE_PATH'] = self.query_abs_dirs()['abs_blob_upload_dir'] - env['RUST_BACKTRACE'] = 'full' + env["MINIDUMP_STACKWALK"] = self.minidump_stackwalk_path + env["MOZ_UPLOAD_DIR"] = self.query_abs_dirs()["abs_blob_upload_dir"] + env["MINIDUMP_SAVE_PATH"] = self.query_abs_dirs()["abs_blob_upload_dir"] + env["RUST_BACKTRACE"] = "full" - if self.config['allow_software_gl_layers']: - env['MOZ_LAYERS_ALLOW_SOFTWARE_GL'] = '1' + if self.config["allow_software_gl_layers"]: + env["MOZ_LAYERS_ALLOW_SOFTWARE_GL"] = "1" - if self.config['headless']: - env['MOZ_HEADLESS'] = '1' - env['MOZ_HEADLESS_WIDTH'] = self.config['headless_width'] - env['MOZ_HEADLESS_HEIGHT'] = self.config['headless_height'] + if self.config["headless"]: + env["MOZ_HEADLESS"] = "1" + env["MOZ_HEADLESS_WIDTH"] = self.config["headless_width"] + env["MOZ_HEADLESS_HEIGHT"] = self.config["headless_height"] - if not os.path.isdir(env['MOZ_UPLOAD_DIR']): - self.mkdir_p(env['MOZ_UPLOAD_DIR']) + if not os.path.isdir(env["MOZ_UPLOAD_DIR"]): + self.mkdir_p(env["MOZ_UPLOAD_DIR"]) env = self.query_env(partial_env=env) try: cwd = self._query_tests_dir() except Exception as e: - self.fatal("Don't know how to run --test-suite '{0}': {1}!".format( - self.test_suite, e)) + self.fatal( + "Don't know how to run --test-suite '{0}': {1}!".format( + self.test_suite, e + ) + ) - marionette_parser = self.parser_class(config=self.config, - log_obj=self.log_obj, - error_list=BaseErrorList + HarnessErrorList, - strict=False) - return_code = self.run_command(cmd, - cwd=cwd, - output_timeout=1000, - output_parser=marionette_parser, - env=env) + marionette_parser = self.parser_class( + 
config=self.config, + log_obj=self.log_obj, + error_list=BaseErrorList + HarnessErrorList, + strict=False, + ) + return_code = self.run_command( + cmd, cwd=cwd, output_timeout=1000, output_parser=marionette_parser, env=env + ) level = INFO tbpl_status, log_level, summary = marionette_parser.evaluate_parser( - return_code=return_code) + return_code=return_code + ) marionette_parser.append_tinderboxprint_line("marionette") - qemu = os.path.join(dirs['abs_work_dir'], 'qemu.log') + qemu = os.path.join(dirs["abs_work_dir"], "qemu.log") if os.path.isfile(qemu): - self.copyfile(qemu, os.path.join(dirs['abs_blob_upload_dir'], - 'qemu.log')) + self.copyfile(qemu, os.path.join(dirs["abs_blob_upload_dir"], "qemu.log")) # dump logcat output if there were failures - if self.config.get('emulator'): - if marionette_parser.failed != "0" or 'T-FAIL' in marionette_parser.tsummary: - logcat = os.path.join(dirs['abs_work_dir'], 'emulator-5554.log') + if self.config.get("emulator"): + if ( + marionette_parser.failed != "0" + or "T-FAIL" in marionette_parser.tsummary + ): + logcat = os.path.join(dirs["abs_work_dir"], "emulator-5554.log") if os.access(logcat, os.F_OK): - self.info('dumping logcat') - self.run_command(['cat', logcat], error_list=LogcatErrorList) + self.info("dumping logcat") + self.run_command(["cat", logcat], error_list=LogcatErrorList) else: - self.info('no logcat file found') + self.info("no logcat file found") else: # .. or gecko.log if it exists - gecko_log = os.path.join(self.config['base_work_dir'], 'gecko.log') + gecko_log = os.path.join(self.config["base_work_dir"], "gecko.log") if os.access(gecko_log, os.F_OK): - self.info('dumping gecko.log') - self.run_command(['cat', gecko_log]) + self.info("dumping gecko.log") + self.run_command(["cat", gecko_log]) self.rmtree(gecko_log) else: - self.info('gecko.log not found') + self.info("gecko.log not found") - marionette_parser.print_summary('marionette') + marionette_parser.print_summary("marionette") - self.log("Marionette exited with return code %s: %s" % (return_code, tbpl_status), - level=level) + self.log( + "Marionette exited with return code %s: %s" % (return_code, tbpl_status), + level=level, + ) self.record_status(tbpl_status) -if __name__ == '__main__': +if __name__ == "__main__": marionetteTest = MarionetteTest() marionetteTest.run_and_exit() diff --git a/testing/mozharness/scripts/release/bouncer_check.py b/testing/mozharness/scripts/release/bouncer_check.py index 6cb0c3732294..4a6d5907ac13 100644 --- a/testing/mozharness/scripts/release/bouncer_check.py +++ b/testing/mozharness/scripts/release/bouncer_check.py @@ -23,43 +23,61 @@ BOUNCER_URL_PATTERN = "{bouncer_prefix}?product={product}&os={os}&lang={lang}" class BouncerCheck(BaseScript): config_options = [ - [["--version"], { - "dest": "version", - "help": "Version of release, eg: 39.0b5", - }], - [["--product-field"], { - "dest": "product_field", - "help": "Version field of release from product details, eg: LATEST_FIREFOX_VERSION", - }], - [["--products-url"], { - "dest": "products_url", - "help": "The URL of the current Firefox product versions", - "type": str, - "default": "https://product-details.mozilla.org/1.0/firefox_versions.json", - }], - [["--previous-version"], { - "dest": "prev_versions", - "action": "extend", - "help": "Previous version(s)", - }], - [["--locale"], { - "dest": "locales", - # Intentionally limited for several reasons: - # 1) faster to check - # 2) do not need to deal with situation when a new locale - # introduced and we do not have partials for it yet 
- # 3) it mimics the old Sentry behaviour that worked for ages - # 4) no need to handle ja-JP-mac - "default": ["en-US", "de", "it", "zh-TW"], - "action": "append", - "help": "List of locales to check.", - }], - [["-j", "--parallelization"], { - "dest": "parallelization", - "default": 20, - "type": int, - "help": "Number of HTTP sessions running in parallel", - }], + [ + ["--version"], + { + "dest": "version", + "help": "Version of release, eg: 39.0b5", + }, + ], + [ + ["--product-field"], + { + "dest": "product_field", + "help": "Version field of release from product details, eg: LATEST_FIREFOX_VERSION", # NOQA: E501 + }, + ], + [ + ["--products-url"], + { + "dest": "products_url", + "help": "The URL of the current Firefox product versions", + "type": str, + "default": "https://product-details.mozilla.org/1.0/firefox_versions.json", + }, + ], + [ + ["--previous-version"], + { + "dest": "prev_versions", + "action": "extend", + "help": "Previous version(s)", + }, + ], + [ + ["--locale"], + { + "dest": "locales", + # Intentionally limited for several reasons: + # 1) faster to check + # 2) do not need to deal with situation when a new locale + # introduced and we do not have partials for it yet + # 3) it mimics the old Sentry behaviour that worked for ages + # 4) no need to handle ja-JP-mac + "default": ["en-US", "de", "it", "zh-TW"], + "action": "append", + "help": "List of locales to check.", + }, + ], + [ + ["-j", "--parallelization"], + { + "dest": "parallelization", + "default": 20, + "type": int, + "help": "Number of HTTP sessions running in parallel", + }, + ], ] def __init__(self, require_config_file=True): @@ -68,10 +86,10 @@ class BouncerCheck(BaseScript): require_config_file=require_config_file, config={ "cdn_urls": [ - 'download-installer.cdn.mozilla.net', - 'download.cdn.mozilla.net', - 'download.mozilla.org', - 'archive.mozilla.org', + "download-installer.cdn.mozilla.net", + "download.cdn.mozilla.net", + "download.mozilla.org", + "archive.mozilla.org", ], }, all_actions=[ @@ -90,14 +108,15 @@ class BouncerCheck(BaseScript): firefox_versions = self.load_json_url(self.config["products_url"]) - if self.config['product_field'] not in firefox_versions: - self.fatal('Unknown Firefox label: {}'.format(self.config['product_field'])) + if self.config["product_field"] not in firefox_versions: + self.fatal("Unknown Firefox label: {}".format(self.config["product_field"])) self.config["version"] = firefox_versions[self.config["product_field"]] self.log("Set Firefox version {}".format(self.config["version"])) def check_url(self, session, url): from redo import retry from requests.exceptions import HTTPError + try: from urllib.parse import urlparse except ImportError: @@ -114,12 +133,12 @@ class BouncerCheck(BaseScript): raise final_url = urlparse(r.url) - if final_url.scheme != 'https': - self.error('FAIL: URL scheme is not https: {}'.format(r.url)) + if final_url.scheme != "https": + self.error("FAIL: URL scheme is not https: {}".format(r.url)) self.return_code = EXIT_STATUS_DICT[TBPL_FAILURE] - if final_url.netloc not in self.config['cdn_urls']: - self.error('FAIL: host not in allowed locations: {}'.format(r.url)) + if final_url.netloc not in self.config["cdn_urls"]: + self.error("FAIL: host not in allowed locations: {}".format(r.url)) self.return_code = EXIT_STATUS_DICT[TBPL_FAILURE] try: @@ -148,8 +167,10 @@ class BouncerCheck(BaseScript): if not product["check_uptake"]: continue for prev_version in self.config.get("prev_versions", []): - product_name = product["product-name"] % 
{"version": self.config["version"], - "prev_version": prev_version} + product_name = product["product-name"] % { + "version": self.config["version"], + "prev_version": prev_version, + } for bouncer_platform in product["platforms"]: for locale in self.config["locales"]: url = BOUNCER_URL_PATTERN.format( @@ -163,12 +184,14 @@ class BouncerCheck(BaseScript): def check_bouncer(self): import requests import concurrent.futures as futures + session = requests.Session() http_adapter = requests.adapters.HTTPAdapter( pool_connections=self.config["parallelization"], - pool_maxsize=self.config["parallelization"]) - session.mount('https://', http_adapter) - session.mount('http://', http_adapter) + pool_maxsize=self.config["parallelization"], + ) + session.mount("https://", http_adapter) + session.mount("http://", http_adapter) with futures.ThreadPoolExecutor(self.config["parallelization"]) as e: fs = [] @@ -178,5 +201,5 @@ class BouncerCheck(BaseScript): f.result() -if __name__ == '__main__': +if __name__ == "__main__": BouncerCheck().run_and_exit() diff --git a/testing/mozharness/scripts/release/update-verify-config-creator.py b/testing/mozharness/scripts/release/update-verify-config-creator.py index 90515489479f..21cd1a3e629b 100644 --- a/testing/mozharness/scripts/release/update-verify-config-creator.py +++ b/testing/mozharness/scripts/release/update-verify-config-creator.py @@ -35,143 +35,223 @@ def is_triangualar(x): >>> all(not is_triangualar(x) for x in [4, 5, 8, 9, 11, 17, 25, 29, 39, 44, 59, 61, 72, 98, 112]) True """ - n = (math.sqrt(8*x + 1) - 1)/2 + n = (math.sqrt(8 * x + 1) - 1) / 2 return n == int(n) class UpdateVerifyConfigCreator(BaseScript): config_options = [ - [["--product"], { - "dest": "product", - "help": "Product being tested, as used in the update URL and filenames. Eg: firefox", - }], - [["--stage-product"], { - "dest": "stage_product", - "help": "Product being tested, as used in stage directories and ship it" - "If not passed this is assumed to be the same as product." - }], - [["--app-name"], { - "dest": "app_name", - "help": "App name being tested. Eg: browser", - }], - [["--branch-prefix"], { - "dest": "branch_prefix", - "help": "Prefix of release branch names. Eg: mozilla, comm", - }], - [["--channel"], { - "dest": "channel", - "help": "Channel to run update verify against", - }], - [["--aus-server"], { - "dest": "aus_server", - "default": "https://aus5.mozilla.org", - "help": "AUS server to run update verify against", - }], - [["--to-version"], { - "dest": "to_version", - "help": "The version of the release being updated to. Eg: 59.0b5", - }], - [["--to-app-version"], { - "dest": "to_app_version", - "help": "The in-app version of the release being updated to. Eg: 59.0", - }], - [["--to-display-version"], { - "dest": "to_display_version", - "help": "The human-readable version of the release being updated to. Eg: 59.0 Beta 9", - }], - [["--to-build-number"], { - "dest": "to_build_number", - "help": "The build number of the release being updated to", - }], - [["--to-buildid"], { - "dest": "to_buildid", - "help": "The buildid of the release being updated to", - }], - [["--to-revision"], { - "dest": "to_revision", - "help": "The revision that the release being updated to was built against", - }], - [["--partial-version"], { - "dest": "partial_versions", - "default": [], - "action": "append", - "help": "A previous release version that is expected to receive a partial update. " - "Eg: 59.0b4. May be specified multiple times." 
- }], - [["--last-watershed"], { - "dest": "last_watershed", - "help": "The earliest version to include in the update verify config. Eg: 57.0b10", - }], - [["--include-version"], { - "dest": "include_versions", - "default": [], - "action": "append", - "help": "Only include versions that match one of these regexes. " - "May be passed multiple times", - }], - [["--mar-channel-id-override"], { - "dest": "mar_channel_id_options", - "default": [], - "action": "append", - "help": "A version regex and channel id string to override those versions with." - "Eg: ^\\d+\\.\\d+(\\.\\d+)?$,firefox-mozilla-beta,firefox-mozilla-release " - "will set accepted mar channel ids to 'firefox-mozilla-beta' and " - "'firefox-mozilla-release for x.y and x.y.z versions. " - "May be passed multiple times" - }], - [["--override-certs"], { - "dest": "override_certs", - "default": None, - "help": "Certs to override the updater with prior to running update verify." - "If passed, should be one of: dep, nightly, release" - "If not passed, no certificate overriding will be configured" - }], - [["--platform"], { - "dest": "platform", - "help": "The platform to generate the update verify config for, in FTP-style", - }], - [["--updater-platform"], { - "dest": "updater_platform", - "help": "The platform to run the updater on, in FTP-style." - "If not specified, this is assumed to be the same as platform", - }], - [["--archive-prefix"], { - "dest": "archive_prefix", - "help": "The server/path to pull the current release from. " - "Eg: https://archive.mozilla.org/pub", - }], - [["--previous-archive-prefix"], { - "dest": "previous_archive_prefix", - "help": "The server/path to pull the previous releases from" - "If not specified, this is assumed to be the same as --archive-prefix" - }], - [["--repo-path"], { - "dest": "repo_path", - "help": "The repository (relative to the hg server root) that the current release was " - "built from Eg: releases/mozilla-beta" - }], - [["--output-file"], { - "dest": "output_file", - "help": "Where to write the update verify config to", - }], - [["--product-details-server"], { - "dest": "product_details_server", - "default": "https://product-details.mozilla.org", - "help": "Product Details server to pull previous release info from. " - "Using anything other than the production server is likely to " - "cause issues with update verify." - }], - [["--hg-server"], { - "dest": "hg_server", - "default": "https://hg.mozilla.org", - "help": "Mercurial server to pull various previous and current version info from", - }], - [["--full-check-locale"], { - "dest": "full_check_locales", - "default": ["de", "en-US", "ru"], - "action": "append", - "help": "A list of locales to generate full update verify checks for", - }], + [ + ["--product"], + { + "dest": "product", + "help": "Product being tested, as used in the update URL and filenames. Eg: firefox", # NOQA: E501 + }, + ], + [ + ["--stage-product"], + { + "dest": "stage_product", + "help": "Product being tested, as used in stage directories and ship it" + "If not passed this is assumed to be the same as product.", + }, + ], + [ + ["--app-name"], + { + "dest": "app_name", + "help": "App name being tested. Eg: browser", + }, + ], + [ + ["--branch-prefix"], + { + "dest": "branch_prefix", + "help": "Prefix of release branch names. 
Eg: mozilla, comm", + }, + ], + [ + ["--channel"], + { + "dest": "channel", + "help": "Channel to run update verify against", + }, + ], + [ + ["--aus-server"], + { + "dest": "aus_server", + "default": "https://aus5.mozilla.org", + "help": "AUS server to run update verify against", + }, + ], + [ + ["--to-version"], + { + "dest": "to_version", + "help": "The version of the release being updated to. Eg: 59.0b5", + }, + ], + [ + ["--to-app-version"], + { + "dest": "to_app_version", + "help": "The in-app version of the release being updated to. Eg: 59.0", + }, + ], + [ + ["--to-display-version"], + { + "dest": "to_display_version", + "help": "The human-readable version of the release being updated to. Eg: 59.0 Beta 9", # NOQA: E501 + }, + ], + [ + ["--to-build-number"], + { + "dest": "to_build_number", + "help": "The build number of the release being updated to", + }, + ], + [ + ["--to-buildid"], + { + "dest": "to_buildid", + "help": "The buildid of the release being updated to", + }, + ], + [ + ["--to-revision"], + { + "dest": "to_revision", + "help": "The revision that the release being updated to was built against", + }, + ], + [ + ["--partial-version"], + { + "dest": "partial_versions", + "default": [], + "action": "append", + "help": "A previous release version that is expected to receive a partial update. " + "Eg: 59.0b4. May be specified multiple times.", + }, + ], + [ + ["--last-watershed"], + { + "dest": "last_watershed", + "help": "The earliest version to include in the update verify config. Eg: 57.0b10", + }, + ], + [ + ["--include-version"], + { + "dest": "include_versions", + "default": [], + "action": "append", + "help": "Only include versions that match one of these regexes. " + "May be passed multiple times", + }, + ], + [ + ["--mar-channel-id-override"], + { + "dest": "mar_channel_id_options", + "default": [], + "action": "append", + "help": "A version regex and channel id string to override those versions with." + "Eg: ^\\d+\\.\\d+(\\.\\d+)?$,firefox-mozilla-beta,firefox-mozilla-release " + "will set accepted mar channel ids to 'firefox-mozilla-beta' and " + "'firefox-mozilla-release for x.y and x.y.z versions. " + "May be passed multiple times", + }, + ], + [ + ["--override-certs"], + { + "dest": "override_certs", + "default": None, + "help": "Certs to override the updater with prior to running update verify." + "If passed, should be one of: dep, nightly, release" + "If not passed, no certificate overriding will be configured", + }, + ], + [ + ["--platform"], + { + "dest": "platform", + "help": "The platform to generate the update verify config for, in FTP-style", + }, + ], + [ + ["--updater-platform"], + { + "dest": "updater_platform", + "help": "The platform to run the updater on, in FTP-style." + "If not specified, this is assumed to be the same as platform", + }, + ], + [ + ["--archive-prefix"], + { + "dest": "archive_prefix", + "help": "The server/path to pull the current release from. 
" + "Eg: https://archive.mozilla.org/pub", + }, + ], + [ + ["--previous-archive-prefix"], + { + "dest": "previous_archive_prefix", + "help": "The server/path to pull the previous releases from" + "If not specified, this is assumed to be the same as --archive-prefix", + }, + ], + [ + ["--repo-path"], + { + "dest": "repo_path", + "help": ( + "The repository (relative to the hg server root) that the current " + "release was built from Eg: releases/mozilla-beta" + ), + }, + ], + [ + ["--output-file"], + { + "dest": "output_file", + "help": "Where to write the update verify config to", + }, + ], + [ + ["--product-details-server"], + { + "dest": "product_details_server", + "default": "https://product-details.mozilla.org", + "help": "Product Details server to pull previous release info from. " + "Using anything other than the production server is likely to " + "cause issues with update verify.", + }, + ], + [ + ["--hg-server"], + { + "dest": "hg_server", + "default": "https://hg.mozilla.org", + "help": "Mercurial server to pull various previous and current version info from", + }, + ], + [ + ["--full-check-locale"], + { + "dest": "full_check_locales", + "default": ["de", "en-US", "ru"], + "action": "append", + "help": "A list of locales to generate full update verify checks for", + }, + ], ] def __init__(self): @@ -242,12 +322,12 @@ class UpdateVerifyConfigCreator(BaseScript): "WARNING", ) releases = json.load(ret)["releases"] - for release_name, release_info in \ - reversed(sorted(releases.items(), - key=lambda x: MozillaVersion(x[1]['version']))): + for release_name, release_info in reversed( + sorted(releases.items(), key=lambda x: MozillaVersion(x[1]["version"])) + ): # we need to use releases_name instead of release_info since esr # string is included in the name. later we rely on this. - product, version = release_name.split('-', 1) + product, version = release_name.split("-", 1) tag = "{}_{}_RELEASE".format(product.upper(), version.replace(".", "_")) # Exclude any releases that don't match one of our include version @@ -257,28 +337,41 @@ class UpdateVerifyConfigCreator(BaseScript): if re.match(v, version): break else: - self.log("Skipping release whose version doesn't match any " - "include_version pattern: %s" % release_name, - level=INFO) + self.log( + "Skipping release whose version doesn't match any " + "include_version pattern: %s" % release_name, + level=INFO, + ) continue # We also have to trim out previous releases that aren't in the same # product line, too old, etc. 
if self.config["stage_product"] != product: - self.log("Skipping release that doesn't match product name: %s" % release_name, - level=INFO) + self.log( + "Skipping release that doesn't match product name: %s" + % release_name, + level=INFO, + ) continue if MozillaVersion(version) < MozillaVersion(self.config["last_watershed"]): - self.log("Skipping release that's behind the last watershed: %s" % release_name, - level=INFO) + self.log( + "Skipping release that's behind the last watershed: %s" + % release_name, + level=INFO, + ) continue if version == self.config["to_version"]: - self.log("Skipping release that is the same as to version: %s" % release_name, - level=INFO) + self.log( + "Skipping release that is the same as to version: %s" + % release_name, + level=INFO, + ) continue if MozillaVersion(version) > MozillaVersion(self.config["to_version"]): - self.log("Skipping release that's newer than to version: %s" % release_name, - level=INFO) + self.log( + "Skipping release that's newer than to version: %s" % release_name, + level=INFO, + ) continue if version in self.update_paths: @@ -293,9 +386,11 @@ class UpdateVerifyConfigCreator(BaseScript): version, release_info["build_number"], ), - ftp2infoFile(self.config["platform"]) + ftp2infoFile(self.config["platform"]), + ) + self.log( + "Retrieving buildid from info file: %s" % info_file_url, level=DEBUG ) - self.log("Retrieving buildid from info file: %s" % info_file_url, level=DEBUG) ret = self._retry_download(info_file_url, "WARNING") buildID = ret.read().split(b"=")[1].strip().decode("utf-8") @@ -320,8 +415,12 @@ class UpdateVerifyConfigCreator(BaseScript): self.config["app_name"], ), ) - app_version = self._retry_download(app_version_url, "WARNING").read() \ - .strip().decode("utf-8") + app_version = ( + self._retry_download(app_version_url, "WARNING") + .read() + .strip() + .decode("utf-8") + ) self.log("Adding {} to update paths".format(version), level=INFO) self.update_paths[version] = { @@ -329,7 +428,9 @@ class UpdateVerifyConfigCreator(BaseScript): "locales": getPlatformLocales(shipped_locales, self.config["platform"]), "buildID": buildID, } - for pattern, mar_channel_ids in self.config["mar_channel_id_overrides"].items(): + for pattern, mar_channel_ids in self.config[ + "mar_channel_id_overrides" + ].items(): if re.match(pattern, version): self.update_paths[version]["marChannelIds"] = mar_channel_ids @@ -340,15 +441,14 @@ class UpdateVerifyConfigCreator(BaseScript): if self.update_paths: self.log("Found update paths:", level=DEBUG) self.log(pprint.pformat(self.update_paths), level=DEBUG) - elif ( - GeckoVersion.parse(self.config["to_version"]) - <= GeckoVersion.parse(self.config["last_watershed"]) + elif GeckoVersion.parse(self.config["to_version"]) <= GeckoVersion.parse( + self.config["last_watershed"] ): self.log( "Didn't find any update paths, but to_version {} is before the last_" "watershed {}, generating empty config".format( - self.config['to_version'], - self.config['last_watershed'], + self.config["to_version"], + self.config["last_watershed"], ), level=WARNING, ) @@ -359,17 +459,24 @@ class UpdateVerifyConfigCreator(BaseScript): from mozrelease.l10n import getPlatformLocales from mozrelease.platforms import ftp2updatePlatforms from mozrelease.update_verify import UpdateVerifyConfig - from mozrelease.paths import getCandidatesDir, getReleasesDir, getReleaseInstallerPath + from mozrelease.paths import ( + getCandidatesDir, + getReleasesDir, + getReleaseInstallerPath, + ) from mozrelease.versions import getPrettyVersion 
candidates_dir = getCandidatesDir( - self.config["stage_product"], self.config["to_version"], + self.config["stage_product"], + self.config["to_version"], self.config["to_build_number"], ) to_ = getReleaseInstallerPath( - self.config["product"], self.config["product"].title(), - self.config["to_version"], self.config["platform"], - locale="%locale%" + self.config["product"], + self.config["product"].title(), + self.config["to_version"], + self.config["platform"], + locale="%locale%", ) to_path = "{}/{}".format(candidates_dir, to_) @@ -378,8 +485,10 @@ class UpdateVerifyConfigCreator(BaseScript): to_display_version = getPrettyVersion(self.config["to_version"]) self.update_verify_config = UpdateVerifyConfig( - product=self.config["product"].title(), channel=self.config["channel"], - aus_server=self.config["aus_server"], to=to_path, + product=self.config["product"].title(), + channel=self.config["channel"], + aus_server=self.config["aus_server"], + to=to_path, to_build_id=self.config["to_buildid"], to_app_version=self.config["to_app_version"], to_display_version=to_display_version, @@ -394,9 +503,15 @@ class UpdateVerifyConfigCreator(BaseScript): self.config["app_name"], ), ) - to_shipped_locales = self._retry_download(to_shipped_locales_url, "WARNING") \ - .read().strip().decode("utf-8") - to_locales = set(getPlatformLocales(to_shipped_locales, self.config["platform"])) + to_shipped_locales = ( + self._retry_download(to_shipped_locales_url, "WARNING") + .read() + .strip() + .decode("utf-8") + ) + to_locales = set( + getPlatformLocales(to_shipped_locales, self.config["platform"]) + ) completes_only_index = 0 for fromVersion in reversed(sorted(self.update_paths, key=LooseVersion)): @@ -404,61 +519,76 @@ class UpdateVerifyConfigCreator(BaseScript): locales = sorted(list(set(from_["locales"]).intersection(to_locales))) appVersion = from_["appVersion"] build_id = from_["buildID"] - mar_channel_IDs = from_.get('marChannelIds') + mar_channel_IDs = from_.get("marChannelIds") # Use new build targets for Windows, but only on compatible # versions (42+). See bug 1185456 for additional context. 
- if self.config["platform"] not in ("win32", "win64") or \ - LooseVersion(fromVersion) < LooseVersion("42.0"): + if self.config["platform"] not in ("win32", "win64") or LooseVersion( + fromVersion + ) < LooseVersion("42.0"): update_platform = ftp2updatePlatforms(self.config["platform"])[0] else: update_platform = ftp2updatePlatforms(self.config["platform"])[1] - release_dir = getReleasesDir( - self.config["stage_product"], fromVersion - ) + release_dir = getReleasesDir(self.config["stage_product"], fromVersion) path_ = getReleaseInstallerPath( - self.config["product"], self.config["product"].title(), - fromVersion, self.config["platform"], locale="%locale%", + self.config["product"], + self.config["product"].title(), + fromVersion, + self.config["platform"], + locale="%locale%", ) from_path = "{}/{}".format(release_dir, path_) updater_package = "{}/{}".format( release_dir, getReleaseInstallerPath( - self.config["product"], self.config["product"].title(), - fromVersion, self.config["updater_platform"], + self.config["product"], + self.config["product"].title(), + fromVersion, + self.config["updater_platform"], locale="%locale%", - ) + ), ) # Exclude locales being full checked - quick_check_locales = [l for l in locales - if l not in self.config["full_check_locales"]] + quick_check_locales = [ + l for l in locales if l not in self.config["full_check_locales"] + ] # Get the intersection of from and to full_check_locales - this_full_check_locales = [l for l in self.config["full_check_locales"] - if l in locales] + this_full_check_locales = [ + l for l in self.config["full_check_locales"] if l in locales + ] if fromVersion in self.config["partial_versions"]: - self.info("Generating configs for partial update checks for %s" % fromVersion) + self.info( + "Generating configs for partial update checks for %s" % fromVersion + ) self.update_verify_config.addRelease( - release=appVersion, build_id=build_id, locales=locales, - patch_types=["complete", "partial"], from_path=from_path, + release=appVersion, + build_id=build_id, + locales=locales, + patch_types=["complete", "partial"], + from_path=from_path, ftp_server_from=self.config["previous_archive_prefix"], ftp_server_to=self.config["archive_prefix"], - mar_channel_IDs=mar_channel_IDs, platform=update_platform, - updater_package=updater_package + mar_channel_IDs=mar_channel_IDs, + platform=update_platform, + updater_package=updater_package, ) else: if this_full_check_locales and is_triangualar(completes_only_index): self.info("Generating full check configs for %s" % fromVersion) self.update_verify_config.addRelease( - release=appVersion, build_id=build_id, locales=this_full_check_locales, + release=appVersion, + build_id=build_id, + locales=this_full_check_locales, from_path=from_path, ftp_server_from=self.config["previous_archive_prefix"], ftp_server_to=self.config["archive_prefix"], - mar_channel_IDs=mar_channel_IDs, platform=update_platform, - updater_package=updater_package + mar_channel_IDs=mar_channel_IDs, + platform=update_platform, + updater_package=updater_package, ) # Quick test for other locales, no download if len(quick_check_locales) > 0: @@ -470,8 +600,10 @@ class UpdateVerifyConfigCreator(BaseScript): # Excluding full check locales from the quick check _locales = quick_check_locales self.update_verify_config.addRelease( - release=appVersion, build_id=build_id, - locales=_locales, platform=update_platform + release=appVersion, + build_id=build_id, + locales=_locales, + platform=update_platform, ) completes_only_index += 1 diff --git 
a/testing/talos/talos/test.py b/testing/talos/talos/test.py index 60f01f5afd6a..28a777cdffdf 100644 --- a/testing/talos/talos/test.py +++ b/testing/talos/talos/test.py @@ -13,12 +13,14 @@ _TESTS = {} # internal dict of Talos test classes def register_test(): """Decorator to register Talos test classes""" + def wrapper(klass): assert issubclass(klass, Test) assert klass.name() not in _TESTS _TESTS[klass.name()] = klass return klass + return wrapper @@ -29,6 +31,7 @@ def test_dict(): class Test(object): """abstract base class for a Talos test case""" + __test__ = False # not pytest cycles = None # number of cycles @@ -37,7 +40,7 @@ class Test(object): filters = filter.ignore_first.prepare(1) + filter.median.prepare() lower_is_better = True alert_threshold = 2.0 - perfherder_framework = 'talos' + perfherder_framework = "talos" subtest_alerts = False suite_should_alert = True @@ -64,7 +67,7 @@ class Test(object): """ returns a list of 2-tuples """ - retval = [('name', self.name())] + retval = [("name", self.name())] for key in self.keys: value = getattr(self, key, None) if value is not None: @@ -78,8 +81,8 @@ class Test(object): key, value = items.pop(0) lines = ["- %s: %s" % (key, value)] for key, value in items: - lines.append(' %s: %s' % (key, value)) - return '\n'.join(lines) + lines.append(" %s: %s" % (key, value)) + return "\n".join(lines) # ts-style startup tests (ts, twinopen, ts_cold, etc) @@ -87,47 +90,48 @@ class Test(object): # and taking an average of the remaining numbers. class TsBase(Test): """abstract base class for ts-style tests""" + keys = [ - 'url', - 'url_timestamp', - 'timeout', - 'cycles', - 'profile_path', # The path containing the template profile. This - # directory is copied to the temporary profile during - # initialization of the test. If some of the files may - # be overwritten by Firefox and need to be reinstalled - # before each pass, use key |reinstall| - 'gecko_profile', - 'gecko_profile_interval', - 'gecko_profile_entries', - 'gecko_profile_startup', - 'preferences', - 'xperf_counters', - 'xperf_providers', - 'xperf_user_providers', - 'xperf_stackwalk', - 'tpmozafterpaint', - 'fnbpaint', - 'tphero', - 'tpmanifest', - 'profile', - 'firstpaint', - 'userready', - 'testeventmap', - 'base_vs_ref', - 'extensions', - 'filters', - 'setup', - 'cleanup', - 'webextensions', - 'webextensions_folder', - 'reinstall', # A list of files from the profile directory that - # should be copied to the temporary profile prior to - # running each cycle, to avoid one cycle overwriting - # the data used by the next another cycle (may be used - # e.g. for sessionstore.js to ensure that all cycles - # use the exact same sessionstore.js, rather than a - # more recent copy). + "url", + "url_timestamp", + "timeout", + "cycles", + "profile_path", # The path containing the template profile. This + # directory is copied to the temporary profile during + # initialization of the test. 
If some of the files may + # be overwritten by Firefox and need to be reinstalled + # before each pass, use key |reinstall| + "gecko_profile", + "gecko_profile_interval", + "gecko_profile_entries", + "gecko_profile_startup", + "preferences", + "xperf_counters", + "xperf_providers", + "xperf_user_providers", + "xperf_stackwalk", + "tpmozafterpaint", + "fnbpaint", + "tphero", + "tpmanifest", + "profile", + "firstpaint", + "userready", + "testeventmap", + "base_vs_ref", + "extensions", + "filters", + "setup", + "cleanup", + "webextensions", + "webextensions_folder", + "reinstall", # A list of files from the profile directory that + # should be copied to the temporary profile prior to + # running each cycle, to avoid one cycle overwriting + # the data used by the next another cycle (may be used + # e.g. for sessionstore.js to ensure that all cycles + # use the exact same sessionstore.js, rather than a + # more recent copy). ] def __init__(self, **kw): @@ -139,7 +143,7 @@ class TsBase(Test): if not hasattr(self, "preferences"): self.preferences = { - BLOCKLIST_PREF: True, + BLOCKLIST_PREF: True, } elif BLOCKLIST_PREF not in self.preferences: self.preferences[BLOCKLIST_PREF] = True @@ -152,24 +156,25 @@ class ts_paint(TsBase): waits for [MozAfterPaint and onLoad] to fire, then records the end time and calculates the time to startup. """ + cycles = 20 timeout = 150 gecko_profile_startup = True gecko_profile_entries = 10000000 - url = 'startup_test/tspaint_test.html' + url = "startup_test/tspaint_test.html" xperf_counters = [] win7_counters = [] filters = filter.ignore_first.prepare(1) + filter.median.prepare() tpmozafterpaint = True mainthread = False responsiveness = False - unit = 'ms' + unit = "ms" @register_test() class ts_paint_webext(ts_paint): - webextensions = '${talos}/webextensions/dummy/dummy.xpi' - preferences = {'xpinstall.signatures.required': False} + webextensions = "${talos}/webextensions/dummy/dummy.xpi" + preferences = {"xpinstall.signatures.required": False} @register_test() @@ -177,12 +182,13 @@ class ts_paint_heavy(ts_paint): """ ts_paint test ran against a heavy-user profile """ - profile = 'simple' + + profile = "simple" @register_test() class ts_paint_flex(ts_paint): - preferences = {'layout.css.emulate-moz-box-with-flex': True} + preferences = {"layout.css.emulate-moz-box-with-flex": True} @register_test() @@ -192,12 +198,13 @@ class startup_about_home_paint(ts_paint): disabled, to more accurately simulate startup when the cache does not exist. """ + url = None cycles = 20 - extensions = ['${talos}/startup_test/startup_about_home_paint/addon'] - tpmanifest = '${talos}/startup_test/startup_about_home_paint/startup_about_home_paint.manifest' + extensions = ["${talos}/startup_test/startup_about_home_paint/addon"] + tpmanifest = "${talos}/startup_test/startup_about_home_paint/startup_about_home_paint.manifest" preferences = { - 'browser.startup.homepage.abouthome_cache.enabled': False, + "browser.startup.homepage.abouthome_cache.enabled": False, } @@ -207,12 +214,13 @@ class startup_about_home_paint_cached(ts_paint): Tests loading about:home on startup with the about:home startup cache enabled. 
""" + url = None cycles = 20 - extensions = ['${talos}/startup_test/startup_about_home_paint/addon'] - tpmanifest = '${talos}/startup_test/startup_about_home_paint/startup_about_home_paint.manifest' + extensions = ["${talos}/startup_test/startup_about_home_paint/addon"] + tpmanifest = "${talos}/startup_test/startup_about_home_paint/startup_about_home_paint.manifest" preferences = { - 'browser.startup.homepage.abouthome_cache.enabled': True, + "browser.startup.homepage.abouthome_cache.enabled": True, } @@ -221,11 +229,11 @@ class startup_about_home_paint_realworld_webextensions(ts_paint): url = None cycles = 20 extensions = [ - '${talos}/startup_test/startup_about_home_paint/addon', - '${talos}/getinfooffline' + "${talos}/startup_test/startup_about_home_paint/addon", + "${talos}/getinfooffline", ] - tpmanifest = '${talos}/startup_test/startup_about_home_paint/startup_about_home_paint.manifest' - webextensions_folder = '${talos}/webextensions' + tpmanifest = "${talos}/startup_test/startup_about_home_paint/startup_about_home_paint.manifest" + webextensions_folder = "${talos}/webextensions" @register_test() @@ -237,18 +245,19 @@ class sessionrestore(TsBase): 2. Launch Firefox. 3. Measure the delta between firstPaint and sessionRestored. """ - extensions = ['${talos}/startup_test/sessionrestore/addon'] + + extensions = ["${talos}/startup_test/sessionrestore/addon"] cycles = 10 timeout = 900 gecko_profile_startup = True gecko_profile_entries = 10000000 - profile_path = '${talos}/startup_test/sessionrestore/profile' - reinstall = ['sessionstore.jsonlz4', 'sessionstore.js', 'sessionCheckpoints.json'] + profile_path = "${talos}/startup_test/sessionrestore/profile" + reinstall = ["sessionstore.jsonlz4", "sessionstore.js", "sessionCheckpoints.json"] # Restore the session. We have to provide a URL, otherwise Talos # asks for a manifest URL. - url = 'about:home' - preferences = {'browser.startup.page': 3} - unit = 'ms' + url = "about:home" + preferences = {"browser.startup.page": 3} + unit = "ms" @register_test() @@ -260,9 +269,10 @@ class sessionrestore_no_auto_restore(sessionrestore): 2. Launch Firefox. 3. Measure the delta between firstPaint and sessionRestored. """ + preferences = { - 'browser.startup.page': 1, - 'talos.sessionrestore.norestore': True, + "browser.startup.page": 1, + "talos.sessionrestore.norestore": True, } @@ -275,7 +285,8 @@ class sessionrestore_many_windows(sessionrestore): 2. Launch Firefox. 3. Measure the delta between firstPaint and sessionRestored. 
""" - profile_path = '${talos}/startup_test/sessionrestore/profile-manywindows' + + profile_path = "${talos}/startup_test/sessionrestore/profile-manywindows" # pageloader tests(tp5, etc) @@ -289,35 +300,77 @@ class sessionrestore_many_windows(sessionrestore): class PageloaderTest(Test): """abstract base class for a Talos Pageloader test""" - extensions = ['${talos}/pageloader'] + + extensions = ["${talos}/pageloader"] tpmanifest = None # test manifest tpcycles = 1 # number of time to run each page cycles = None timeout = None - keys = ['tpmanifest', 'tpcycles', 'tppagecycles', 'tprender', 'tpchrome', - 'tpmozafterpaint', 'fnbpaint', 'tphero', 'tploadnocache', 'firstpaint', - 'userready', 'testeventmap', 'base_vs_ref', 'mainthread', 'resolution', - 'cycles', 'gecko_profile', 'gecko_profile_interval', 'gecko_profile_entries', - 'tptimeout', 'win_counters', 'w7_counters', 'linux_counters', 'mac_counters', - 'tpscrolltest', 'xperf_counters', 'timeout', 'responsiveness', - 'profile_path', 'xperf_providers', 'xperf_user_providers', 'xperf_stackwalk', - 'format_pagename', 'filters', 'preferences', 'extensions', 'setup', 'cleanup', - 'lower_is_better', 'alert_threshold', 'unit', 'webextensions', 'profile', - 'suite_should_alert', 'subtest_alerts', 'perfherder_framework', 'pdfpaint', - 'webextensions_folder', 'a11y'] + keys = [ + "tpmanifest", + "tpcycles", + "tppagecycles", + "tprender", + "tpchrome", + "tpmozafterpaint", + "fnbpaint", + "tphero", + "tploadnocache", + "firstpaint", + "userready", + "testeventmap", + "base_vs_ref", + "mainthread", + "resolution", + "cycles", + "gecko_profile", + "gecko_profile_interval", + "gecko_profile_entries", + "tptimeout", + "win_counters", + "w7_counters", + "linux_counters", + "mac_counters", + "tpscrolltest", + "xperf_counters", + "timeout", + "responsiveness", + "profile_path", + "xperf_providers", + "xperf_user_providers", + "xperf_stackwalk", + "format_pagename", + "filters", + "preferences", + "extensions", + "setup", + "cleanup", + "lower_is_better", + "alert_threshold", + "unit", + "webextensions", + "profile", + "suite_should_alert", + "subtest_alerts", + "perfherder_framework", + "pdfpaint", + "webextensions_folder", + "a11y", + ] class QuantumPageloadTest(PageloaderTest): """ Base class for a Quantum Pageload test """ + tpcycles = 1 tppagecycles = 25 gecko_profile_interval = 1 gecko_profile_entries = 2000000 filters = filter.ignore_first.prepare(5) + filter.median.prepare() - unit = 'ms' + unit = "ms" lower_is_better = True fnbpaint = True @@ -330,18 +383,17 @@ class twinopen(PageloaderTest): time. Multiple test windows are opened in succession. (Measures ctrl-n performance.) """ - extensions = ['${talos}/pageloader', '${talos}/tests/twinopen'] - tpmanifest = '${talos}/tests/twinopen/twinopen.manifest' + + extensions = ["${talos}/pageloader", "${talos}/tests/twinopen"] + tpmanifest = "${talos}/tests/twinopen/twinopen.manifest" tppagecycles = 20 timeout = 300 gecko_profile_interval = 1 gecko_profile_entries = 2000000 tpmozafterpaint = True filters = filter.ignore_first.prepare(5) + filter.median.prepare() - unit = 'ms' - preferences = { - 'browser.startup.homepage': 'about:blank' - } + unit = "ms" + preferences = {"browser.startup.homepage": "about:blank"} @register_test() @@ -350,14 +402,13 @@ class pdfpaint(PageloaderTest): Tests the amount of time it takes for the the first page of a PDF to be rendered. 
""" - tpmanifest = '${talos}/tests/pdfpaint/pdfpaint.manifest' + + tpmanifest = "${talos}/tests/pdfpaint/pdfpaint.manifest" tppagecycles = 20 gecko_profile_entries = 1000000 pdfpaint = True - unit = 'ms' - preferences = { - 'pdfjs.eventBusDispatchToDOM': True - } + unit = "ms" + preferences = {"pdfjs.eventBusDispatchToDOM": True} @register_test() @@ -367,20 +418,21 @@ class cpstartup(PageloaderTest): initialize it to the point where it can start processing incoming URLs to load. """ - extensions = ['${talos}/pageloader', '${talos}/tests/cpstartup/extension'] - tpmanifest = '${talos}/tests/cpstartup/cpstartup.manifest' + + extensions = ["${talos}/pageloader", "${talos}/tests/cpstartup/extension"] + tpmanifest = "${talos}/tests/cpstartup/cpstartup.manifest" tppagecycles = 20 gecko_profile_entries = 1000000 tploadnocache = True - unit = 'ms' + unit = "ms" preferences = { # By default, Talos is configured to open links from # content in new windows. We're overriding them so that # they open in new tabs instead. # See http://kb.mozillazine.org/Browser.link.open_newwindow # and http://kb.mozillazine.org/Browser.link.open_newwindow.restriction - 'browser.link.open_newwindow': 3, - 'browser.link.open_newwindow.restriction': 2, + "browser.link.open_newwindow": 3, + "browser.link.open_newwindow.restriction": 2, } @@ -390,21 +442,22 @@ class tabpaint(PageloaderTest): Tests the amount of time it takes to open new tabs, triggered from both the parent process and the content process. """ - extensions = ['${talos}/tests/tabpaint', '${talos}/pageloader'] - tpmanifest = '${talos}/tests/tabpaint/tabpaint.manifest' + + extensions = ["${talos}/tests/tabpaint", "${talos}/pageloader"] + tpmanifest = "${talos}/tests/tabpaint/tabpaint.manifest" tppagecycles = 20 gecko_profile_entries = 1000000 tploadnocache = True - unit = 'ms' + unit = "ms" preferences = { # By default, Talos is configured to open links from # content in new windows. We're overriding them so that # they open in new tabs instead. # See http://kb.mozillazine.org/Browser.link.open_newwindow # and http://kb.mozillazine.org/Browser.link.open_newwindow.restriction - 'browser.link.open_newwindow': 3, - 'browser.link.open_newwindow.restriction': 2, - 'browser.newtab.preload': False, + "browser.link.open_newwindow": 3, + "browser.link.open_newwindow.restriction": 2, + "browser.newtab.preload": False, } @@ -413,19 +466,18 @@ class tabswitch(PageloaderTest): """ Tests the amount of time it takes to switch between tabs """ - extensions = ['${talos}/tests/tabswitch', '${talos}/pageloader'] - tpmanifest = '${talos}/tests/tabswitch/tabswitch.manifest' + + extensions = ["${talos}/tests/tabswitch", "${talos}/pageloader"] + tpmanifest = "${talos}/tests/tabswitch/tabswitch.manifest" tppagecycles = 5 gecko_profile_entries = 5000000 tploadnocache = True preferences = { - 'addon.test.tabswitch.urlfile': os.path.join('${talos}', - 'tests', - 'tp5o.html'), - 'addon.test.tabswitch.webserver': '${webserver}', - 'addon.test.tabswitch.maxurls': -1, + "addon.test.tabswitch.urlfile": os.path.join("${talos}", "tests", "tp5o.html"), + "addon.test.tabswitch.webserver": "${webserver}", + "addon.test.tabswitch.maxurls": -1, } - unit = 'ms' + unit = "ms" @register_test() @@ -451,8 +503,9 @@ class tart(PageloaderTest): - half: average interval over the 2nd half of the animation. - all: average interval over all recorded intervals. 
""" - tpmanifest = '${talos}/tests/tart/tart.manifest' - extensions = ['${talos}/pageloader', '${talos}/tests/tart/addon'] + + tpmanifest = "${talos}/tests/tart/tart.manifest" + extensions = ["${talos}/pageloader", "${talos}/tests/tart/addon"] tpcycles = 1 tppagecycles = 25 tploadnocache = True @@ -467,16 +520,18 @@ class tart(PageloaderTest): OGL HW composition to disable OMTC with older firefox builds, also set 'layers.offmainthreadcomposition.enabled': False """ - preferences = {'layout.frame_rate': 0, - 'docshell.event_starvation_delay_hint': 1, - 'dom.send_after_paint_to_content': False} + preferences = { + "layout.frame_rate": 0, + "docshell.event_starvation_delay_hint": 1, + "dom.send_after_paint_to_content": False, + } filters = filter.ignore_first.prepare(1) + filter.median.prepare() - unit = 'ms' + unit = "ms" @register_test() class tart_flex(tart): - preferences = {'layout.css.emulate-moz-box-with-flex': True} + preferences = {"layout.css.emulate-moz-box-with-flex": True} @register_test() @@ -486,8 +541,9 @@ class damp(PageloaderTest): Tests the speed of DevTools toolbox open, close, and page reload for each tool, across a very simple and very complicated page. """ - tpmanifest = '${talos}/tests/devtools/damp.manifest' - extensions = ['${talos}/pageloader', '${talos}/tests/devtools/addon'] + + tpmanifest = "${talos}/tests/devtools/damp.manifest" + extensions = ["${talos}/pageloader", "${talos}/tests/devtools/addon"] cycles = 5 tpcycles = 1 tppagecycles = 5 @@ -497,10 +553,10 @@ class damp(PageloaderTest): gecko_profile_entries = 10000000 win_counters = w7_counters = linux_counters = mac_counters = None filters = filter.ignore_first.prepare(1) + filter.median.prepare() - preferences = {'devtools.memory.enabled': True} - unit = 'ms' + preferences = {"devtools.memory.enabled": True} + unit = "ms" subtest_alerts = True - perfherder_framework = 'devtools' + perfherder_framework = "devtools" @register_test() @@ -513,7 +569,8 @@ class glterrain(PageloaderTest): antialias as canvas properties. Each of these 4 runs is reported as a different test name. """ - tpmanifest = '${talos}/tests/webgl/glterrain.manifest' + + tpmanifest = "${talos}/tests/webgl/glterrain.manifest" tpcycles = 1 tppagecycles = 25 tploadnocache = True @@ -523,11 +580,13 @@ class glterrain(PageloaderTest): gecko_profile_entries = 2000000 win_counters = w7_counters = linux_counters = mac_counters = None """ ASAP mode """ - preferences = {'layout.frame_rate': 0, - 'docshell.event_starvation_delay_hint': 1, - 'dom.send_after_paint_to_content': False} + preferences = { + "layout.frame_rate": 0, + "docshell.event_starvation_delay_hint": 1, + "dom.send_after_paint_to_content": False, + } filters = filter.ignore_first.prepare(1) + filter.median.prepare() - unit = 'frame interval' + unit = "frame interval" @register_test() @@ -537,7 +596,8 @@ class glvideo(PageloaderTest): Measures mean tick time across 100 ticks. (each tick is texImage2D(<video>)+setTimeout(0)) """ - tpmanifest = '${talos}/tests/webgl/glvideo.manifest' + + tpmanifest = "${talos}/tests/webgl/glvideo.manifest" tpcycles = 1 tppagecycles = 5 tploadnocache = True @@ -547,7 +607,7 @@ class glvideo(PageloaderTest): gecko_profile_entries = 2000000 win_counters = w7_counters = linux_counters = mac_counters = None filters = filter.ignore_first.prepare(1) + filter.median.prepare() - unit = 'ms' + unit = "ms" @register_test() @@ -560,8 +620,9 @@ class tp5n(PageloaderTest): pages/home pages but to be pages that better reflect the actual content of the site in question. 
""" + resolution = 20 - tpmanifest = '${talos}/tests/tp5n/tp5n.manifest' + tpmanifest = "${talos}/tests/tp5n/tp5n.manifest" tpcycles = 1 tppagecycles = 1 cycles = 1 @@ -572,27 +633,39 @@ class tp5n(PageloaderTest): win_counters = [] linux_counters = [] mac_counters = [] - xperf_counters = ['main_startup_fileio', 'main_startup_netio', - 'main_normal_fileio', 'main_normal_netio', - 'nonmain_startup_fileio', 'nonmain_normal_fileio', - 'nonmain_normal_netio', 'mainthread_readcount', - 'mainthread_readbytes', 'mainthread_writecount', - 'mainthread_writebytes', - 'time_to_session_store_window_restored_ms', - ] - xperf_providers = ['PROC_THREAD', 'LOADER', 'HARD_FAULTS', 'FILENAME', - 'FILE_IO', 'FILE_IO_INIT'] - xperf_user_providers = ['Mozilla Generic Provider', - 'Microsoft-Windows-TCPIP'] - xperf_stackwalk = ['FileCreate', 'FileRead', 'FileWrite', 'FileFlush', - 'FileClose'] + xperf_counters = [ + "main_startup_fileio", + "main_startup_netio", + "main_normal_fileio", + "main_normal_netio", + "nonmain_startup_fileio", + "nonmain_normal_fileio", + "nonmain_normal_netio", + "mainthread_readcount", + "mainthread_readbytes", + "mainthread_writecount", + "mainthread_writebytes", + "time_to_session_store_window_restored_ms", + ] + xperf_providers = [ + "PROC_THREAD", + "LOADER", + "HARD_FAULTS", + "FILENAME", + "FILE_IO", + "FILE_IO_INIT", + ] + xperf_user_providers = ["Mozilla Generic Provider", "Microsoft-Windows-TCPIP"] + xperf_stackwalk = ["FileCreate", "FileRead", "FileWrite", "FileFlush", "FileClose"] filters = filter.ignore_first.prepare(1) + filter.median.prepare() timeout = 1800 - setup = '${talos}/xtalos/start_xperf.py -c ${talos}/bcontroller.json' - cleanup = '${talos}/xtalos/parse_xperf.py -c ${talos}/bcontroller.json' - preferences = {'extensions.enabledScopes': '', - 'talos.logfile': 'browser_output.txt'} - unit = 'ms' + setup = "${talos}/xtalos/start_xperf.py -c ${talos}/bcontroller.json" + cleanup = "${talos}/xtalos/parse_xperf.py -c ${talos}/bcontroller.json" + preferences = { + "extensions.enabledScopes": "", + "talos.logfile": "browser_output.txt", + } + unit = "ms" @register_test() @@ -600,29 +673,30 @@ class tp5o(PageloaderTest): """ Derived from the tp5n pageset, this is the 49 most reliable webpages. """ + tpcycles = 1 tppagecycles = 25 cycles = 1 tpmozafterpaint = True tptimeout = 5000 mainthread = False - tpmanifest = '${talos}/tests/tp5n/tp5o.manifest' - win_counters = ['% Processor Time'] - w7_counters = ['% Processor Time'] - linux_counters = ['XRes'] + tpmanifest = "${talos}/tests/tp5n/tp5o.manifest" + win_counters = ["% Processor Time"] + w7_counters = ["% Processor Time"] + linux_counters = ["XRes"] mac_counters = [] responsiveness = True gecko_profile_interval = 2 gecko_profile_entries = 4000000 filters = filter.ignore_first.prepare(5) + filter.median.prepare() timeout = 1800 - unit = 'ms' + unit = "ms" @register_test() class tp5o_webext(tp5o): - webextensions = '${talos}/webextensions/dummy/dummy.xpi' - preferences = {'xpinstall.signatures.required': False} + webextensions = "${talos}/webextensions/dummy/dummy.xpi" + preferences = {"xpinstall.signatures.required": False} @register_test() @@ -630,7 +704,8 @@ class tp5o_scroll(PageloaderTest): """ Tests scroll (like tscrollx does, including ASAP) but on the tp5o pageset. 
""" - tpmanifest = '${talos}/tests/tp5n/tp5o.manifest' + + tpmanifest = "${talos}/tests/tp5n/tp5o.manifest" tpcycles = 1 tppagecycles = 12 gecko_profile_interval = 2 @@ -638,14 +713,16 @@ class tp5o_scroll(PageloaderTest): tpscrolltest = True """ASAP mode""" tpmozafterpaint = False - preferences = {'layout.frame_rate': 0, - 'docshell.event_starvation_delay_hint': 1, - 'dom.send_after_paint_to_content': True, - 'apz.paint_skipping.enabled': False, - 'layout.css.scroll-behavior.spring-constant': "'10'", - 'toolkit.framesRecording.bufferSize': 10000} + preferences = { + "layout.frame_rate": 0, + "docshell.event_starvation_delay_hint": 1, + "dom.send_after_paint_to_content": True, + "apz.paint_skipping.enabled": False, + "layout.css.scroll-behavior.spring-constant": "'10'", + "toolkit.framesRecording.bufferSize": 10000, + } filters = filter.ignore_first.prepare(1) + filter.median.prepare() - unit = '1/FPS' + unit = "1/FPS" @register_test() @@ -657,15 +734,16 @@ class v8_7(PageloaderTest): The previous version of this test is V8 version 5 which was run on selective branches and operating systems. """ - tpmanifest = '${talos}/tests/v8_7/v8.manifest' + + tpmanifest = "${talos}/tests/v8_7/v8.manifest" gecko_profile_interval = 1 gecko_profile_entries = 1000000 tpcycles = 1 resolution = 20 tpmozafterpaint = False - preferences = {'dom.send_after_paint_to_content': False} + preferences = {"dom.send_after_paint_to_content": False} filters = filter.v8_subtest.prepare() - unit = 'score' + unit = "score" lower_is_better = False @@ -675,16 +753,17 @@ class kraken(PageloaderTest): This is the Kraken javascript benchmark taken verbatim and slightly modified to fit into our pageloader extension and talos harness. """ - tpmanifest = '${talos}/tests/kraken/kraken.manifest' + + tpmanifest = "${talos}/tests/kraken/kraken.manifest" tpcycles = 1 tppagecycles = 1 gecko_profile_interval = 1 gecko_profile_entries = 5000000 tpmozafterpaint = False tpchrome = False - preferences = {'dom.send_after_paint_to_content': False} + preferences = {"dom.send_after_paint_to_content": False} filters = filter.mean.prepare() - unit = 'score' + unit = "score" @register_test() @@ -692,28 +771,32 @@ class basic_compositor_video(PageloaderTest): """ Video test """ - tpmanifest = '${talos}/tests/video/video.manifest' + + tpmanifest = "${talos}/tests/video/video.manifest" tpcycles = 1 tppagecycles = 12 tpchrome = False timeout = 10000 gecko_profile_interval = 1 gecko_profile_entries = 2000000 - preferences = {'full-screen-api.allow-trusted-requests-only': False, - 'layers.acceleration.force-enabled': False, - 'layers.acceleration.disabled': True, - 'gfx.webrender.software': True, - 'layout.frame_rate': 0, - 'docshell.event_starvation_delay_hint': 1, - 'full-screen-api.warning.timeout': 500, - 'media.ruin-av-sync.enabled': True} + preferences = { + "full-screen-api.allow-trusted-requests-only": False, + "layers.acceleration.force-enabled": False, + "layers.acceleration.disabled": True, + "gfx.webrender.software": True, + "layout.frame_rate": 0, + "docshell.event_starvation_delay_hint": 1, + "full-screen-api.warning.timeout": 500, + "media.ruin-av-sync.enabled": True, + } filters = filter.ignore_first.prepare(1) + filter.median.prepare() - unit = 'ms/frame' + unit = "ms/frame" lower_is_better = True class dromaeo(PageloaderTest): """abstract base class for dramaeo tests""" + filters = filter.dromaeo.prepare() lower_is_better = False alert_threshold = 5.0 @@ -729,10 +812,11 @@ class dromaeo_css(dromaeo): Each page in the manifest is part of 
the dromaemo css benchmark. """ + gecko_profile_interval = 2 gecko_profile_entries = 10000000 - tpmanifest = '${talos}/tests/dromaeo/css.manifest' - unit = 'score' + tpmanifest = "${talos}/tests/dromaeo/css.manifest" + unit = "score" @register_test() @@ -744,10 +828,11 @@ class dromaeo_dom(dromaeo): Each page in the manifest is part of the dromaemo dom benchmark. """ + gecko_profile_interval = 2 gecko_profile_entries = 10000000 - tpmanifest = '${talos}/tests/dromaeo/dom.manifest' - unit = 'score' + tpmanifest = "${talos}/tests/dromaeo/dom.manifest" + unit = "score" @register_test() @@ -755,15 +840,16 @@ class tresize(PageloaderTest): """ This test does some resize thing. """ - tpmanifest = '${talos}/tests/tresize/tresize.manifest' - extensions = ['${talos}/pageloader', '${talos}/tests/tresize/addon'] + + tpmanifest = "${talos}/tests/tresize/tresize.manifest" + extensions = ["${talos}/pageloader", "${talos}/tests/tresize/addon"] tppagecycles = 20 timeout = 900 gecko_profile_interval = 2 gecko_profile_entries = 1000000 tpmozafterpaint = True filters = filter.ignore_first.prepare(5) + filter.median.prepare() - unit = 'ms' + unit = "ms" @register_test() @@ -772,7 +858,8 @@ class tsvgm(PageloaderTest): An svg-only number that measures SVG rendering performance for dynamic content only. """ - tpmanifest = '${talos}/tests/svgx/svgm.manifest' + + tpmanifest = "${talos}/tests/svgx/svgm.manifest" tpcycles = 1 tppagecycles = 7 tpmozafterpaint = False @@ -780,11 +867,13 @@ class tsvgm(PageloaderTest): gecko_profile_interval = 10 gecko_profile_entries = 1000000 """ASAP mode""" - preferences = {'layout.frame_rate': 0, - 'docshell.event_starvation_delay_hint': 1, - 'dom.send_after_paint_to_content': False} + preferences = { + "layout.frame_rate": 0, + "docshell.event_starvation_delay_hint": 1, + "dom.send_after_paint_to_content": False, + } filters = filter.ignore_first.prepare(2) + filter.median.prepare() - unit = 'ms' + unit = "ms" @register_test() @@ -793,7 +882,8 @@ class tsvgx(PageloaderTest): An svg-only number that measures SVG rendering performance for dynamic content only. """ - tpmanifest = '${talos}/tests/svgx/svgx.manifest' + + tpmanifest = "${talos}/tests/svgx/svgx.manifest" tpcycles = 1 tppagecycles = 25 tpmozafterpaint = False @@ -801,11 +891,13 @@ class tsvgx(PageloaderTest): gecko_profile_interval = 10 gecko_profile_entries = 1000000 """ASAP mode""" - preferences = {'layout.frame_rate': 0, - 'docshell.event_starvation_delay_hint': 1, - 'dom.send_after_paint_to_content': False} + preferences = { + "layout.frame_rate": 0, + "docshell.event_starvation_delay_hint": 1, + "dom.send_after_paint_to_content": False, + } filters = filter.ignore_first.prepare(5) + filter.median.prepare() - unit = 'ms' + unit = "ms" @register_test() @@ -814,7 +906,8 @@ class tsvg_static(PageloaderTest): An svg-only number that measures SVG rendering performance for static content only. """ - tpmanifest = '${talos}/tests/svg_static/svg_static.manifest' + + tpmanifest = "${talos}/tests/svg_static/svg_static.manifest" tpcycles = 1 tppagecycles = 25 tpmozafterpaint = True @@ -822,7 +915,7 @@ class tsvg_static(PageloaderTest): gecko_profile_interval = 1 gecko_profile_entries = 10000000 filters = filter.ignore_first.prepare(5) + filter.median.prepare() - unit = 'ms' + unit = "ms" @register_test() @@ -830,7 +923,8 @@ class tsvgr_opacity(PageloaderTest): """ An svg-only number that measures SVG rendering performance. 
""" - tpmanifest = '${talos}/tests/svg_opacity/svg_opacity.manifest' + + tpmanifest = "${talos}/tests/svg_opacity/svg_opacity.manifest" tpcycles = 1 tppagecycles = 25 tpmozafterpaint = True @@ -838,7 +932,7 @@ class tsvgr_opacity(PageloaderTest): gecko_profile_interval = 1 gecko_profile_entries = 10000000 filters = filter.ignore_first.prepare(5) + filter.median.prepare() - unit = 'ms' + unit = "ms" @register_test() @@ -846,7 +940,8 @@ class tscrollx(PageloaderTest): """ This test does some scrolly thing. """ - tpmanifest = '${talos}/tests/scroll/scroll.manifest' + + tpmanifest = "${talos}/tests/scroll/scroll.manifest" tpcycles = 1 tppagecycles = 25 tpmozafterpaint = False @@ -854,14 +949,16 @@ class tscrollx(PageloaderTest): gecko_profile_interval = 1 gecko_profile_entries = 1000000 """ ASAP mode """ - preferences = {'layout.frame_rate': 0, - 'docshell.event_starvation_delay_hint': 1, - 'dom.send_after_paint_to_content': True, - 'apz.paint_skipping.enabled': False, - 'layout.css.scroll-behavior.spring-constant': "'10'", - 'toolkit.framesRecording.bufferSize': 10000} + preferences = { + "layout.frame_rate": 0, + "docshell.event_starvation_delay_hint": 1, + "dom.send_after_paint_to_content": True, + "apz.paint_skipping.enabled": False, + "layout.css.scroll-behavior.spring-constant": "'10'", + "toolkit.framesRecording.bufferSize": 10000, + } filters = filter.ignore_first.prepare(5) + filter.median.prepare() - unit = 'ms' + unit = "ms" @register_test() @@ -870,13 +967,14 @@ class a11yr(PageloaderTest): This test ensures basic a11y tables and permutations do not cause performance regressions. """ - tpmanifest = '${talos}/tests/a11y/a11y.manifest' + + tpmanifest = "${talos}/tests/a11y/a11y.manifest" tpcycles = 1 tppagecycles = 25 tpmozafterpaint = True tpchrome = False - preferences = {'dom.send_after_paint_to_content': False} - unit = 'ms' + preferences = {"dom.send_after_paint_to_content": False} + unit = "ms" alert_threshold = 5.0 a11y = True @@ -888,32 +986,32 @@ class WebkitBenchmark(PageloaderTest): tpchrome = False format_pagename = False lower_is_better = False - unit = 'score' + unit = "score" @register_test() class stylebench(WebkitBenchmark): # StyleBench benchmark used by many browser vendors (from webkit) - tpmanifest = '${talos}/tests/stylebench/stylebench.manifest' + tpmanifest = "${talos}/tests/stylebench/stylebench.manifest" @register_test() class motionmark_animometer(WebkitBenchmark): # MotionMark benchmark used by many browser vendors (from webkit) - tpmanifest = '${talos}/tests/motionmark/animometer.manifest' + tpmanifest = "${talos}/tests/motionmark/animometer.manifest" @register_test() class motionmark_webgl(WebkitBenchmark): # MotionMark benchmark used by many browser vendors (from webkit) - tpmanifest = '${talos}/tests/motionmark/webgl.manifest' - unit = 'fps' + tpmanifest = "${talos}/tests/motionmark/webgl.manifest" + unit = "fps" @register_test() class ARES6(WebkitBenchmark): # ARES-6 benchmark used by many browser vendors (from webkit) - tpmanifest = '${talos}/tests/ares6/ares6.manifest' + tpmanifest = "${talos}/tests/ares6/ares6.manifest" tppagecycles = 1 lower_is_better = True @@ -921,13 +1019,13 @@ class ARES6(WebkitBenchmark): @register_test() class motionmark_htmlsuite(WebkitBenchmark): # MotionMark benchmark used by many browser vendors (from webkit) - tpmanifest = '${talos}/tests/motionmark/htmlsuite.manifest' + tpmanifest = "${talos}/tests/motionmark/htmlsuite.manifest" @register_test() class JetStream(WebkitBenchmark): # JetStream benchmark used by many 
browser vendors (from webkit) - tpmanifest = '${talos}/tests/jetstream/jetstream.manifest' + tpmanifest = "${talos}/tests/jetstream/jetstream.manifest" tppagecycles = 1 @@ -936,15 +1034,18 @@ class perf_reftest(PageloaderTest): """ Style perf-reftest a set of tests where the result is the difference of base vs ref pages """ - base_vs_ref = True # compare the two test pages with eachother and report comparison - tpmanifest = '${talos}/tests/perf-reftest/perf_reftest.manifest' + + base_vs_ref = ( + True # compare the two test pages with eachother and report comparison + ) + tpmanifest = "${talos}/tests/perf-reftest/perf_reftest.manifest" tpcycles = 1 tppagecycles = 10 tptimeout = 30000 gecko_profile_interval = 1 gecko_profile_entries = 2000000 filters = filter.ignore_first.prepare(5) + filter.median.prepare() - unit = 'ms' + unit = "ms" lower_is_better = True alert_threshold = 5.0 subtest_alerts = True @@ -955,14 +1056,17 @@ class perf_reftest_singletons(PageloaderTest): """ Style perf-reftests run as individual tests """ - tpmanifest = '${talos}/tests/perf-reftest-singletons/perf_reftest_singletons.manifest' + + tpmanifest = ( + "${talos}/tests/perf-reftest-singletons/perf_reftest_singletons.manifest" + ) tpcycles = 1 tppagecycles = 15 tptimeout = 30000 gecko_profile_interval = 1 gecko_profile_entries = 2000000 filters = filter.ignore_first.prepare(5) + filter.median.prepare() - unit = 'ms' + unit = "ms" lower_is_better = True alert_threshold = 5.0 subtest_alerts = True @@ -975,7 +1079,8 @@ class displaylist_mutate(PageloaderTest): Test modifying single items in a large display list. Measure transaction speed to the compositor. """ - tpmanifest = '${talos}/tests/layout/displaylist_mutate.manifest' + + tpmanifest = "${talos}/tests/layout/displaylist_mutate.manifest" tpcycles = 1 tppagecycles = 5 tploadnocache = True @@ -986,10 +1091,12 @@ class displaylist_mutate(PageloaderTest): win_counters = w7_counters = linux_counters = mac_counters = None filters = filter.ignore_first.prepare(1) + filter.median.prepare() """ASAP mode""" - preferences = {'layout.frame_rate': 0, - 'docshell.event_starvation_delay_hint': 1, - 'dom.send_after_paint_to_content': False} - unit = 'ms' + preferences = { + "layout.frame_rate": 0, + "docshell.event_starvation_delay_hint": 1, + "dom.send_after_paint_to_content": False, + } + unit = "ms" @register_test() @@ -998,7 +1105,8 @@ class rasterflood_svg(PageloaderTest): Test modifying single items in a large display list. Measure transaction speed to the compositor. """ - tpmanifest = '${talos}/tests/gfx/rasterflood_svg.manifest' + + tpmanifest = "${talos}/tests/gfx/rasterflood_svg.manifest" tpcycles = 1 tppagecycles = 10 tploadnocache = True @@ -1009,10 +1117,12 @@ class rasterflood_svg(PageloaderTest): win_counters = w7_counters = linux_counters = mac_counters = None filters = filter.ignore_first.prepare(1) + filter.median.prepare() """ASAP mode""" - preferences = {'layout.frame_rate': 0, - 'docshell.event_starvation_delay_hint': 1, - 'dom.send_after_paint_to_content': False} - unit = 'ms' + preferences = { + "layout.frame_rate": 0, + "docshell.event_starvation_delay_hint": 1, + "dom.send_after_paint_to_content": False, + } + unit = "ms" @register_test() @@ -1020,7 +1130,8 @@ class rasterflood_gradient(PageloaderTest): """ Test expensive rasterization while the main thread is busy. 
""" - tpmanifest = '${talos}/tests/gfx/rasterflood_gradient.manifest' + + tpmanifest = "${talos}/tests/gfx/rasterflood_gradient.manifest" tpcycles = 1 tppagecycles = 10 tploadnocache = True @@ -1031,11 +1142,13 @@ class rasterflood_gradient(PageloaderTest): win_counters = w7_counters = linux_counters = mac_counters = None filters = filter.ignore_first.prepare(1) + filter.median.prepare() """ASAP mode""" - preferences = {'layout.frame_rate': 0, - 'docshell.event_starvation_delay_hint': 1, - 'dom.send_after_paint_to_content': False} + preferences = { + "layout.frame_rate": 0, + "docshell.event_starvation_delay_hint": 1, + "dom.send_after_paint_to_content": False, + } lower_is_better = False - unit = 'score' + unit = "score" @register_test() @@ -1043,7 +1156,8 @@ class about_preferences_basic(PageloaderTest): """ Base class for about_preferences test """ - tpmanifest = '${talos}/tests/about-preferences/about_preferences_basic.manifest' + + tpmanifest = "${talos}/tests/about-preferences/about_preferences_basic.manifest" # this test uses 'about:blank' as a dummy page (see manifest) so that the pages # that just change url categories (i.e. about:preferences#search) will get a load event # also any of the url category pages cannot have more than one tppagecycle @@ -1052,7 +1166,7 @@ class about_preferences_basic(PageloaderTest): gecko_profile_interval = 1 gecko_profile_entries = 2000000 filters = filter.ignore_first.prepare(5) + filter.median.prepare() - unit = 'ms' + unit = "ms" lower_is_better = True fnbpaint = True @@ -1062,31 +1176,29 @@ class about_newtab_with_snippets(PageloaderTest): """ Load about ActivityStream (about:home and about:newtab) with snippets enabled """ - tpmanifest = '${talos}/tests/about-newtab/about_newtab.manifest' + + tpmanifest = "${talos}/tests/about-newtab/about_newtab.manifest" tpcycles = 25 tppagecycles = 1 responsiveness = True gecko_profile_interval = 1 gecko_profile_entries = 2000000 filters = filter.ignore_first.prepare(5) + filter.median.prepare() - unit = 'ms' + unit = "ms" lower_is_better = True fnbpaint = True preferences = { - # ensure that snippets are turned on and load the json messages - 'browser.newtabpage.activity-stream.asrouter.providers.snippets':\ - '{"id":"snippets","enabled":true,"type":"json","location":\ + # ensure that snippets are turned on and load the json messages + "browser.newtabpage.activity-stream.asrouter.providers.snippets": '{"id":"snippets","enabled":true,"type":"json","location":\ "http://fakedomain/tests/about-newtab/snippets.json",\ "updateCycleInMs":14400000}', - 'browser.newtabpage.activity-stream.feeds.snippets': True, - 'browser.newtabpage.activity-stream.feeds.system.topstories': True, - 'browser.newtabpage.activity-stream.feeds.section.topstories': True, - 'browser.newtabpage.activity-stream.feeds.section.topstories.options':\ - '{"provider_name":""}', - 'browser.newtabpage.activity-stream.discoverystream.endpoints': 'http://fakedomain', - 'browser.newtabpage.activity-stream.discoverystream.config':\ - '{"api_key_pref":"extensions.pocket.oAuthConsumerKey","collapsible":true,\ + "browser.newtabpage.activity-stream.feeds.snippets": True, + "browser.newtabpage.activity-stream.feeds.system.topstories": True, + "browser.newtabpage.activity-stream.feeds.section.topstories": True, + "browser.newtabpage.activity-stream.feeds.section.topstories.options": '{"provider_name":""}', # NOQA: E501 + "browser.newtabpage.activity-stream.discoverystream.endpoints": "http://fakedomain", + 
"browser.newtabpage.activity-stream.discoverystream.config": '{"api_key_pref":"extensions.pocket.oAuthConsumerKey","collapsible":true,\ "enabled":true,"show_spocs":false,"hardcoded_layout":false,"personalized":true,\ "layout_endpoint":\ - "http://fakedomain/tests/about-newtab/ds_layout.json"}' - } + "http://fakedomain/tests/about-newtab/ds_layout.json"}', + } diff --git a/testing/talos/talos/unittests/test_xtalos.py b/testing/talos/talos/unittests/test_xtalos.py index 8b97f6e30f0b..d8dee3dd7ca6 100644 --- a/testing/talos/talos/unittests/test_xtalos.py +++ b/testing/talos/talos/unittests/test_xtalos.py @@ -10,47 +10,25 @@ from talos.xtalos.etlparser import NAME_SUBSTITUTIONS def test_NAME_SUBSTITUTIONS(): filepaths_map = { # tp5n files - r'{talos}\talos\tests\tp5n\alibaba.com\i03.i.aliimg.com\images\eng\style\css_images': - r'{talos}\talos\tests\{tp5n_files}', - r'{talos}\talos\tests\tp5n\cnet.com\i.i.com.com\cnwk.1d\i\tron\fd': - r'{talos}\talos\tests\{tp5n_files}', - r'{talos}\talos\tests\tp5n\tp5n.manifest': - r'{talos}\talos\tests\{tp5n_files}', - r'{talos}\talos\tests\tp5n\tp5n.manifest.develop': - r'{talos}\talos\tests\{tp5n_files}', - r'{talos}\talos\tests\tp5n\yelp.com\media1.ct.yelpcdn.com\photo': - r'{talos}\talos\tests\{tp5n_files}', - + r"{talos}\talos\tests\tp5n\alibaba.com\i03.i.aliimg.com\images\eng\style\css_images": r"{talos}\talos\tests\{tp5n_files}", # NOQA: E501 + r"{talos}\talos\tests\tp5n\cnet.com\i.i.com.com\cnwk.1d\i\tron\fd": r"{talos}\talos\tests\{tp5n_files}", # NOQA: E501 + r"{talos}\talos\tests\tp5n\tp5n.manifest": r"{talos}\talos\tests\{tp5n_files}", + r"{talos}\talos\tests\tp5n\tp5n.manifest.develop": r"{talos}\talos\tests\{tp5n_files}", + r"{talos}\talos\tests\tp5n\yelp.com\media1.ct.yelpcdn.com\photo": r"{talos}\talos\tests\{tp5n_files}", # NOQA: E501 # cltbld for Windows 7 32bit - r'c:\users\cltbld.t-w732-ix-015.000\appdata\locallow\mozilla': - r'c:\users\{cltbld}\appdata\locallow\mozilla', - r'c:\users\cltbld.t-w732-ix-035.000\appdata\locallow\mozilla': - r'c:\users\{cltbld}\appdata\locallow\mozilla', - r'c:\users\cltbld.t-w732-ix-058.000\appdata\locallow\mozilla': - r'c:\users\{cltbld}\appdata\locallow\mozilla', - r'c:\users\cltbld.t-w732-ix-112.001\appdata\local\temp': - r'c:\users\{cltbld}\appdata\local\temp', - + r"c:\users\cltbld.t-w732-ix-015.000\appdata\locallow\mozilla": r"c:\users\{cltbld}\appdata\locallow\mozilla", # NOQA: E501 + r"c:\users\cltbld.t-w732-ix-035.000\appdata\locallow\mozilla": r"c:\users\{cltbld}\appdata\locallow\mozilla", # NOQA: E501 + r"c:\users\cltbld.t-w732-ix-058.000\appdata\locallow\mozilla": r"c:\users\{cltbld}\appdata\locallow\mozilla", # NOQA: E501 + r"c:\users\cltbld.t-w732-ix-112.001\appdata\local\temp": r"c:\users\{cltbld}\appdata\local\temp", # NOQA: E501 # nvidia's 3D Vision - r'c:\program files\nvidia corporation\3d vision\npnv3dv.dll': - r'c:\program files\{nvidia_3d_vision}', - r'c:\program files\nvidia corporation\3d vision\npnv3dvstreaming.dll': - r'c:\program files\{nvidia_3d_vision}', - r'c:\program files\nvidia corporation\3d vision\nvstereoapii.dll': - r'c:\program files\{nvidia_3d_vision}', - - r'{firefox}\browser\extensions\{45b6d270-f6ec-4930-a6ad-14bac5ea2204}.xpi': - r'{firefox}\browser\extensions\{uuid}.xpi', - - r'c:\slave\test\build\venv\lib\site-packages\pip\_vendor\html5lib\treebuilders': - r'c:\slave\test\build\venv\lib\site-packages\{pip_vendor}', - r'c:\slave\test\build\venv\lib\site-packages\pip\_vendor\colorama': - r'c:\slave\test\build\venv\lib\site-packages\{pip_vendor}', - 
r'c:\slave\test\build\venv\lib\site-packages\pip\_vendor\cachecontrol\caches': - r'c:\slave\test\build\venv\lib\site-packages\{pip_vendor}', - r'c:\slave\test\build\venv\lib\site-packages\pip\_vendor\requests\packages\urllib3' - r'\packages\ssl_match_hostname': - r'c:\slave\test\build\venv\lib\site-packages\{pip_vendor}', + r"c:\program files\nvidia corporation\3d vision\npnv3dv.dll": r"c:\program files\{nvidia_3d_vision}", # NOQA: E501 + r"c:\program files\nvidia corporation\3d vision\npnv3dvstreaming.dll": r"c:\program files\{nvidia_3d_vision}", # NOQA: E501 + r"c:\program files\nvidia corporation\3d vision\nvstereoapii.dll": r"c:\program files\{nvidia_3d_vision}", # NOQA: E501 + r"{firefox}\browser\extensions\{45b6d270-f6ec-4930-a6ad-14bac5ea2204}.xpi": r"{firefox}\browser\extensions\{uuid}.xpi", # NOQA: E501 + r"c:\slave\test\build\venv\lib\site-packages\pip\_vendor\html5lib\treebuilders": r"c:\slave\test\build\venv\lib\site-packages\{pip_vendor}", # NOQA: E501 + r"c:\slave\test\build\venv\lib\site-packages\pip\_vendor\colorama": r"c:\slave\test\build\venv\lib\site-packages\{pip_vendor}", # NOQA: E501 + r"c:\slave\test\build\venv\lib\site-packages\pip\_vendor\cachecontrol\caches": r"c:\slave\test\build\venv\lib\site-packages\{pip_vendor}", # NOQA: E501 + r"c:\slave\test\build\venv\lib\site-packages\pip\_vendor\requests\packages\urllib3\packages\ssl_match_hostname": r"c:\slave\test\build\venv\lib\site-packages\{pip_vendor}", # NOQA: E501 } for given_raw_path, exp_normal_path in filepaths_map.items(): @@ -60,5 +38,5 @@ def test_NAME_SUBSTITUTIONS(): assert exp_normal_path == normal_path -if __name__ == '__main__': +if __name__ == "__main__": mozunit.main() diff --git a/testing/web-platform/metamerge.py b/testing/web-platform/metamerge.py index ef952270ec28..5351d1a89f6e 100644 --- a/testing/web-platform/metamerge.py +++ b/testing/web-platform/metamerge.py @@ -89,18 +89,14 @@ def data_cls_getter(output_node, visited_node): def compile(stream, data_cls_getter=None, **kwargs): - return base.compile(Compiler, - stream, - data_cls_getter=data_cls_getter, - **kwargs) + return base.compile(Compiler, stream, data_cls_getter=data_cls_getter, **kwargs) def get_manifest(manifest_path): """Get the ExpectedManifest for a particular manifest path""" try: with open(manifest_path) as f: - return compile(f, - data_cls_getter=data_cls_getter) + return compile(f, data_cls_getter=data_cls_getter) except IOError: return None @@ -125,20 +121,25 @@ class Differences(object): modified = [] for item in self.modified: if isinstance(item, TestModified): - modified.append(" %s\n %s\n%s" % (item[0], item[1], indent(str(item[2]), 4))) + modified.append( + " %s\n %s\n%s" % (item[0], item[1], indent(str(item[2]), 4)) + ) else: assert isinstance(item, ExpectedModified) modified.append(" %s\n %s %s" % item) return "Added:\n%s\nDeleted:\n%s\nModified:\n%s\n" % ( "\n".join(" %s:\n %s" % item for item in self.added), "\n".join(" %s" % item for item in self.deleted), - "\n".join(modified)) + "\n".join(modified), + ) TestModified = namedtuple("TestModified", ["test", "test_manifest", "differences"]) -ExpectedModified = namedtuple("ExpectedModified", ["test", "ancestor_manifest", "new_manifest"]) +ExpectedModified = namedtuple( + "ExpectedModified", ["test", "ancestor_manifest", "new_manifest"] +) def compare_test(test, ancestor_manifest, new_manifest): @@ -147,8 +148,12 @@ def compare_test(test, ancestor_manifest, new_manifest): compare_expected(changes, None, ancestor_manifest, new_manifest) for subtest, 
ancestor_subtest_manifest in iteritems(ancestor_manifest.child_map): - compare_expected(changes, subtest, ancestor_subtest_manifest, - new_manifest.child_map.get(subtest)) + compare_expected( + changes, + subtest, + ancestor_subtest_manifest, + new_manifest.child_map.get(subtest), + ) for subtest, subtest_manifest in iteritems(new_manifest.child_map): if subtest not in ancestor_manifest.child_map: @@ -158,18 +163,32 @@ def compare_test(test, ancestor_manifest, new_manifest): def compare_expected(changes, subtest, ancestor_manifest, new_manifest): - if (not (ancestor_manifest and ancestor_manifest.has_key("expected")) and # noqa W601 - (new_manifest and new_manifest.has_key("expected"))): # noqa W601 - changes.modified.append(ExpectedModified(subtest, ancestor_manifest, new_manifest)) - elif (ancestor_manifest and ancestor_manifest.has_key("expected") and # noqa W601 - not (new_manifest and new_manifest.has_key("expected"))): # noqa W601 + if not ( + ancestor_manifest and ancestor_manifest.has_key("expected") # noqa W601 + ) and ( + new_manifest and new_manifest.has_key("expected") # noqa W601 + ): + changes.modified.append( + ExpectedModified(subtest, ancestor_manifest, new_manifest) + ) + elif ( + ancestor_manifest + and ancestor_manifest.has_key("expected") # noqa W601 + and not (new_manifest and new_manifest.has_key("expected")) # noqa W601 + ): changes.deleted.append(subtest) - elif (ancestor_manifest and ancestor_manifest.has_key("expected") and # noqa W601 - new_manifest and new_manifest.has_key("expected")): # noqa W601 + elif ( + ancestor_manifest + and ancestor_manifest.has_key("expected") # noqa W601 + and new_manifest + and new_manifest.has_key("expected") # noqa W601 + ): old_expected = ancestor_manifest.get("expected") new_expected = new_manifest.get("expected") if expected_values_changed(old_expected, new_expected): - changes.modified.append(ExpectedModified(subtest, ancestor_manifest, new_manifest)) + changes.modified.append( + ExpectedModified(subtest, ancestor_manifest, new_manifest) + ) def expected_values_changed(old_expected, new_expected): @@ -198,11 +217,11 @@ def record_changes(ancestor_manifest, new_manifest): changes.added.append((test, test_manifest)) else: ancestor_test_manifest = ancestor_manifest.child_map[test] - test_differences = compare_test(test, - ancestor_test_manifest, - test_manifest) + test_differences = compare_test(test, ancestor_test_manifest, test_manifest) if test_differences: - changes.modified.append(TestModified(test, test_manifest, test_differences)) + changes.modified.append( + TestModified(test, test_manifest, test_differences) + ) for test, test_manifest in iteritems(ancestor_manifest.child_map): if test not in new_manifest.child_map: @@ -266,7 +285,9 @@ def run(ancestor, current, new, dest): current_manifest = get_manifest(current) new_manifest = get_manifest(new) - updated_current_str = make_changes(ancestor_manifest, current_manifest, new_manifest) + updated_current_str = make_changes( + ancestor_manifest, current_manifest, new_manifest + ) if dest != "-": with open(dest, "wb") as f: diff --git a/toolkit/components/telemetry/build_scripts/mozparsers/parse_histograms.py b/toolkit/components/telemetry/build_scripts/mozparsers/parse_histograms.py index e3eac1c3d87d..4d7b6f26e090 100644 --- a/toolkit/components/telemetry/build_scripts/mozparsers/parse_histograms.py +++ b/toolkit/components/telemetry/build_scripts/mozparsers/parse_histograms.py @@ -17,6 +17,7 @@ from . 
import shared_telemetry_utils as utils from ctypes import c_int from .shared_telemetry_utils import ParserError from collections import OrderedDict + atexit.register(ParserError.exit_func) # Constants. @@ -25,32 +26,33 @@ MAX_LABEL_COUNT = 100 MAX_KEY_COUNT = 30 MAX_KEY_LENGTH = 20 MIN_CATEGORICAL_BUCKET_COUNT = 50 -CPP_IDENTIFIER_PATTERN = '^[a-z][a-z0-9_]+[a-z0-9]$' +CPP_IDENTIFIER_PATTERN = "^[a-z][a-z0-9_]+[a-z0-9]$" ALWAYS_ALLOWED_KEYS = [ - 'kind', - 'description', - 'operating_systems', - 'expires_in_version', - 'alert_emails', - 'keyed', - 'releaseChannelCollection', - 'bug_numbers', - 'keys', - 'record_in_processes', - 'record_into_store', - 'products', + "kind", + "description", + "operating_systems", + "expires_in_version", + "alert_emails", + "keyed", + "releaseChannelCollection", + "bug_numbers", + "keys", + "record_in_processes", + "record_into_store", + "products", ] -BASE_DOC_URL = ("https://firefox-source-docs.mozilla.org/toolkit/components/" - "telemetry/telemetry/") -HISTOGRAMS_DOC_URL = (BASE_DOC_URL + "collection/histograms.html") -SCALARS_DOC_URL = (BASE_DOC_URL + "collection/scalars.html") +BASE_DOC_URL = ( + "https://firefox-source-docs.mozilla.org/toolkit/components/" "telemetry/telemetry/" +) +HISTOGRAMS_DOC_URL = BASE_DOC_URL + "collection/histograms.html" +SCALARS_DOC_URL = BASE_DOC_URL + "collection/scalars.html" GECKOVIEW_STREAMING_SUPPORTED_KINDS = [ - 'linear', - 'exponential', - 'categorical', + "linear", + "exponential", + "categorical", ] # parse_histograms.py is used by scripts from a mozilla-central build tree @@ -62,7 +64,7 @@ try: import buildconfig # Need to update sys.path to be able to find usecounters. - sys.path.append(os.path.join(buildconfig.topsrcdir, 'dom/base/')) + sys.path.append(os.path.join(buildconfig.topsrcdir, "dom/base/")) except ImportError: # Must be in an out-of-tree usage scenario. Trust that whoever is # running this script knows we need the usecounters module and has @@ -110,18 +112,22 @@ def load_allowlist(): # the histogram-allowlists file lives in the root of the module. Account # for that when looking for the allowlist. # NOTE: if the parsers are moved, this logic will need to be updated. - telemetry_module_path = os.path.abspath(os.path.join(parsers_path, os.pardir, os.pardir)) - allowlist_path = os.path.join(telemetry_module_path, 'histogram-allowlists.json') - with open(allowlist_path, 'r') as f: + telemetry_module_path = os.path.abspath( + os.path.join(parsers_path, os.pardir, os.pardir) + ) + allowlist_path = os.path.join( + telemetry_module_path, "histogram-allowlists.json" + ) + with open(allowlist_path, "r") as f: try: allowlists = json.load(f) for name, allowlist in allowlists.items(): allowlists[name] = set(allowlist) except ValueError: - ParserError('Error parsing allowlist: %s' % allowlist_path).handle_now() + ParserError("Error parsing allowlist: %s" % allowlist_path).handle_now() except IOError: allowlists = None - ParserError('Unable to parse allowlist: %s.' % allowlist_path).handle_now() + ParserError("Unable to parse allowlist: %s." % allowlist_path).handle_now() class Histogram: @@ -129,32 +135,33 @@ class Histogram: def __init__(self, name, definition, strict_type_checks=False): """Initialize a histogram named name with the given definition. -definition is a dict-like object that must contain at least the keys: + definition is a dict-like object that must contain at least the keys: - - 'kind': The kind of histogram. Must be one of 'boolean', 'flag', - 'count', 'enumerated', 'linear', or 'exponential'. 
- - 'description': A textual description of the histogram. - - 'strict_type_checks': A boolean indicating whether to use the new, stricter type checks. - The server-side still has to deal with old, oddly typed submissions, - so we have to skip them there by default.""" + - 'kind': The kind of histogram. Must be one of 'boolean', 'flag', + 'count', 'enumerated', 'linear', or 'exponential'. + - 'description': A textual description of the histogram. + - 'strict_type_checks': A boolean indicating whether to use the new, stricter type checks. + The server-side still has to deal with old, oddly typed + submissions, so we have to skip them there by default. + """ self._strict_type_checks = strict_type_checks self._is_use_counter = name.startswith("USE_COUNTER2_") if self._is_use_counter: - definition.setdefault('record_in_processes', ['main', 'content']) - definition.setdefault('releaseChannelCollection', 'opt-out') - definition.setdefault('products', ['firefox', 'fennec']) + definition.setdefault("record_in_processes", ["main", "content"]) + definition.setdefault("releaseChannelCollection", "opt-out") + definition.setdefault("products", ["firefox", "fennec"]) self.verify_attributes(name, definition) self._name = name - self._description = definition['description'] - self._kind = definition['kind'] - self._keys = definition.get('keys', []) - self._keyed = definition.get('keyed', False) - self._expiration = definition.get('expires_in_version') - self._labels = definition.get('labels', []) - self._record_in_processes = definition.get('record_in_processes') - self._record_into_store = definition.get('record_into_store', ['main']) - self._products = definition.get('products') - self._operating_systems = definition.get('operating_systems', ["all"]) + self._description = definition["description"] + self._kind = definition["kind"] + self._keys = definition.get("keys", []) + self._keyed = definition.get("keyed", False) + self._expiration = definition.get("expires_in_version") + self._labels = definition.get("labels", []) + self._record_in_processes = definition.get("record_in_processes") + self._record_into_store = definition.get("record_into_store", ["main"]) + self._products = definition.get("products") + self._operating_systems = definition.get("operating_systems", ["all"]) self.compute_bucket_parameters(definition) self.set_nsITelemetry_kind() @@ -170,8 +177,8 @@ definition is a dict-like object that must contain at least the keys: def kind(self): """Return the kind of the histogram. 
-Will be one of 'boolean', 'flag', 'count', 'enumerated', 'categorical', 'linear', -or 'exponential'.""" + Will be one of 'boolean', 'flag', 'count', 'enumerated', 'categorical', 'linear', + or 'exponential'.""" return self._kind def expiration(self): @@ -180,7 +187,7 @@ or 'exponential'.""" def nsITelemetry_kind(self): """Return the nsITelemetry constant corresponding to the kind of -the histogram.""" + the histogram.""" return self._nsITelemetry_kind def low(self): @@ -251,61 +258,65 @@ the histogram.""" def ranges(self): """Return an array of lower bounds for each bucket in the histogram.""" bucket_fns = { - 'boolean': linear_buckets, - 'flag': linear_buckets, - 'count': linear_buckets, - 'enumerated': linear_buckets, - 'categorical': linear_buckets, - 'linear': linear_buckets, - 'exponential': exponential_buckets, + "boolean": linear_buckets, + "flag": linear_buckets, + "count": linear_buckets, + "enumerated": linear_buckets, + "categorical": linear_buckets, + "linear": linear_buckets, + "exponential": exponential_buckets, } if self._kind not in bucket_fns: - ParserError('Unknown kind "%s" for histogram "%s".' % - (self._kind, self._name)).handle_later() + ParserError( + 'Unknown kind "%s" for histogram "%s".' % (self._kind, self._name) + ).handle_later() fn = bucket_fns[self._kind] return fn(self.low(), self.high(), self.n_buckets()) def compute_bucket_parameters(self, definition): bucket_fns = { - 'boolean': Histogram.boolean_flag_bucket_parameters, - 'flag': Histogram.boolean_flag_bucket_parameters, - 'count': Histogram.boolean_flag_bucket_parameters, - 'enumerated': Histogram.enumerated_bucket_parameters, - 'categorical': Histogram.categorical_bucket_parameters, - 'linear': Histogram.linear_bucket_parameters, - 'exponential': Histogram.exponential_bucket_parameters, + "boolean": Histogram.boolean_flag_bucket_parameters, + "flag": Histogram.boolean_flag_bucket_parameters, + "count": Histogram.boolean_flag_bucket_parameters, + "enumerated": Histogram.enumerated_bucket_parameters, + "categorical": Histogram.categorical_bucket_parameters, + "linear": Histogram.linear_bucket_parameters, + "exponential": Histogram.exponential_bucket_parameters, } if self._kind not in bucket_fns: - ParserError('Unknown kind "%s" for histogram "%s".' % - (self._kind, self._name)).handle_later() + ParserError( + 'Unknown kind "%s" for histogram "%s".' % (self._kind, self._name) + ).handle_later() fn = bucket_fns[self._kind] self.set_bucket_parameters(*fn(definition)) def verify_attributes(self, name, definition): global ALWAYS_ALLOWED_KEYS - general_keys = ALWAYS_ALLOWED_KEYS + ['low', 'high', 'n_buckets'] + general_keys = ALWAYS_ALLOWED_KEYS + ["low", "high", "n_buckets"] table = { - 'boolean': ALWAYS_ALLOWED_KEYS, - 'flag': ALWAYS_ALLOWED_KEYS, - 'count': ALWAYS_ALLOWED_KEYS, - 'enumerated': ALWAYS_ALLOWED_KEYS + ['n_values'], - 'categorical': ALWAYS_ALLOWED_KEYS + ['labels', 'n_values'], - 'linear': general_keys, - 'exponential': general_keys, + "boolean": ALWAYS_ALLOWED_KEYS, + "flag": ALWAYS_ALLOWED_KEYS, + "count": ALWAYS_ALLOWED_KEYS, + "enumerated": ALWAYS_ALLOWED_KEYS + ["n_values"], + "categorical": ALWAYS_ALLOWED_KEYS + ["labels", "n_values"], + "linear": general_keys, + "exponential": general_keys, } # We removed extended_statistics_ok on the client, but the server-side, # where _strict_type_checks==False, has to deal with historical data. 
if not self._strict_type_checks: - table['exponential'].append('extended_statistics_ok') + table["exponential"].append("extended_statistics_ok") - kind = definition['kind'] + kind = definition["kind"] if kind not in table: - ParserError('Unknown kind "%s" for histogram "%s".' % (kind, name)).handle_later() + ParserError( + 'Unknown kind "%s" for histogram "%s".' % (kind, name) + ).handle_later() allowed_keys = table[kind] self.check_name(name) @@ -322,25 +333,29 @@ the histogram.""" self.check_record_into_store(name, definition) def check_name(self, name): - if '#' in name: - ParserError('Error for histogram name "%s": "#" is not allowed.' % - (name)).handle_later() + if "#" in name: + ParserError( + 'Error for histogram name "%s": "#" is not allowed.' % (name) + ).handle_later() # Avoid C++ identifier conflicts between histogram enums and label enum names. if name.startswith("LABELS_"): - ParserError('Error for histogram name "%s": can not start with "LABELS_".' % - (name)).handle_later() + ParserError( + 'Error for histogram name "%s": can not start with "LABELS_".' % (name) + ).handle_later() # To make it easier to generate C++ identifiers from this etc., we restrict # the histogram names to a strict pattern. # We skip this on the server to avoid failures with old Histogram.json revisions. if self._strict_type_checks: if not re.match(CPP_IDENTIFIER_PATTERN, name, re.IGNORECASE): - ParserError('Error for histogram name "%s": name does not conform to "%s"' % - (name, CPP_IDENTIFIER_PATTERN)).handle_later() + ParserError( + 'Error for histogram name "%s": name does not conform to "%s"' + % (name, CPP_IDENTIFIER_PATTERN) + ).handle_later() def check_expiration(self, name, definition): - field = 'expires_in_version' + field = "expires_in_version" expiration = definition.get(field) if not expiration: @@ -348,97 +363,125 @@ the histogram.""" # We forbid new probes from using "expires_in_version" : "default" field/value pair. # Old ones that use this are added to the allowlist. - if expiration == "default" and \ - allowlists is not None and \ - name not in allowlists['expiry_default']: - ParserError('New histogram "%s" cannot have "default" %s value.' % - (name, field)).handle_later() + if ( + expiration == "default" + and allowlists is not None + and name not in allowlists["expiry_default"] + ): + ParserError( + 'New histogram "%s" cannot have "default" %s value.' % (name, field) + ).handle_later() # Historical editions of Histograms.json can have the deprecated # expiration format 'N.Na1'. Fortunately, those scripts set # self._strict_type_checks to false. - if expiration != "default" and \ - not utils.validate_expiration_version(expiration) and \ - self._strict_type_checks: - ParserError(('Error for histogram {} - invalid {}: {}.' - '\nSee: {}#expires-in-version') - .format(name, field, expiration, HISTOGRAMS_DOC_URL)).handle_later() + if ( + expiration != "default" + and not utils.validate_expiration_version(expiration) + and self._strict_type_checks + ): + ParserError( + ( + "Error for histogram {} - invalid {}: {}." 
+ "\nSee: {}#expires-in-version" + ).format(name, field, expiration, HISTOGRAMS_DOC_URL) + ).handle_later() expiration = utils.add_expiration_postfix(expiration) definition[field] = expiration def check_label_values(self, name, definition): - labels = definition.get('labels') + labels = definition.get("labels") if not labels: return invalid = filter(lambda l: len(l) > MAX_LABEL_LENGTH, labels) if len(list(invalid)) > 0: - ParserError('Label values for "%s" exceed length limit of %d: %s' % - (name, MAX_LABEL_LENGTH, ', '.join(invalid))).handle_later() + ParserError( + 'Label values for "%s" exceed length limit of %d: %s' + % (name, MAX_LABEL_LENGTH, ", ".join(invalid)) + ).handle_later() if len(labels) > MAX_LABEL_COUNT: - ParserError('Label count for "%s" exceeds limit of %d' % - (name, MAX_LABEL_COUNT)).handle_now() + ParserError( + 'Label count for "%s" exceeds limit of %d' % (name, MAX_LABEL_COUNT) + ).handle_now() # To make it easier to generate C++ identifiers from this etc., we restrict # the label values to a strict pattern. - invalid = filter(lambda l: not re.match(CPP_IDENTIFIER_PATTERN, l, re.IGNORECASE), labels) + invalid = filter( + lambda l: not re.match(CPP_IDENTIFIER_PATTERN, l, re.IGNORECASE), labels + ) if len(list(invalid)) > 0: - ParserError('Label values for %s are not matching pattern "%s": %s' % - (name, CPP_IDENTIFIER_PATTERN, ', '.join(invalid))).handle_later() + ParserError( + 'Label values for %s are not matching pattern "%s": %s' + % (name, CPP_IDENTIFIER_PATTERN, ", ".join(invalid)) + ).handle_later() def check_record_in_processes(self, name, definition): if not self._strict_type_checks: return - field = 'record_in_processes' + field = "record_in_processes" rip = definition.get(field) DOC_URL = HISTOGRAMS_DOC_URL + "#record-in-processes" if not rip: - ParserError('Histogram "%s" must have a "%s" field:\n%s' - % (name, field, DOC_URL)).handle_later() + ParserError( + 'Histogram "%s" must have a "%s" field:\n%s' % (name, field, DOC_URL) + ).handle_later() for process in rip: if not utils.is_valid_process_name(process): - ParserError('Histogram "%s" has unknown process "%s" in %s.\n%s' % - (name, process, field, DOC_URL)).handle_later() + ParserError( + 'Histogram "%s" has unknown process "%s" in %s.\n%s' + % (name, process, field, DOC_URL) + ).handle_later() def check_products(self, name, definition): if not self._strict_type_checks: return - field = 'products' + field = "products" products = definition.get(field) DOC_URL = HISTOGRAMS_DOC_URL + "#products" if not products: - ParserError('Histogram "%s" must have a "%s" field:\n%s' - % (name, field, DOC_URL)).handle_now() + ParserError( + 'Histogram "%s" must have a "%s" field:\n%s' % (name, field, DOC_URL) + ).handle_now() for product in products: if not utils.is_valid_product(product): - ParserError('Histogram "%s" has unknown product "%s" in %s.\n%s' % - (name, product, field, DOC_URL)).handle_later() + ParserError( + 'Histogram "%s" has unknown product "%s" in %s.\n%s' + % (name, product, field, DOC_URL) + ).handle_later() if utils.is_geckoview_streaming_product(product): - kind = definition.get('kind') + kind = definition.get("kind") if kind not in GECKOVIEW_STREAMING_SUPPORTED_KINDS: - ParserError(('Histogram "%s" is of kind "%s" which is unsupported for ' - 'product "%s".') % (name, kind, product)).handle_later() - keyed = definition.get('keyed') + ParserError( + ( + 'Histogram "%s" is of kind "%s" which is unsupported for ' + 'product "%s".' 
+ ) + % (name, kind, product) + ).handle_later() + keyed = definition.get("keyed") if keyed: - ParserError('Keyed histograms like "%s" are unsupported for product "%s"' % - (name, product)).handle_later() + ParserError( + 'Keyed histograms like "%s" are unsupported for product "%s"' + % (name, product) + ).handle_later() def check_operating_systems(self, name, definition): if not self._strict_type_checks: return - field = 'operating_systems' + field = "operating_systems" operating_systems = definition.get(field) DOC_URL = HISTOGRAMS_DOC_URL + "#operating-systems" @@ -449,14 +492,16 @@ the histogram.""" for operating_system in operating_systems: if not utils.is_valid_os(operating_system): - ParserError('Histogram "%s" has unknown operating system "%s" in %s.\n%s' % - (name, operating_system, field, DOC_URL)).handle_later() + ParserError( + 'Histogram "%s" has unknown operating system "%s" in %s.\n%s' + % (name, operating_system, field, DOC_URL) + ).handle_later() def check_record_into_store(self, name, definition): if not self._strict_type_checks: return - field = 'record_into_store' + field = "record_into_store" DOC_URL = HISTOGRAMS_DOC_URL + "#record-into-store" if field not in definition: @@ -466,28 +511,36 @@ the histogram.""" record_into_store = definition.get(field) # record_into_store should not be empty if not record_into_store: - ParserError('Histogram "%s" has empty list of stores, which is not allowed.\n%s' % - (name, DOC_URL)).handle_later() + ParserError( + 'Histogram "%s" has empty list of stores, which is not allowed.\n%s' + % (name, DOC_URL) + ).handle_later() def check_keys_field(self, name, definition): - keys = definition.get('keys') + keys = definition.get("keys") if not self._strict_type_checks or keys is None: return - if not definition.get('keyed', False): - raise ValueError("'keys' field is not valid for %s; only allowed for keyed histograms." - % (name)) + if not definition.get("keyed", False): + raise ValueError( + "'keys' field is not valid for %s; only allowed for keyed histograms." + % (name) + ) if len(keys) == 0: - raise ValueError('The key list for %s cannot be empty' % (name)) + raise ValueError("The key list for %s cannot be empty" % (name)) if len(keys) > MAX_KEY_COUNT: - raise ValueError('Label count for %s exceeds limit of %d' % (name, MAX_KEY_COUNT)) + raise ValueError( + "Label count for %s exceeds limit of %d" % (name, MAX_KEY_COUNT) + ) invalid = filter(lambda k: len(k) > MAX_KEY_LENGTH, keys) if len(list(invalid)) > 0: - raise ValueError('"keys" values for %s are exceeding length "%d": %s' % - (name, MAX_KEY_LENGTH, ', '.join(invalid))) + raise ValueError( + '"keys" values for %s are exceeding length "%d": %s' + % (name, MAX_KEY_LENGTH, ", ".join(invalid)) + ) def check_allowlisted_kind(self, name, definition): # We don't need to run any of these checks on the server. @@ -500,16 +553,22 @@ the histogram.""" hist_kind = definition.get("kind") android_target = "android" in definition.get("operating_systems", []) - if not android_target and \ - hist_kind in ["flag", "count"] and \ - name not in allowlists["kind"]: - ParserError(('Unsupported kind "%s" for histogram "%s":\n' - 'New "%s" histograms are not supported on Desktop, you should' - ' use scalars instead:\n' - '%s\n' - 'Are you trying to add a histogram on Android?' 
- ' Add "operating_systems": ["android"] to your histogram definition.') - % (hist_kind, name, hist_kind, SCALARS_DOC_URL)).handle_now() + if ( + not android_target + and hist_kind in ["flag", "count"] + and name not in allowlists["kind"] + ): + ParserError( + ( + 'Unsupported kind "%s" for histogram "%s":\n' + 'New "%s" histograms are not supported on Desktop, you should' + " use scalars instead:\n" + "%s\n" + "Are you trying to add a histogram on Android?" + ' Add "operating_systems": ["android"] to your histogram definition.' + ) + % (hist_kind, name, hist_kind, SCALARS_DOC_URL) + ).handle_now() # Check for the presence of fields that old histograms are allowlisted for. def check_allowlistable_fields(self, name, definition): @@ -523,13 +582,16 @@ the histogram.""" if allowlists is None: return - for field in ['alert_emails', 'bug_numbers']: + for field in ["alert_emails", "bug_numbers"]: if field not in definition and name not in allowlists[field]: - ParserError('New histogram "%s" must have a "%s" field.' % - (name, field)).handle_later() + ParserError( + 'New histogram "%s" must have a "%s" field.' % (name, field) + ).handle_later() if field in definition and name in allowlists[field]: - msg = 'Histogram "%s" should be removed from the allowlist for "%s" in ' \ - 'histogram-allowlists.json.' + msg = ( + 'Histogram "%s" should be removed from the allowlist for "%s" in ' + "histogram-allowlists.json." + ) ParserError(msg % (name, field)).handle_later() def check_field_types(self, name, definition): @@ -576,6 +638,7 @@ the histogram.""" return eval(v, {}) except Exception: return v + for key in [k for k in coerce_fields if k in definition]: definition[key] = try_to_coerce_to_number(definition[key]) # This handles old "keyed":"true" definitions (bug 1271986). @@ -591,46 +654,56 @@ the histogram.""" if key not in definition: continue if not isinstance(definition[key], key_type): - ParserError('Value for key "{0}" in histogram "{1}" should be {2}.' - .format(key, name, nice_type_name(key_type))).handle_later() + ParserError( + 'Value for key "{0}" in histogram "{1}" should be {2}.'.format( + key, name, nice_type_name(key_type) + ) + ).handle_later() # Make sure the max range is lower than or equal to INT_MAX if "high" in definition and not c_int(definition["high"]).value > 0: - ParserError('Value for high in histogram "{0}" should be lower or equal to INT_MAX.' - .format(nice_type_name(c_int))).handle_later() + ParserError( + 'Value for high in histogram "{0}" should be lower or equal to INT_MAX.'.format( + nice_type_name(c_int) + ) + ).handle_later() for key, key_type in type_checked_list_fields.items(): if key not in definition: continue if not all(isinstance(x, key_type) for x in definition[key]): - ParserError('All values for list "{0}" in histogram "{1}" should be of type' - ' {2}.'.format(key, name, nice_type_name(key_type))).handle_later() + ParserError( + 'All values for list "{0}" in histogram "{1}" should be of type' + " {2}.".format(key, name, nice_type_name(key_type)) + ).handle_later() def check_keys(self, name, definition, allowed_keys): if not self._strict_type_checks: return for key in iter(definition.keys()): if key not in allowed_keys: - ParserError('Key "%s" is not allowed for histogram "%s".' % - (key, name)).handle_later() + ParserError( + 'Key "%s" is not allowed for histogram "%s".' 
% (key, name) + ).handle_later() def set_bucket_parameters(self, low, high, n_buckets): self._low = low self._high = high self._n_buckets = n_buckets - max_n_buckets = 101 if self._kind in ['enumerated', 'categorical'] else 100 - if (allowlists is not None + max_n_buckets = 101 if self._kind in ["enumerated", "categorical"] else 100 + if ( + allowlists is not None and self._n_buckets > max_n_buckets - and type(self._n_buckets) is int): - if self._name not in allowlists['n_buckets']: + and type(self._n_buckets) is int + ): + if self._name not in allowlists["n_buckets"]: ParserError( 'New histogram "%s" is not permitted to have more than 100 buckets.\n' - 'Histograms with large numbers of buckets use disproportionately high' - ' amounts of resources. Contact a Telemetry peer (e.g. in #telemetry)' - ' if you think an exception ought to be made:\n' - 'https://wiki.mozilla.org/Modules/Toolkit#Telemetry' - % self._name - ).handle_later() + "Histograms with large numbers of buckets use disproportionately high" + " amounts of resources. Contact a Telemetry peer (e.g. in #telemetry)" + " if you think an exception ought to be made:\n" + "https://wiki.mozilla.org/Modules/Toolkit#Telemetry" % self._name + ).handle_later() @staticmethod def boolean_flag_bucket_parameters(definition): @@ -638,13 +711,11 @@ the histogram.""" @staticmethod def linear_bucket_parameters(definition): - return (definition.get('low', 1), - definition['high'], - definition['n_buckets']) + return (definition.get("low", 1), definition["high"], definition["n_buckets"]) @staticmethod def enumerated_bucket_parameters(definition): - n_values = definition['n_values'] + n_values = definition["n_values"] return (1, n_values, n_values + 1) @staticmethod @@ -653,45 +724,48 @@ the histogram.""" # Otherwise when adding labels later we run into problems with the pipeline not # supporting bucket changes. # This can be overridden using the n_values field. - n_values = max(len(definition['labels']), - definition.get('n_values', 0), - MIN_CATEGORICAL_BUCKET_COUNT) + n_values = max( + len(definition["labels"]), + definition.get("n_values", 0), + MIN_CATEGORICAL_BUCKET_COUNT, + ) return (1, n_values, n_values + 1) @staticmethod def exponential_bucket_parameters(definition): - return (definition.get('low', 1), - definition['high'], - definition['n_buckets']) + return (definition.get("low", 1), definition["high"], definition["n_buckets"]) def set_nsITelemetry_kind(self): # Pick a Telemetry implementation type. types = { - 'boolean': 'BOOLEAN', - 'flag': 'FLAG', - 'count': 'COUNT', - 'enumerated': 'LINEAR', - 'categorical': 'CATEGORICAL', - 'linear': 'LINEAR', - 'exponential': 'EXPONENTIAL', + "boolean": "BOOLEAN", + "flag": "FLAG", + "count": "COUNT", + "enumerated": "LINEAR", + "categorical": "CATEGORICAL", + "linear": "LINEAR", + "exponential": "EXPONENTIAL", } if self._kind not in types: - ParserError('Unknown kind "%s" for histogram "%s".' % - (self._kind, self._name)).handle_later() + ParserError( + 'Unknown kind "%s" for histogram "%s".' 
% (self._kind, self._name) + ).handle_later() self._nsITelemetry_kind = "nsITelemetry::HISTOGRAM_%s" % types[self._kind] def set_dataset(self, definition): datasets = { - 'opt-in': 'DATASET_PRERELEASE_CHANNELS', - 'opt-out': 'DATASET_ALL_CHANNELS' + "opt-in": "DATASET_PRERELEASE_CHANNELS", + "opt-out": "DATASET_ALL_CHANNELS", } - value = definition.get('releaseChannelCollection', 'opt-in') + value = definition.get("releaseChannelCollection", "opt-in") if value not in datasets: - ParserError('Unknown value for releaseChannelCollection' - ' policy for histogram "%s".' % self._name).handle_later() + ParserError( + "Unknown value for releaseChannelCollection" + ' policy for histogram "%s".' % self._name + ).handle_later() self._dataset = "nsITelemetry::" + datasets[value] @@ -702,7 +776,9 @@ def load_histograms_into_dict(ordered_pairs, strict_type_checks): d = collections.OrderedDict() for key, value in ordered_pairs: if strict_type_checks and key in d: - ParserError("Found duplicate key in Histograms file: %s" % key).handle_later() + ParserError( + "Found duplicate key in Histograms file: %s" % key + ).handle_later() d[key] = value return d @@ -712,13 +788,17 @@ def load_histograms_into_dict(ordered_pairs, strict_type_checks): # routine to parse that file, and return a dictionary mapping histogram # names to histogram parameters. def from_json(filename, strict_type_checks): - with open(filename, 'r') as f: + with open(filename, "r") as f: try: + def hook(ps): return load_histograms_into_dict(ps, strict_type_checks) + histograms = json.load(f, object_pairs_hook=hook) except ValueError as e: - ParserError("error parsing histograms in %s: %s" % (filename, e.message)).handle_now() + ParserError( + "error parsing histograms in %s: %s" % (filename, e.message) + ).handle_now() return histograms @@ -731,10 +811,10 @@ def from_UseCountersWorker_conf(filename, strict_type_checks): def from_nsDeprecatedOperationList(filename, strict_type_checks): - operation_regex = re.compile('^DEPRECATED_OPERATION\\(([^)]+)\\)') + operation_regex = re.compile("^DEPRECATED_OPERATION\\(([^)]+)\\)") histograms = collections.OrderedDict() - with open(filename, 'r') as f: + with open(filename, "r") as f: for line in f: match = operation_regex.search(line) if not match: @@ -743,35 +823,42 @@ def from_nsDeprecatedOperationList(filename, strict_type_checks): op = match.group(1) def add_counter(context): - name = 'USE_COUNTER2_DEPRECATED_%s_%s' % (op, context.upper()) + name = "USE_COUNTER2_DEPRECATED_%s_%s" % (op, context.upper()) histograms[name] = { - 'expires_in_version': 'never', - 'kind': 'boolean', - 'description': 'Whether a %s used %s' % (context, op) + "expires_in_version": "never", + "kind": "boolean", + "description": "Whether a %s used %s" % (context, op), } - add_counter('document') - add_counter('page') + + add_counter("document") + add_counter("page") return histograms def to_camel_case(property_name): - return re.sub("(^|_|-)([a-z0-9])", - lambda m: m.group(2).upper(), - property_name.strip("_").strip("-")) + return re.sub( + "(^|_|-)([a-z0-9])", + lambda m: m.group(2).upper(), + property_name.strip("_").strip("-"), + ) def add_css_property_counters(histograms, property_name): def add_counter(context): - name = 'USE_COUNTER2_CSS_PROPERTY_%s_%s' % (to_camel_case(property_name), context.upper()) + name = "USE_COUNTER2_CSS_PROPERTY_%s_%s" % ( + to_camel_case(property_name), + context.upper(), + ) histograms[name] = { - 'expires_in_version': 'never', - 'kind': 'boolean', - 'description': 'Whether a %s used the CSS 
property %s' % (context, property_name) + "expires_in_version": "never", + "kind": "boolean", + "description": "Whether a %s used the CSS property %s" + % (context, property_name), } - add_counter('document') - add_counter('page') + add_counter("document") + add_counter("page") def from_ServoCSSPropList(filename, strict_type_checks): @@ -799,7 +886,7 @@ def from_counted_unknown_properties(filename, strict_type_checks): # This is only used for probe-scraper. def from_properties_db(filename, strict_type_checks): histograms = collections.OrderedDict() - with open(filename, 'r') as f: + with open(filename, "r") as f: in_css_properties = False for line in f: @@ -811,20 +898,28 @@ def from_properties_db(filename, strict_type_checks): if line.startswith("};"): break - if not line.startswith(" \""): + if not line.startswith(' "'): continue - name = line.split("\"")[1] + name = line.split('"')[1] add_css_property_counters(histograms, name) return histograms FILENAME_PARSERS = [ - (lambda x: from_json if x.endswith('.json') else None), - (lambda x: from_nsDeprecatedOperationList if x == 'nsDeprecatedOperationList.h' else None), - (lambda x: from_ServoCSSPropList if x == 'ServoCSSPropList.py' else None), - (lambda x: from_counted_unknown_properties if x == 'counted_unknown_properties.py' else None), - (lambda x: from_properties_db if x == 'properties-db.js' else None), + (lambda x: from_json if x.endswith(".json") else None), + ( + lambda x: from_nsDeprecatedOperationList + if x == "nsDeprecatedOperationList.h" + else None + ), + (lambda x: from_ServoCSSPropList if x == "ServoCSSPropList.py" else None), + ( + lambda x: from_counted_unknown_properties + if x == "counted_unknown_properties.py" + else None + ), + (lambda x: from_properties_db if x == "properties-db.js" else None), ] # Similarly to the dance above with buildconfig, usecounters may not be @@ -832,16 +927,19 @@ FILENAME_PARSERS = [ try: import usecounters - FILENAME_PARSERS.append(lambda x: from_UseCounters_conf if x == 'UseCounters.conf' else None) FILENAME_PARSERS.append( - lambda x: from_UseCountersWorker_conf if x == 'UseCountersWorker.conf' else None) + lambda x: from_UseCounters_conf if x == "UseCounters.conf" else None + ) + FILENAME_PARSERS.append( + lambda x: from_UseCountersWorker_conf if x == "UseCountersWorker.conf" else None + ) except ImportError: pass def from_files(filenames, strict_type_checks=True): """Return an iterator that provides a sequence of Histograms for -the histograms defined in filenames. + the histograms defined in filenames. """ if strict_type_checks: load_allowlist() @@ -878,19 +976,24 @@ the histograms defined in filenames. upper_bound = indices[-1][0] n_counters = upper_bound - lower_bound + 1 if n_counters != len(indices): - ParserError("Histograms %s must be defined in a contiguous block." % - name).handle_later() + ParserError( + "Histograms %s must be defined in a contiguous block." % name + ).handle_later() # We require that all USE_COUNTER2_*_WORKER histograms be defined in a contiguous # block. - check_continuity(all_histograms, - lambda x: x[1].startswith("USE_COUNTER2_") and x[1].endswith("_WORKER"), - "use counter worker") + check_continuity( + all_histograms, + lambda x: x[1].startswith("USE_COUNTER2_") and x[1].endswith("_WORKER"), + "use counter worker", + ) # And all other USE_COUNTER2_* histograms be defined in a contiguous # block. 
- check_continuity(all_histograms, - lambda x: x[1].startswith("USE_COUNTER2_") and not x[1].endswith("_WORKER"), - "use counter") + check_continuity( + all_histograms, + lambda x: x[1].startswith("USE_COUNTER2_") and not x[1].endswith("_WORKER"), + "use counter", + ) # Check that histograms that were removed from Histograms.json etc. # are also removed from the allowlists. @@ -898,9 +1001,11 @@ the histograms defined in filenames. all_allowlist_entries = itertools.chain.from_iterable(iter(allowlists.values())) orphaned = set(all_allowlist_entries) - set(all_histograms.keys()) if len(orphaned) > 0: - msg = 'The following entries are orphaned and should be removed from ' \ - 'histogram-allowlists.json:\n%s' - ParserError(msg % (', '.join(sorted(orphaned)))).handle_later() + msg = ( + "The following entries are orphaned and should be removed from " + "histogram-allowlists.json:\n%s" + ) + ParserError(msg % (", ".join(sorted(orphaned)))).handle_later() for (name, definition) in all_histograms.items(): yield Histogram(name, definition, strict_type_checks=strict_type_checks) diff --git a/tools/browsertime/mach_commands.py b/tools/browsertime/mach_commands.py index 77e83e1b2a8c..489d87c13bc4 100644 --- a/tools/browsertime/mach_commands.py +++ b/tools/browsertime/mach_commands.py @@ -2,7 +2,7 @@ # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. -r'''Make it easy to install and run [browsertime](https://github.com/sitespeedio/browsertime). +r"""Make it easy to install and run [browsertime](https://github.com/sitespeedio/browsertime). Browsertime is a harness for running performance tests, similar to Mozilla's Raptor testing framework. Browsertime is written in Node.js @@ -26,7 +26,7 @@ To invoke browsertime, run ./mach browsertime [ARGS] ``` All arguments are passed through to browsertime. -''' +""" from __future__ import absolute_import, print_function, unicode_literals @@ -65,93 +65,88 @@ def silence(): def node_path(): from mozbuild.nodeutil import find_node_executable + node, _ = find_node_executable() return os.path.abspath(node) def package_path(): - '''The path to the `browsertime` directory. + """The path to the `browsertime` directory. - Override the default with the `BROWSERTIME` environment variable.''' - override = os.environ.get('BROWSERTIME', None) + Override the default with the `BROWSERTIME` environment variable.""" + override = os.environ.get("BROWSERTIME", None) if override: return override - return mozpath.join(BROWSERTIME_ROOT, 'node_modules', 'browsertime') + return mozpath.join(BROWSERTIME_ROOT, "node_modules", "browsertime") def browsertime_path(): - '''The path to the `browsertime.js` script.''' + """The path to the `browsertime.js` script.""" # On Windows, invoking `node_modules/.bin/browsertime{.cmd}` # doesn't work when invoked as an argument to our specific # binary. Since we want our version of node, invoke the # actual script directly. 
- return mozpath.join( - package_path(), - 'bin', - 'browsertime.js') + return mozpath.join(package_path(), "bin", "browsertime.js") def visualmetrics_path(): - '''The path to the `visualmetrics.py` script.''' - return mozpath.join( - package_path(), - 'browsertime', - 'visualmetrics.py') + """The path to the `visualmetrics.py` script.""" + return mozpath.join(package_path(), "browsertime", "visualmetrics.py") def host_platform(): - is_64bits = sys.maxsize > 2**32 + is_64bits = sys.maxsize > 2 ** 32 - if sys.platform.startswith('win'): + if sys.platform.startswith("win"): if is_64bits: - return 'win64' - elif sys.platform.startswith('linux'): + return "win64" + elif sys.platform.startswith("linux"): if is_64bits: - return 'linux64' - elif sys.platform.startswith('darwin'): - return 'darwin' + return "linux64" + elif sys.platform.startswith("darwin"): + return "darwin" - raise ValueError('sys.platform is not yet supported: {}'.format(sys.platform)) + raise ValueError("sys.platform is not yet supported: {}".format(sys.platform)) # Map from `host_platform()` to a `fetch`-like syntax. host_fetches = { - 'darwin': { - 'ffmpeg': { - 'type': 'static-url', - 'url': 'https://github.com/ncalexan/geckodriver/releases/download/v0.24.0-android/ffmpeg-4.1.1-macos64-static.zip', # noqa + "darwin": { + "ffmpeg": { + "type": "static-url", + "url": "https://github.com/ncalexan/geckodriver/releases/download/v0.24.0-android/ffmpeg-4.1.1-macos64-static.zip", # noqa # An extension to `fetch` syntax. - 'path': 'ffmpeg-4.1.1-macos64-static', + "path": "ffmpeg-4.1.1-macos64-static", }, }, - 'linux64': { - 'ffmpeg': { - 'type': 'static-url', - 'url': 'https://github.com/ncalexan/geckodriver/releases/download/v0.24.0-android/ffmpeg-4.1.4-i686-static.tar.xz', # noqa + "linux64": { + "ffmpeg": { + "type": "static-url", + "url": "https://github.com/ncalexan/geckodriver/releases/download/v0.24.0-android/ffmpeg-4.1.4-i686-static.tar.xz", # noqa # An extension to `fetch` syntax. - 'path': 'ffmpeg-4.1.4-i686-static', + "path": "ffmpeg-4.1.4-i686-static", }, # TODO: install a static ImageMagick. All easily available binaries are # not statically linked, so they will (mostly) fail at runtime due to # missing dependencies. For now we require folks to install ImageMagick # globally with their package manager of choice. }, - 'win64': { - 'ffmpeg': { - 'type': 'static-url', - 'url': 'https://github.com/ncalexan/geckodriver/releases/download/v0.24.0-android/ffmpeg-4.1.1-win64-static.zip', # noqa + "win64": { + "ffmpeg": { + "type": "static-url", + "url": "https://github.com/ncalexan/geckodriver/releases/download/v0.24.0-android/ffmpeg-4.1.1-win64-static.zip", # noqa # An extension to `fetch` syntax. - 'path': 'ffmpeg-4.1.1-win64-static', + "path": "ffmpeg-4.1.1-win64-static", }, - 'ImageMagick': { - 'type': 'static-url', + "ImageMagick": { + "type": "static-url", # 'url': 'https://imagemagick.org/download/binaries/ImageMagick-7.0.8-39-portable-Q16-x64.zip', # noqa # imagemagick.org doesn't keep old versions; the mirror below does. - 'url': 'https://ftp.icm.edu.pl/packages/ImageMagick/binaries/ImageMagick-7.0.8-39-portable-Q16-x64.zip', # noqa + "url": "https://ftp.icm.edu.pl/packages/ImageMagick/binaries/ImageMagick-7.0.8-39-portable-Q16-x64.zip", # noqa # An extension to `fetch` syntax. 
- 'path': 'ImageMagick-7.0.8', + "path": "ImageMagick-7.0.8", }, }, } @@ -161,23 +156,23 @@ host_fetches = { class MachBrowsertime(MachCommandBase): @property def artifact_cache_path(self): - r'''Downloaded artifacts will be kept here.''' + r"""Downloaded artifacts will be kept here.""" # The convention is $MOZBUILD_STATE_PATH/cache/$FEATURE. - return mozpath.join(self._mach_context.state_dir, 'cache', 'browsertime') + return mozpath.join(self._mach_context.state_dir, "cache", "browsertime") @property def state_path(self): - r'''Unpacked artifacts will be kept here.''' + r"""Unpacked artifacts will be kept here.""" # The convention is $MOZBUILD_STATE_PATH/$FEATURE. - return mozpath.join(self._mach_context.state_dir, 'browsertime') + return mozpath.join(self._mach_context.state_dir, "browsertime") def setup_prerequisites(self): - r'''Install browsertime and visualmetrics.py prerequisites.''' + r"""Install browsertime and visualmetrics.py prerequisites.""" from mozbuild.action.tooltool import unpack_file from mozbuild.artifact_cache import ArtifactCache - if not AUTOMATION and host_platform().startswith('linux'): + if not AUTOMATION and host_platform().startswith("linux"): # On Linux ImageMagick needs to be installed manually, and `mach bootstrap` doesn't # do that (yet). Provide some guidance. try: @@ -185,49 +180,53 @@ class MachBrowsertime(MachCommandBase): except ImportError: from shutil_which import which - im_programs = ('compare', 'convert', 'mogrify') + im_programs = ("compare", "convert", "mogrify") for im_program in im_programs: prog = which(im_program) if not prog: - print('Error: On Linux, ImageMagick must be on the PATH. ' - 'Install ImageMagick manually and try again (or update PATH). ' - 'On Ubuntu and Debian, try `sudo apt-get install imagemagick`. ' - 'On Fedora, try `sudo dnf install imagemagick`. ' - 'On CentOS, try `sudo yum install imagemagick`.') + print( + "Error: On Linux, ImageMagick must be on the PATH. " + "Install ImageMagick manually and try again (or update PATH). " + "On Ubuntu and Debian, try `sudo apt-get install imagemagick`. " + "On Fedora, try `sudo dnf install imagemagick`. " + "On CentOS, try `sudo yum install imagemagick`." + ) return 1 # Download the visualmetrics.py requirements. - artifact_cache = ArtifactCache(self.artifact_cache_path, - log=self.log, skip_cache=False) + artifact_cache = ArtifactCache( + self.artifact_cache_path, log=self.log, skip_cache=False + ) fetches = host_fetches[host_platform()] for tool, fetch in sorted(fetches.items()): - archive = artifact_cache.fetch(fetch['url']) + archive = artifact_cache.fetch(fetch["url"]) # TODO: assert type, verify sha256 (and size?). 
- if fetch.get('unpack', True): + if fetch.get("unpack", True): cwd = os.getcwd() try: mkdir(self.state_path) os.chdir(self.state_path) self.log( logging.INFO, - 'browsertime', - {'path': archive}, - 'Unpacking temporary location {path}') + "browsertime", + {"path": archive}, + "Unpacking temporary location {path}", + ) - if 'win64' in host_platform() and 'imagemagick' in tool.lower(): + if "win64" in host_platform() and "imagemagick" in tool.lower(): # Windows archive does not contain a subfolder # so we make one for it here - mkdir(fetch.get('path')) - os.chdir(os.path.join(self.state_path, fetch.get('path'))) + mkdir(fetch.get("path")) + os.chdir(os.path.join(self.state_path, fetch.get("path"))) unpack_file(archive) os.chdir(self.state_path) else: unpack_file(archive) # Make sure the expected path exists after extraction - path = os.path.join(self.state_path, fetch.get('path')) + path = os.path.join(self.state_path, fetch.get("path")) if not os.path.exists(path): raise Exception("Cannot find an extracted directory: %s" % path) @@ -245,42 +244,51 @@ class MachBrowsertime(MachCommandBase): os.chmod(loc_to_change, st.st_mode | stat.S_IEXEC) except Exception as e: raise Exception( - "Could not set executable bit in %s, error: %s" % (path, str(e)) + "Could not set executable bit in %s, error: %s" + % (path, str(e)) ) finally: os.chdir(cwd) - def setup(self, should_clobber=False, new_upstream_url=''): - r'''Install browsertime and visualmetrics.py prerequisites and the Node.js package.''' + def setup(self, should_clobber=False, new_upstream_url=""): + r"""Install browsertime and visualmetrics.py prerequisites and the Node.js package.""" - sys.path.append(mozpath.join(self.topsrcdir, 'tools', 'lint', 'eslint')) + sys.path.append(mozpath.join(self.topsrcdir, "tools", "lint", "eslint")) import setup_helper if not new_upstream_url: self.setup_prerequisites() if new_upstream_url: - package_json_path = os.path.join(BROWSERTIME_ROOT, 'package.json') + package_json_path = os.path.join(BROWSERTIME_ROOT, "package.json") self.log( logging.INFO, - 'browsertime', - {'new_upstream_url': new_upstream_url, 'package_json_path': package_json_path}, - 'Updating browsertime node module version in {package_json_path} ' - 'to {new_upstream_url}') + "browsertime", + { + "new_upstream_url": new_upstream_url, + "package_json_path": package_json_path, + }, + "Updating browsertime node module version in {package_json_path} " + "to {new_upstream_url}", + ) - if not re.search('/tarball/[a-f0-9]{40}$', new_upstream_url): - raise ValueError("New upstream URL does not end with /tarball/[a-f0-9]{40}: '%s'" - % new_upstream_url) + if not re.search("/tarball/[a-f0-9]{40}$", new_upstream_url): + raise ValueError( + "New upstream URL does not end with /tarball/[a-f0-9]{40}: '%s'" + % new_upstream_url + ) with open(package_json_path) as f: - existing_body = json.loads(f.read(), object_pairs_hook=collections.OrderedDict) + existing_body = json.loads( + f.read(), object_pairs_hook=collections.OrderedDict + ) - existing_body['devDependencies']['browsertime'] = new_upstream_url + existing_body["devDependencies"]["browsertime"] = new_upstream_url updated_body = json.dumps(existing_body) - with open(package_json_path, 'w') as f: + with open(package_json_path, "w") as f: f.write(updated_body) # Install the browsertime Node.js requirements. 
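# A minimal, runnable sketch of the URL check performed by setup() in the hunk
# above: it assumes a hypothetical sha/URL pair and only illustrates that the
# new browsertime pin must be a tarball URL ending in a full 40-character hex
# commit id before the "browsertime" devDependency in package.json is rewritten.

import re

pinned = "https://github.com/sitespeedio/browsertime/tarball/" + "a" * 40
unpinned = "https://github.com/sitespeedio/browsertime/archive/main.zip"

# A sha-pinned tarball URL satisfies the pattern; an unpinned URL does not.
assert re.search("/tarball/[a-f0-9]{40}$", pinned) is not None
assert re.search("/tarball/[a-f0-9]{40}$", unpinned) is None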
@@ -297,15 +305,17 @@ class MachBrowsertime(MachCommandBase): self.log( logging.INFO, - 'browsertime', - {'package_json': mozpath.join(BROWSERTIME_ROOT, 'package.json')}, - 'Installing browsertime node module from {package_json}') + "browsertime", + {"package_json": mozpath.join(BROWSERTIME_ROOT, "package.json")}, + "Installing browsertime node module from {package_json}", + ) status = setup_helper.package_setup( BROWSERTIME_ROOT, - 'browsertime', - should_update=new_upstream_url != '', + "browsertime", + should_update=new_upstream_url != "", should_clobber=should_clobber, - no_optional=new_upstream_url or AUTOMATION) + no_optional=new_upstream_url or AUTOMATION, + ) if status: return status @@ -316,13 +326,14 @@ class MachBrowsertime(MachCommandBase): return self.check() def node(self, args): - r'''Invoke node (interactively) with the given arguments.''' + r"""Invoke node (interactively) with the given arguments.""" return self.run_process( [node_path()] + args, append_env=self.append_env(), pass_thru=True, # Allow user to run Node interactively. ensure_exit_code=False, # Don't throw on non-zero exit code. - cwd=mozpath.join(self.topsrcdir)) + cwd=mozpath.join(self.topsrcdir), + ) def append_env(self, append_path=True): fetches = host_fetches[host_platform()] @@ -331,22 +342,30 @@ class MachBrowsertime(MachCommandBase): # {`convert`,`compare`,`mogrify`} are found. The `visualmetrics.py` # script doesn't take these as configuration, so we do this (for now). # We should update the script itself to accept this configuration. - path = os.environ.get('PATH', '').split(os.pathsep) if append_path else [] - path_to_ffmpeg = mozpath.join( - self.state_path, - fetches['ffmpeg']['path']) + path = os.environ.get("PATH", "").split(os.pathsep) if append_path else [] + path_to_ffmpeg = mozpath.join(self.state_path, fetches["ffmpeg"]["path"]) path_to_imagemagick = None - if 'ImageMagick' in fetches: + if "ImageMagick" in fetches: path_to_imagemagick = mozpath.join( - self.state_path, - fetches['ImageMagick']['path']) + self.state_path, fetches["ImageMagick"]["path"] + ) if path_to_imagemagick: # ImageMagick ships ffmpeg (on Windows, at least) so we # want to ensure that our ffmpeg goes first, just in case. - path.insert(0, self.state_path if host_platform().startswith('win') else mozpath.join(path_to_imagemagick, 'bin')) # noqa - path.insert(0, path_to_ffmpeg if host_platform().startswith('linux') else mozpath.join(path_to_ffmpeg, 'bin')) # noqa + path.insert( + 0, + self.state_path + if host_platform().startswith("win") + else mozpath.join(path_to_imagemagick, "bin"), + ) # noqa + path.insert( + 0, + path_to_ffmpeg + if host_platform().startswith("linux") + else mozpath.join(path_to_ffmpeg, "bin"), + ) # noqa # Ensure that bare `node` and `npm` in scripts, including post-install # scripts, finds the binary we're invoking with. Without this, it's @@ -358,7 +377,7 @@ class MachBrowsertime(MachCommandBase): # On windows, we need to add the ImageMagick directory to the path # otherwise compare won't be found, and the built-in OS convert # method will be used instead of the ImageMagick one. 
- if 'win64' in host_platform() and path_to_imagemagick: + if "win64" in host_platform() and path_to_imagemagick: # Bug 1596237 - In the windows ImageMagick distribution, the ffmpeg # binary is directly located in the root directory, so here we # insert in the 3rd position to avoid taking precedence over ffmpeg @@ -379,8 +398,7 @@ class MachBrowsertime(MachCommandBase): path.append(p) append_env = { - 'PATH': os.pathsep.join(path), - + "PATH": os.pathsep.join(path), # Bug 1560193: The JS library browsertime uses to execute commands # (execa) will muck up the PATH variable and put the directory that # node is in first in path. If this is globally-installed node, @@ -389,36 +407,41 @@ class MachBrowsertime(MachCommandBase): # # Our fork of browsertime supports a `PYTHON` environment variable # that points to the exact python executable to use. - 'PYTHON': self.virtualenv_manager.python_path, + "PYTHON": self.virtualenv_manager.python_path, } if path_to_imagemagick: - append_env.update({ - # See https://imagemagick.org/script/download.php. Harmless on other platforms. - 'LD_LIBRARY_PATH': mozpath.join(path_to_imagemagick, 'lib'), - 'DYLD_LIBRARY_PATH': mozpath.join(path_to_imagemagick, 'lib'), - 'MAGICK_HOME': path_to_imagemagick, - }) + append_env.update( + { + # See https://imagemagick.org/script/download.php. Harmless on other + # platforms. + "LD_LIBRARY_PATH": mozpath.join(path_to_imagemagick, "lib"), + "DYLD_LIBRARY_PATH": mozpath.join(path_to_imagemagick, "lib"), + "MAGICK_HOME": path_to_imagemagick, + } + ) return append_env def _need_install(self, package): from pip._internal.req.constructors import install_req_from_line + req = install_req_from_line(package) req.check_if_exists(use_user_site=False) if req.satisfied_by is None: return True - venv_site_lib = os.path.abspath(os.path.join(self.virtualenv_manager.bin_path, "..", - "lib")) + venv_site_lib = os.path.abspath( + os.path.join(self.virtualenv_manager.bin_path, "..", "lib") + ) site_packages = os.path.abspath(req.satisfied_by.location) return not site_packages.startswith(venv_site_lib) def activate_virtualenv(self, *args, **kwargs): - r'''Activates virtualenv. + r"""Activates virtualenv. This function will also install Pillow and pyssim if needed. It will raise an error in case the install failed. - ''' + """ MachCommandBase.activate_virtualenv(self, *args, **kwargs) # installing Python deps on the fly @@ -427,19 +450,20 @@ class MachBrowsertime(MachCommandBase): self.virtualenv_manager._run_pip(["install", dep]) def check(self): - r'''Run `visualmetrics.py --check`.''' + r"""Run `visualmetrics.py --check`.""" self.activate_virtualenv() - args = ['--check'] + args = ["--check"] status = self.run_process( [self.virtualenv_manager.python_path, visualmetrics_path()] + args, # For --check, don't allow user's path to interfere with # path testing except on Linux, where ImageMagick needs to # be installed manually. - append_env=self.append_env(append_path=host_platform().startswith('linux')), + append_env=self.append_env(append_path=host_platform().startswith("linux")), pass_thru=True, ensure_exit_code=False, # Don't throw on non-zero exit code. - cwd=mozpath.join(self.topsrcdir)) + cwd=mozpath.join(self.topsrcdir), + ) sys.stdout.flush() sys.stderr.flush() @@ -449,12 +473,12 @@ class MachBrowsertime(MachCommandBase): # Avoid logging the command (and, on Windows, the environment). 
self.log_manager.terminal_handler.setLevel(logging.CRITICAL) - print('browsertime version:', end=' ') + print("browsertime version:", end=" ") sys.stdout.flush() sys.stderr.flush() - return self.node([browsertime_path()] + ['--version']) + return self.node([browsertime_path()] + ["--version"]) def extra_default_args(self, args=[]): # Add Mozilla-specific default arguments. This is tricky because browsertime is quite @@ -462,72 +486,83 @@ class MachBrowsertime(MachCommandBase): # difficult to interpret type errors. def extract_browser_name(args): - 'Extracts the browser name if any' + "Extracts the browser name if any" # These are BT arguments, it's BT job to check them # here we just want to extract the browser name - res = re.findall("(--browser|-b)[= ]([\w]+)", ' '.join(args)) + res = re.findall("(--browser|-b)[= ]([\w]+)", " ".join(args)) if res == []: return None return res[0][-1] def matches(args, *flags): - 'Return True if any argument matches any of the given flags (maybe with an argument).' + "Return True if any argument matches any of the given flags (maybe with an argument)." for flag in flags: - if flag in args or any(arg.startswith(flag + '=') for arg in args): + if flag in args or any(arg.startswith(flag + "=") for arg in args): return True return False extra_args = [] # Default to Firefox. Override with `-b ...` or `--browser=...`. - specifies_browser = matches(args, '-b', '--browser') + specifies_browser = matches(args, "-b", "--browser") if not specifies_browser: - extra_args.extend(('-b', 'firefox')) + extra_args.extend(("-b", "firefox")) # Default to not collect HAR. Override with `--skipHar=false`. - specifies_har = matches(args, '--har', '--skipHar', '--gzipHar') + specifies_har = matches(args, "--har", "--skipHar", "--gzipHar") if not specifies_har: - extra_args.append('--skipHar') + extra_args.append("--skipHar") if not matches(args, "--android"): # If --firefox.binaryPath is not specified, default to the objdir binary # Note: --firefox.release is not a real browsertime option, but it will # silently ignore it instead and default to a release installation. 
- specifies_binaryPath = matches(args, '--firefox.binaryPath', - '--firefox.release', '--firefox.nightly', - '--firefox.beta', '--firefox.developer') + specifies_binaryPath = matches( + args, + "--firefox.binaryPath", + "--firefox.release", + "--firefox.nightly", + "--firefox.beta", + "--firefox.developer", + ) if not specifies_binaryPath: - specifies_binaryPath = extract_browser_name(args) == 'chrome' + specifies_binaryPath = extract_browser_name(args) == "chrome" if not specifies_binaryPath: try: - extra_args.extend(('--firefox.binaryPath', self.get_binary_path())) + extra_args.extend(("--firefox.binaryPath", self.get_binary_path())) except BinaryNotFoundException as e: - self.log(logging.ERROR, - 'browsertime', - {'error': str(e)}, - 'ERROR: {error}') - self.log(logging.INFO, - 'browsertime', - {}, - 'Please run |./mach build| ' - 'or specify a Firefox binary with --firefox.binaryPath.') + self.log( + logging.ERROR, + "browsertime", + {"error": str(e)}, + "ERROR: {error}", + ) + self.log( + logging.INFO, + "browsertime", + {}, + "Please run |./mach build| " + "or specify a Firefox binary with --firefox.binaryPath.", + ) return 1 if extra_args: self.log( logging.DEBUG, - 'browsertime', - {'extra_args': extra_args}, - 'Running browsertime with extra default arguments: {extra_args}') + "browsertime", + {"extra_args": extra_args}, + "Running browsertime with extra default arguments: {extra_args}", + ) return extra_args def _verify_node_install(self): # check if Node is installed - sys.path.append(mozpath.join(self.topsrcdir, 'tools', 'lint', 'eslint')) + sys.path.append(mozpath.join(self.topsrcdir, "tools", "lint", "eslint")) import setup_helper + with silence(): node_valid = setup_helper.check_node_executables_valid() if not node_valid: @@ -543,23 +578,45 @@ class MachBrowsertime(MachCommandBase): return True - @Command('browsertime', category='testing', - description='Run [browsertime](https://github.com/sitespeedio/browsertime) ' - 'performance tests.') - @CommandArgument('--verbose', action='store_true', - help='Verbose output for what commands the build is running.') - @CommandArgument('--update-upstream-url', default='') - @CommandArgument('--setup', default=False, action='store_true') - @CommandArgument('--clobber', default=False, action='store_true') - @CommandArgument('--skip-cache', default=False, action='store_true', - help='Skip all local caches to force re-fetching remote artifacts.') - @CommandArgument('--check', default=False, action='store_true') - @CommandArgument('--browsertime-help', default=False, action='store_true', - help='Show the browsertime help message.') - @CommandArgument('args', nargs=argparse.REMAINDER) - def browsertime(self, args, verbose=False, - update_upstream_url='', setup=False, clobber=False, - skip_cache=False, check=False, browsertime_help=False): + @Command( + "browsertime", + category="testing", + description="Run [browsertime](https://github.com/sitespeedio/browsertime) " + "performance tests.", + ) + @CommandArgument( + "--verbose", + action="store_true", + help="Verbose output for what commands the build is running.", + ) + @CommandArgument("--update-upstream-url", default="") + @CommandArgument("--setup", default=False, action="store_true") + @CommandArgument("--clobber", default=False, action="store_true") + @CommandArgument( + "--skip-cache", + default=False, + action="store_true", + help="Skip all local caches to force re-fetching remote artifacts.", + ) + @CommandArgument("--check", default=False, action="store_true") + @CommandArgument( + 
"--browsertime-help", + default=False, + action="store_true", + help="Show the browsertime help message.", + ) + @CommandArgument("args", nargs=argparse.REMAINDER) + def browsertime( + self, + args, + verbose=False, + update_upstream_url="", + setup=False, + clobber=False, + skip_cache=False, + check=False, + browsertime_help=False, + ): self._set_log_level(verbose) if update_upstream_url: @@ -574,7 +631,7 @@ class MachBrowsertime(MachCommandBase): return self.check() if browsertime_help: - args.append('--help') + args.append("--help") self.activate_virtualenv() default_args = self.extra_default_args(args) diff --git a/tools/lint/black.yml b/tools/lint/black.yml index cb78f80cd30b..d4f05925bc00 100644 --- a/tools/lint/black.yml +++ b/tools/lint/black.yml @@ -2,45 +2,11 @@ black: description: Reformat python exclude: - - browser/components/migration/tests/marionette/test_refresh_firefox.py - - build/pgo/genpgocert.py - - config/check_macroassembler_style.py - gfx/harfbuzz/src/meson.build - - js/src/devtools/rootAnalysis/t/testlib.py - - js/src/util/make_unicode.py - layout/style/ServoCSSPropList.mako.py - - mobile/android/mach_commands.py - - python/mozbuild/mozbuild/mach_commands.py - - python/mozbuild/mozbuild/telemetry.py - - python/mozbuild/mozbuild/test/backend/test_build.py - - python/mozbuild/mozbuild/test/backend/test_recursivemake.py - - python/mozbuild/mozbuild/test/codecoverage/test_lcov_rewrite.py - python/mozbuild/mozbuild/test/frontend/data/reader-error-syntax/moz.build - - taskcluster/taskgraph/transforms/bouncer_aliases.py - - taskcluster/taskgraph/transforms/mar_signing.py - - taskcluster/taskgraph/transforms/repackage_signing_partner.py - - testing/addtest.py - - testing/mochitest/mochitest_options.py - - testing/mozbase/mozpower/tests/test_macintelpower.py - - testing/mozharness/mozharness/mozilla/building/buildbase.py - - testing/mozharness/mozharness/mozilla/testing/errors.py - - testing/mozharness/mozharness/mozilla/testing/raptor.py - testing/mozharness/configs/test/test_malformed.py - - testing/mozharness/mozharness/mozilla/testing/testbase.py - - testing/mozharness/scripts/desktop_unittest.py - - testing/mozharness/scripts/marionette.py - - testing/mozharness/scripts/release/bouncer_check.py - - testing/mozharness/scripts/release/update-verify-config-creator.py - - testing/talos/talos/test.py - - testing/talos/talos/unittests/test_xtalos.py - - testing/web-platform/metamerge.py - testing/web-platform/tests - - toolkit/components/telemetry/build_scripts/mozparsers/parse_histograms.py - - tools/browsertime/mach_commands.py - - tools/power/mach_commands.py - - tools/tryselect/selectors/coverage.py - - tools/update-packaging/test_make_incremental_updates.py - - xpcom/components/gen_static_components.py extensions: - build - configure diff --git a/tools/power/mach_commands.py b/tools/power/mach_commands.py index 5f8d35204cfd..19e27874be41 100644 --- a/tools/power/mach_commands.py +++ b/tools/power/mach_commands.py @@ -16,28 +16,38 @@ from mozbuild.base import MachCommandBase def is_osx_10_10_or_greater(cls): import platform + release = platform.mac_ver()[0] - return release and StrictVersion(release) >= StrictVersion('10.10') + return release and StrictVersion(release) >= StrictVersion("10.10") @CommandProvider class MachCommands(MachCommandBase): - ''' + """ Get system power consumption and related measurements. 
- ''' - @Command('power', category='misc', - conditions=[is_osx_10_10_or_greater], - description='Get system power consumption and related measurements for ' - 'all running browsers. Available only on Mac OS X 10.10 and above. ' - 'Requires root access.') - @CommandArgument('-i', '--interval', type=int, default=30000, - help='The sample period, measured in milliseconds. Defaults to 30000.') + """ + + @Command( + "power", + category="misc", + conditions=[is_osx_10_10_or_greater], + description="Get system power consumption and related measurements for " + "all running browsers. Available only on Mac OS X 10.10 and above. " + "Requires root access.", + ) + @CommandArgument( + "-i", + "--interval", + type=int, + default=30000, + help="The sample period, measured in milliseconds. Defaults to 30000.", + ) def power(self, interval): import os import re import subprocess - rapl = os.path.join(self.topobjdir, 'dist', 'bin', 'rapl') + rapl = os.path.join(self.topobjdir, "dist", "bin", "rapl") interval = str(interval) @@ -46,23 +56,31 @@ class MachCommands(MachCommandBase): # doesn't start measuring while |powermetrics| is waiting for the root # password to be entered. try: - subprocess.check_call(['sudo', 'true']) + subprocess.check_call(["sudo", "true"]) except Exception: - print('\nsudo failed; aborting') + print("\nsudo failed; aborting") return 1 # This runs rapl in the background because nothing in this script # depends on the output. This is good because we want |rapl| and # |powermetrics| to run at the same time. - subprocess.Popen([rapl, '-n', '1', '-i', interval]) + subprocess.Popen([rapl, "-n", "1", "-i", interval]) - lines = subprocess.check_output(['sudo', 'powermetrics', - '--samplers', 'tasks', - '--show-process-coalition', - '--show-process-gpu', - '-n', '1', - '-i', interval], - universal_newlines=True) + lines = subprocess.check_output( + [ + "sudo", + "powermetrics", + "--samplers", + "tasks", + "--show-process-coalition", + "--show-process-gpu", + "-n", + "1", + "-i", + interval, + ], + universal_newlines=True, + ) # When run with --show-process-coalition, |powermetrics| groups outputs # into process coalitions, each of which has a leader. @@ -131,7 +149,10 @@ class MachCommands(MachCommandBase): # # - 'kernel' is for the kernel. 
# - if re.search(r'(^Name|firefox|plugin-container|Safari\b|WebKit|Chrome|Terminal|WindowServer|kernel)', line): # NOQA: E501 + if re.search( + r"(^Name|firefox|plugin-container|Safari\b|WebKit|Chrome|Terminal|WindowServer|kernel)", # NOQA: E501 + line, + ): print(line) return 0 diff --git a/tools/tryselect/selectors/coverage.py b/tools/tryselect/selectors/coverage.py index 964459fddb43..88f53528e736 100644 --- a/tools/tryselect/selectors/coverage.py +++ b/tools/tryselect/selectors/coverage.py @@ -39,34 +39,36 @@ def setup_globals(): build = MozbuildObject.from_environment(cwd=here) vcs = get_repository_object(build.topsrcdir) - root_hash = hashlib.sha256(six.ensure_binary(os.path.abspath(build.topsrcdir))).hexdigest() - cache_dir = os.path.join(get_state_dir(), 'cache', root_hash, 'chunk_mapping') + root_hash = hashlib.sha256( + six.ensure_binary(os.path.abspath(build.topsrcdir)) + ).hexdigest() + cache_dir = os.path.join(get_state_dir(), "cache", root_hash, "chunk_mapping") if not os.path.isdir(cache_dir): os.makedirs(cache_dir) - CHUNK_MAPPING_FILE = os.path.join(cache_dir, 'chunk_mapping.sqlite') - CHUNK_MAPPING_TAG_FILE = os.path.join(cache_dir, 'chunk_mapping_tag.json') + CHUNK_MAPPING_FILE = os.path.join(cache_dir, "chunk_mapping.sqlite") + CHUNK_MAPPING_TAG_FILE = os.path.join(cache_dir, "chunk_mapping_tag.json") # Maps from platform names in the chunk_mapping sqlite database to respective # substrings in task names. PLATFORM_MAP = { - 'linux': 'test-linux64/opt', - 'windows': 'test-windows10-64/opt', + "linux": "test-linux64/opt", + "windows": "test-windows10-64/opt", } # List of platform/build type combinations that are included in pushes by |mach try coverage|. OPT_TASK_PATTERNS = [ - 'macosx64/opt', - 'windows10-64/opt', - 'windows7-32/opt', - 'linux64/opt', + "macosx64/opt", + "windows10-64/opt", + "windows7-32/opt", + "linux64/opt", ] class CoverageParser(BaseTryParser): - name = 'coverage' + name = "coverage" arguments = [] - common_groups = ['push', 'task'] + common_groups = ["push", "task"] task_configs = [ "artifact", "env", @@ -78,41 +80,42 @@ class CoverageParser(BaseTryParser): def read_test_manifests(): - '''Uses TestResolver to read all test manifests in the tree. + """Uses TestResolver to read all test manifests in the tree. Returns a (tests, support_files_map) tuple that describes the tests in the tree: tests - a set of test file paths support_files_map - a dict that maps from each support file to a list with test files that require them it - ''' + """ test_resolver = TestResolver.from_environment(cwd=here) file_finder = FileFinder(build.topsrcdir) support_files_map = collections.defaultdict(list) tests = set() for test in test_resolver.resolve_tests(build.topsrcdir): - tests.add(test['srcdir_relpath']) - if 'support-files' not in test: + tests.add(test["srcdir_relpath"]) + if "support-files" not in test: continue - for support_file_pattern in test['support-files'].split(): + for support_file_pattern in test["support-files"].split(): # Get the pattern relative to topsrcdir. 
- if support_file_pattern.startswith('!/'): + if support_file_pattern.startswith("!/"): support_file_pattern = support_file_pattern[2:] - elif support_file_pattern.startswith('/'): + elif support_file_pattern.startswith("/"): support_file_pattern = support_file_pattern[1:] else: - support_file_pattern = os.path.normpath(os.path.join(test['dir_relpath'], - support_file_pattern)) + support_file_pattern = os.path.normpath( + os.path.join(test["dir_relpath"], support_file_pattern) + ) # If it doesn't have a glob, then it's a single file. - if '*' not in support_file_pattern: + if "*" not in support_file_pattern: # Simple case: single support file, just add it here. - support_files_map[support_file_pattern].append(test['srcdir_relpath']) + support_files_map[support_file_pattern].append(test["srcdir_relpath"]) continue for support_file, _ in file_finder.find(support_file_pattern): - support_files_map[support_file].append(test['srcdir_relpath']) + support_files_map[support_file].append(test["srcdir_relpath"]) return tests, support_files_map @@ -123,78 +126,93 @@ all_tests, all_support_files = read_test_manifests() def download_coverage_mapping(base_revision): try: - with open(CHUNK_MAPPING_TAG_FILE, 'r') as f: + with open(CHUNK_MAPPING_TAG_FILE, "r") as f: tags = json.load(f) - if tags['target_revision'] == base_revision: + if tags["target_revision"] == base_revision: return else: - print('Base revision changed.') + print("Base revision changed.") except (IOError, ValueError): - print('Chunk mapping file not found.') + print("Chunk mapping file not found.") - CHUNK_MAPPING_URL_TEMPLATE = 'https://firefox-ci-tc.services.mozilla.com/api/index/v1/task/project.relman.code-coverage.production.cron.{}/artifacts/public/chunk_mapping.tar.xz' # noqa - JSON_PUSHES_URL_TEMPLATE = 'https://hg.mozilla.org/mozilla-central/json-pushes?version=2&tipsonly=1&startdate={}' # noqa + CHUNK_MAPPING_URL_TEMPLATE = "https://firefox-ci-tc.services.mozilla.com/api/index/v1/task/project.relman.code-coverage.production.cron.{}/artifacts/public/chunk_mapping.tar.xz" # noqa + JSON_PUSHES_URL_TEMPLATE = "https://hg.mozilla.org/mozilla-central/json-pushes?version=2&tipsonly=1&startdate={}" # noqa # Get pushes from at most one month ago. PUSH_HISTORY_DAYS = 30 delta = datetime.timedelta(days=PUSH_HISTORY_DAYS) - start_time = (datetime.datetime.now() - delta).strftime('%Y-%m-%d') + start_time = (datetime.datetime.now() - delta).strftime("%Y-%m-%d") pushes_url = JSON_PUSHES_URL_TEMPLATE.format(start_time) - pushes_data = requests.get(pushes_url + '&tochange={}'.format(base_revision)).json() - if 'error' in pushes_data: - if 'unknown revision' in pushes_data['error']: - print('unknown revision {}, trying with latest mozilla-central'.format(base_revision)) + pushes_data = requests.get(pushes_url + "&tochange={}".format(base_revision)).json() + if "error" in pushes_data: + if "unknown revision" in pushes_data["error"]: + print( + "unknown revision {}, trying with latest mozilla-central".format( + base_revision + ) + ) pushes_data = requests.get(pushes_url).json() - if 'error' in pushes_data: - raise Exception(pushes_data['error']) + if "error" in pushes_data: + raise Exception(pushes_data["error"]) - pushes = pushes_data['pushes'] + pushes = pushes_data["pushes"] - print('Looking for coverage data. This might take a minute or two.') - print('Base revision:', base_revision) + print("Looking for coverage data. 
This might take a minute or two.") + print("Base revision:", base_revision) for push_id in sorted(pushes.keys())[::-1]: - rev = pushes[push_id]['changesets'][0] + rev = pushes[push_id]["changesets"][0] url = CHUNK_MAPPING_URL_TEMPLATE.format(rev) - print('push id: {},\trevision: {}'.format(push_id, rev)) + print("push id: {},\trevision: {}".format(push_id, rev)) r = requests.head(url) if not r.ok: continue - print('Chunk mapping found, downloading...') + print("Chunk mapping found, downloading...") r = requests.get(url, stream=True) - CHUNK_MAPPING_ARCHIVE = os.path.join(build.topsrcdir, 'chunk_mapping.tar.xz') - with open(CHUNK_MAPPING_ARCHIVE, 'wb') as f: + CHUNK_MAPPING_ARCHIVE = os.path.join(build.topsrcdir, "chunk_mapping.tar.xz") + with open(CHUNK_MAPPING_ARCHIVE, "wb") as f: r.raw.decode_content = True shutil.copyfileobj(r.raw, f) - subprocess.check_call(['tar', '-xJf', CHUNK_MAPPING_ARCHIVE, - '-C', os.path.dirname(CHUNK_MAPPING_FILE)]) + subprocess.check_call( + [ + "tar", + "-xJf", + CHUNK_MAPPING_ARCHIVE, + "-C", + os.path.dirname(CHUNK_MAPPING_FILE), + ] + ) os.remove(CHUNK_MAPPING_ARCHIVE) assert os.path.isfile(CHUNK_MAPPING_FILE) - with open(CHUNK_MAPPING_TAG_FILE, 'w') as f: - json.dump({'target_revision': base_revision, - 'chunk_mapping_revision': rev, - 'download_date': start_time}, - f) + with open(CHUNK_MAPPING_TAG_FILE, "w") as f: + json.dump( + { + "target_revision": base_revision, + "chunk_mapping_revision": rev, + "download_date": start_time, + }, + f, + ) return - raise Exception('Could not find suitable coverage data.') + raise Exception("Could not find suitable coverage data.") def is_a_test(cursor, path): - '''Checks the all_tests global and the chunk mapping database to see if a + """Checks the all_tests global and the chunk mapping database to see if a given file is a test file. - ''' + """ if path in all_tests: return True - cursor.execute('SELECT COUNT(*) from chunk_to_test WHERE path=?', (path,)) + cursor.execute("SELECT COUNT(*) from chunk_to_test WHERE path=?", (path,)) if cursor.fetchone()[0]: return True - cursor.execute('SELECT COUNT(*) from file_to_test WHERE test=?', (path,)) + cursor.execute("SELECT COUNT(*) from file_to_test WHERE test=?", (path,)) if cursor.fetchone()[0]: return True @@ -202,37 +220,34 @@ def is_a_test(cursor, path): def tests_covering_file(cursor, path): - '''Returns a set of tests that cover a given source file. - ''' - cursor.execute('SELECT test FROM file_to_test WHERE source=?', (path,)) + """Returns a set of tests that cover a given source file.""" + cursor.execute("SELECT test FROM file_to_test WHERE source=?", (path,)) return set(e[0] for e in cursor.fetchall()) def tests_in_chunk(cursor, platform, chunk): - '''Returns a set of tests that are contained in a given chunk. - ''' - cursor.execute('SELECT path FROM chunk_to_test WHERE platform=? AND chunk=?', - (platform, chunk)) + """Returns a set of tests that are contained in a given chunk.""" + cursor.execute( + "SELECT path FROM chunk_to_test WHERE platform=? AND chunk=?", (platform, chunk) + ) # Because of bug 1480103, some entries in this table contain both a file name and a test name, # separated by a space. With the split, only the file name is kept. - return set(e[0].split(' ')[0] for e in cursor.fetchall()) + return set(e[0].split(" ")[0] for e in cursor.fetchall()) def chunks_covering_file(cursor, path): - '''Returns a set of (platform, chunk) tuples with the chunks that cover a given source file. 
- ''' - cursor.execute('SELECT platform, chunk FROM file_to_chunk WHERE path=?', (path,)) + """Returns a set of (platform, chunk) tuples with the chunks that cover a given source file.""" + cursor.execute("SELECT platform, chunk FROM file_to_chunk WHERE path=?", (path,)) return set(cursor.fetchall()) def tests_supported_by_file(path): - '''Returns a set of tests that are using the given file as a support-file. - ''' + """Returns a set of tests that are using the given file as a support-file.""" return set(all_support_files[path]) def find_tests(changed_files): - '''Finds both individual tests and test chunks that should be run to test code changes. + """Finds both individual tests and test chunks that should be run to test code changes. Argument: a list of file paths relative to the source checkout. Returns: a (test_files, test_chunks) tuple with two sets. @@ -240,7 +255,7 @@ def find_tests(changed_files): test_chunks - contains (platform, chunk) tuples with chunks that should be run. These chunnks do not support running a subset of the tests (like cppunit or gtest), so the whole chunk must be run. - ''' + """ test_files = set() test_chunks = set() files_no_coverage = set() @@ -273,7 +288,7 @@ def find_tests(changed_files): files_no_coverage.add(path) files_covered = set(changed_files) - files_no_coverage - test_files = set(s.replace('\\', '/') for s in test_files) + test_files = set(s.replace("\\", "/") for s in test_files) _print_found_tests(files_covered, files_no_coverage, test_files, test_chunks) @@ -284,7 +299,7 @@ def find_tests(changed_files): tests = tests_in_chunk(c, platform, chunk) if tests: for test in tests: - test_files.add(test.replace('\\', '/')) + test_files.add(test.replace("\\", "/")) else: remaining_test_chunks.add((platform, chunk)) @@ -292,44 +307,50 @@ def find_tests(changed_files): def _print_found_tests(files_covered, files_no_coverage, test_files, test_chunks): - '''Print a summary of what will be run to the user's terminal. 
- ''' + """Print a summary of what will be run to the user's terminal.""" files_covered = sorted(files_covered) files_no_coverage = sorted(files_no_coverage) test_files = sorted(test_files) test_chunks = sorted(test_chunks) if files_covered: - print('Found {} modified source files with test coverage:'.format(len(files_covered))) + print( + "Found {} modified source files with test coverage:".format( + len(files_covered) + ) + ) for covered in files_covered: - print('\t', covered) + print("\t", covered) if files_no_coverage: - print('Found {} modified source files with no coverage:'.format(len(files_no_coverage))) + print( + "Found {} modified source files with no coverage:".format( + len(files_no_coverage) + ) + ) for f in files_no_coverage: - print('\t', f) + print("\t", f) if not files_covered: - print('No modified source files are covered by tests.') + print("No modified source files are covered by tests.") elif not files_no_coverage: - print('All modified source files are covered by tests.') + print("All modified source files are covered by tests.") if test_files: - print('Running {} individual test files.'.format(len(test_files))) + print("Running {} individual test files.".format(len(test_files))) else: - print('Could not find any individual tests to run.') + print("Could not find any individual tests to run.") if test_chunks: - print('Running {} test chunks.'.format(len(test_chunks))) + print("Running {} test chunks.".format(len(test_chunks))) for platform, chunk in test_chunks: - print('\t', platform, chunk) + print("\t", platform, chunk) else: - print('Could not find any test chunks to run.') + print("Could not find any test chunks to run.") def filter_tasks_by_chunks(tasks, chunks): - '''Find all tasks that will run the given chunks. - ''' + """Find all tasks that will run the given chunks.""" selected_tasks = set() for platform, chunk in chunks: platform = PLATFORM_MAP[platform] @@ -339,14 +360,20 @@ def filter_tasks_by_chunks(tasks, chunks): if not task.startswith(platform): continue - if not any(task[len(platform) + 1:].endswith(c) for c in [chunk, chunk + '-e10s']): + if not any( + task[len(platform) + 1 :].endswith(c) for c in [chunk, chunk + "-e10s"] + ): continue - assert selected_task is None, 'Only one task should be selected for a given platform-chunk couple ({} - {}), {} and {} were selected'.format(platform, chunk, selected_task, task) # noqa + assert ( + selected_task is None + ), "Only one task should be selected for a given platform-chunk couple ({} - {}), {} and {} were selected".format( # noqa + platform, chunk, selected_task, task + ) selected_task = task if selected_task is None: - print('Warning: no task found for chunk', platform, chunk) + print("Warning: no task found for chunk", platform, chunk) else: selected_tasks.add(selected_task) @@ -354,20 +381,27 @@ def filter_tasks_by_chunks(tasks, chunks): def is_opt_task(task): - '''True if the task runs on a supported platform and build type combination. + """True if the task runs on a supported platform and build type combination. This is used to remove -ccov/asan/pgo tasks, along with all /debug tasks. 
- ''' + """ return any(platform in task for platform in OPT_TASK_PATTERNS) -def run(try_config={}, full=False, parameters=None, push=True, message='{msg}', closed_tree=False): +def run( + try_config={}, + full=False, + parameters=None, + push=True, + message="{msg}", + closed_tree=False, +): setup_globals() download_coverage_mapping(vcs.base_ref) changed_sources = vcs.get_outgoing_files() test_files, test_chunks = find_tests(changed_sources) if not test_files and not test_chunks: - print('ERROR Could not find any tests or chunks to run.') + print("ERROR Could not find any tests or chunks to run.") return 1 tg = generate_tasks(parameters, full) @@ -379,25 +413,35 @@ def run(try_config={}, full=False, parameters=None, push=True, message='{msg}', tasks = list(tasks) if not tasks: - print('ERROR Did not find any matching tasks after filtering.') + print("ERROR Did not find any matching tasks after filtering.") return 1 - test_count_message = ('{test_count} test file{test_plural} that ' + - 'cover{test_singular} these changes ' + - '({task_count} task{task_plural} to be scheduled)').format( + test_count_message = ( + "{test_count} test file{test_plural} that " + + "cover{test_singular} these changes " + + "({task_count} task{task_plural} to be scheduled)" + ).format( test_count=len(test_files), - test_plural='' if len(test_files) == 1 else 's', - test_singular='s' if len(test_files) == 1 else '', + test_plural="" if len(test_files) == 1 else "s", + test_singular="s" if len(test_files) == 1 else "", task_count=len(tasks), - task_plural='' if len(tasks) == 1 else 's') - print('Found ' + test_count_message) + task_plural="" if len(tasks) == 1 else "s", + ) + print("Found " + test_count_message) # Set the test paths to be run by setting MOZHARNESS_TEST_PATHS. - path_env = {'MOZHARNESS_TEST_PATHS': six.ensure_text( - json.dumps(resolve_tests_by_suite(test_files)))} - try_config.setdefault('env', {}).update(path_env) + path_env = { + "MOZHARNESS_TEST_PATHS": six.ensure_text( + json.dumps(resolve_tests_by_suite(test_files)) + ) + } + try_config.setdefault("env", {}).update(path_env) # Build commit message. 
- msg = 'try coverage - ' + test_count_message - return push_to_try('coverage', message.format(msg=msg), - try_task_config=generate_try_task_config('coverage', tasks, try_config), - push=push, closed_tree=closed_tree) + msg = "try coverage - " + test_count_message + return push_to_try( + "coverage", + message.format(msg=msg), + try_task_config=generate_try_task_config("coverage", tasks, try_config), + push=push, + closed_tree=closed_tree, + ) diff --git a/tools/update-packaging/test_make_incremental_updates.py b/tools/update-packaging/test_make_incremental_updates.py index cf8357f17273..474196ded00b 100644 --- a/tools/update-packaging/test_make_incremental_updates.py +++ b/tools/update-packaging/test_make_incremental_updates.py @@ -11,11 +11,16 @@ from make_incremental_updates import PatchInfo, MarFileEntry class TestPatchInfo(unittest.TestCase): def setUp(self): - self.work_dir = 'work_dir' - self.file_exclusion_list = ['update.manifest', 'updatev2.manifest', 'updatev3.manifest'] - self.path_exclusion_list = ['/readme.txt'] + self.work_dir = "work_dir" + self.file_exclusion_list = [ + "update.manifest", + "updatev2.manifest", + "updatev3.manifest", + ] + self.path_exclusion_list = ["/readme.txt"] self.patch_info = PatchInfo( - self.work_dir, self.file_exclusion_list, self.path_exclusion_list) + self.work_dir, self.file_exclusion_list, self.path_exclusion_list + ) def testPatchInfo(self): self.assertEquals(self.work_dir, self.patch_info.work_dir) @@ -26,51 +31,68 @@ class TestPatchInfo(unittest.TestCase): self.assertEquals(self.path_exclusion_list, self.patch_info.path_exclusion_list) def test_append_add_instruction(self): - self.patch_info.append_add_instruction('file.test') + self.patch_info.append_add_instruction("file.test") self.assertEquals(['add "file.test"'], self.patch_info.manifestv2) self.assertEquals(['add "file.test"'], self.patch_info.manifestv3) def test_append_add_if_instruction(self): - self.patch_info.append_add_instruction('distribution/extensions/extension/file.test') + self.patch_info.append_add_instruction( + "distribution/extensions/extension/file.test" + ) self.assertEquals( - ['add-if "distribution/extensions/extension" "distribution/extensions/extension/file.test"'], # NOQA: E501 - self.patch_info.manifestv2) + [ + 'add-if "distribution/extensions/extension" "distribution/extensions/extension/file.test"' # NOQA: E501 + ], + self.patch_info.manifestv2, + ) self.assertEquals( - ['add-if "distribution/extensions/extension" "distribution/extensions/extension/file.test"'], # NOQA: E501 - self.patch_info.manifestv3) + [ + 'add-if "distribution/extensions/extension" "distribution/extensions/extension/file.test"' # NOQA: E501 + ], + self.patch_info.manifestv3, + ) def test_append_add_if_not_instruction(self): - self.patch_info.append_add_if_not_instruction('file.test') + self.patch_info.append_add_if_not_instruction("file.test") self.assertEquals([], self.patch_info.manifestv2) - self.assertEquals(['add-if-not "file.test" "file.test"'], self.patch_info.manifestv3) + self.assertEquals( + ['add-if-not "file.test" "file.test"'], self.patch_info.manifestv3 + ) def test_append_patch_instruction(self): - self.patch_info.append_patch_instruction('file.test', 'patchname') + self.patch_info.append_patch_instruction("file.test", "patchname") self.assertEquals(['patch "patchname" "file.test"'], self.patch_info.manifestv2) self.assertEquals(['patch "patchname" "file.test"'], self.patch_info.manifestv3) def test_append_patch_if_instruction(self): 
self.patch_info.append_patch_instruction( - 'distribution/extensions/extension/file.test', 'patchname') + "distribution/extensions/extension/file.test", "patchname" + ) self.assertEquals( - ['patch-if "distribution/extensions/extension" "patchname" "distribution/extensions/extension/file.test"'], # NOQA: E501 - self.patch_info.manifestv2) + [ + 'patch-if "distribution/extensions/extension" "patchname" "distribution/extensions/extension/file.test"' # NOQA: E501 + ], + self.patch_info.manifestv2, + ) self.assertEquals( - ['patch-if "distribution/extensions/extension" "patchname" "distribution/extensions/extension/file.test"'], # NOQA: E501 - self.patch_info.manifestv3) + [ + 'patch-if "distribution/extensions/extension" "patchname" "distribution/extensions/extension/file.test"' # NOQA: E501 + ], + self.patch_info.manifestv3, + ) def test_append_remove_instruction(self): - self.patch_info.append_remove_instruction('file.test') + self.patch_info.append_remove_instruction("file.test") self.assertEquals(['remove "file.test"'], self.patch_info.manifestv2) self.assertEquals(['remove "file.test"'], self.patch_info.manifestv3) def test_append_rmdir_instruction(self): - self.patch_info.append_remove_instruction('dirtest/') + self.patch_info.append_remove_instruction("dirtest/") self.assertEquals(['rmdir "dirtest/"'], self.patch_info.manifestv2) self.assertEquals(['rmdir "dirtest/"'], self.patch_info.manifestv3) def test_append_rmrfdir_instruction(self): - self.patch_info.append_remove_instruction('dirtest/*') + self.patch_info.append_remove_instruction("dirtest/*") self.assertEquals(['rmrfdir "dirtest/"'], self.patch_info.manifestv2) self.assertEquals(['rmrfdir "dirtest/"'], self.patch_info.manifestv3) @@ -80,8 +102,10 @@ class TestPatchInfo(unittest.TestCase): """ def test_build_marfile_entry_hash(self): - self.assertEquals(({}, set([]), set([])), - self.patch_info.build_marfile_entry_hash('root_path')) + self.assertEquals( + ({}, set([]), set([])), + self.patch_info.build_marfile_entry_hash("root_path"), + ) """ FIXME touches the filesystem, need refactoring @@ -112,13 +136,14 @@ class TestMarFileEntry(unittest.TestCase): class TestMakeIncrementalUpdates(unittest.TestCase): def setUp(self): - work_dir = '.' + work_dir = "." 
self.patch_info = PatchInfo( work_dir, - ['update.manifest', 'updatev2.manifest', 'updatev3.manifest'], - ['/readme.txt']) - root_path = '/' - filename = 'test.file' + ["update.manifest", "updatev2.manifest", "updatev3.manifest"], + ["/readme.txt"], + ) + root_path = "/" + filename = "test.file" self.mar_file_entry = MarFileEntry(root_path, filename) """ FIXME makes direct shell calls, need refactoring @@ -162,14 +187,25 @@ class TestMakeIncrementalUpdates(unittest.TestCase): """ def test_decode_filename(self): - expected = {'locale': 'lang', 'platform': 'platform', - 'product': 'product', 'version': '1.0', 'type': 'complete'} - self.assertEquals(expected, mkup.decode_filename('product-1.0.lang.platform.complete.mar')) - self.assertEquals(expected, mkup.decode_filename('platform/lang/product-1.0.complete.mar')) + expected = { + "locale": "lang", + "platform": "platform", + "product": "product", + "version": "1.0", + "type": "complete", + } + self.assertEquals( + expected, mkup.decode_filename("product-1.0.lang.platform.complete.mar") + ) + self.assertEquals( + expected, mkup.decode_filename("platform/lang/product-1.0.complete.mar") + ) with self.assertRaises(Exception) as cm: - mkup.decode_filename('fail') - self.assertTrue(cm.exception.args[0].startswith('could not parse filepath fail:')) + mkup.decode_filename("fail") + self.assertTrue( + cm.exception.args[0].startswith("could not parse filepath fail:") + ) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/xpcom/components/gen_static_components.py b/xpcom/components/gen_static_components.py index 80fac6217378..ab2aee41ee78 100644 --- a/xpcom/components/gen_static_components.py +++ b/xpcom/components/gen_static_components.py @@ -11,17 +11,16 @@ from perfecthash import PerfectHash import buildconfig -NO_CONTRACT_ID = 0xffffffff +NO_CONTRACT_ID = 0xFFFFFFFF PHF_SIZE = 512 -ENDIAN = '<' if buildconfig.substs['TARGET_ENDIANNESS'] == 'little' else '>' +ENDIAN = "<" if buildconfig.substs["TARGET_ENDIANNESS"] == "little" else ">" # Represents a UUID in the format used internally by Gecko, and supports # serializing it in that format to both C++ source and raw byte arrays. class UUIDRepr(object): - def __init__(self, uuid): self.uuid = uuid @@ -33,7 +32,7 @@ class UUIDRepr(object): d = list(fields[3:5]) for i in range(0, 6): - d.append(fields[5] >> (8 * (5 - i)) & 0xff) + d.append(fields[5] >> (8 * (5 - i)) & 0xFF) self.d = tuple(d) @@ -42,14 +41,12 @@ class UUIDRepr(object): @property def bytes(self): - return struct.pack(ENDIAN + 'IHHBBBBBBBB', - self.a, self.b, self.c, *self.d) + return struct.pack(ENDIAN + "IHHBBBBBBBB", self.a, self.b, self.c, *self.d) def to_cxx(self): - rest = ', '.join('0x%02x' % b for b in self.d) + rest = ", ".join("0x%02x" % b for b in self.d) - return '{ 0x%x, 0x%x, 0x%x, { %s } }' % (self.a, self.b, self.c, - rest) + return "{ 0x%x, 0x%x, 0x%x, { %s } }" % (self.a, self.b, self.c, rest) # Corresponds to the Module::ProcessSelector enum in Module.h. 
The actual @@ -63,63 +60,58 @@ class ProcessSelector: ALLOW_IN_VR_PROCESS = 0x8 ALLOW_IN_SOCKET_PROCESS = 0x10 ALLOW_IN_RDD_PROCESS = 0x20 - ALLOW_IN_GPU_AND_MAIN_PROCESS = (ALLOW_IN_GPU_PROCESS | - MAIN_PROCESS_ONLY) - ALLOW_IN_GPU_AND_SOCKET_PROCESS = (ALLOW_IN_GPU_PROCESS | - ALLOW_IN_SOCKET_PROCESS) + ALLOW_IN_GPU_AND_MAIN_PROCESS = ALLOW_IN_GPU_PROCESS | MAIN_PROCESS_ONLY + ALLOW_IN_GPU_AND_SOCKET_PROCESS = ALLOW_IN_GPU_PROCESS | ALLOW_IN_SOCKET_PROCESS ALLOW_IN_GPU_AND_VR_PROCESS = ALLOW_IN_GPU_PROCESS | ALLOW_IN_VR_PROCESS - ALLOW_IN_GPU_VR_AND_SOCKET_PROCESS = (ALLOW_IN_GPU_PROCESS | - ALLOW_IN_VR_PROCESS | - ALLOW_IN_SOCKET_PROCESS) - ALLOW_IN_RDD_AND_SOCKET_PROCESS = (ALLOW_IN_RDD_PROCESS | - ALLOW_IN_SOCKET_PROCESS) - ALLOW_IN_GPU_RDD_AND_SOCKET_PROCESS = (ALLOW_IN_GPU_PROCESS | - ALLOW_IN_RDD_PROCESS | - ALLOW_IN_SOCKET_PROCESS) - ALLOW_IN_GPU_RDD_VR_AND_SOCKET_PROCESS = (ALLOW_IN_GPU_PROCESS | - ALLOW_IN_RDD_PROCESS | - ALLOW_IN_VR_PROCESS | - ALLOW_IN_SOCKET_PROCESS) + ALLOW_IN_GPU_VR_AND_SOCKET_PROCESS = ( + ALLOW_IN_GPU_PROCESS | ALLOW_IN_VR_PROCESS | ALLOW_IN_SOCKET_PROCESS + ) + ALLOW_IN_RDD_AND_SOCKET_PROCESS = ALLOW_IN_RDD_PROCESS | ALLOW_IN_SOCKET_PROCESS + ALLOW_IN_GPU_RDD_AND_SOCKET_PROCESS = ( + ALLOW_IN_GPU_PROCESS | ALLOW_IN_RDD_PROCESS | ALLOW_IN_SOCKET_PROCESS + ) + ALLOW_IN_GPU_RDD_VR_AND_SOCKET_PROCESS = ( + ALLOW_IN_GPU_PROCESS + | ALLOW_IN_RDD_PROCESS + | ALLOW_IN_VR_PROCESS + | ALLOW_IN_SOCKET_PROCESS + ) # Maps ProcessSelector constants to the name of the corresponding # Module::ProcessSelector enum value. PROCESSES = { - ProcessSelector.ANY_PROCESS: 'ANY_PROCESS', - ProcessSelector.MAIN_PROCESS_ONLY: 'MAIN_PROCESS_ONLY', - ProcessSelector.CONTENT_PROCESS_ONLY: 'CONTENT_PROCESS_ONLY', - ProcessSelector.ALLOW_IN_GPU_PROCESS: 'ALLOW_IN_GPU_PROCESS', - ProcessSelector.ALLOW_IN_VR_PROCESS: 'ALLOW_IN_VR_PROCESS', - ProcessSelector.ALLOW_IN_SOCKET_PROCESS: 'ALLOW_IN_SOCKET_PROCESS', - ProcessSelector.ALLOW_IN_RDD_PROCESS: 'ALLOW_IN_RDD_PROCESS', - ProcessSelector.ALLOW_IN_GPU_AND_MAIN_PROCESS: 'ALLOW_IN_GPU_AND_MAIN_PROCESS', - ProcessSelector.ALLOW_IN_GPU_AND_SOCKET_PROCESS: 'ALLOW_IN_GPU_AND_SOCKET_PROCESS', - ProcessSelector.ALLOW_IN_GPU_AND_VR_PROCESS: 'ALLOW_IN_GPU_AND_VR_PROCESS', - ProcessSelector.ALLOW_IN_GPU_VR_AND_SOCKET_PROCESS: 'ALLOW_IN_GPU_VR_AND_SOCKET_PROCESS', - ProcessSelector.ALLOW_IN_RDD_AND_SOCKET_PROCESS: - 'ALLOW_IN_RDD_AND_SOCKET_PROCESS', - ProcessSelector.ALLOW_IN_GPU_RDD_AND_SOCKET_PROCESS: - 'ALLOW_IN_GPU_RDD_AND_SOCKET_PROCESS', - ProcessSelector.ALLOW_IN_GPU_RDD_VR_AND_SOCKET_PROCESS: - 'ALLOW_IN_GPU_RDD_VR_AND_SOCKET_PROCESS', + ProcessSelector.ANY_PROCESS: "ANY_PROCESS", + ProcessSelector.MAIN_PROCESS_ONLY: "MAIN_PROCESS_ONLY", + ProcessSelector.CONTENT_PROCESS_ONLY: "CONTENT_PROCESS_ONLY", + ProcessSelector.ALLOW_IN_GPU_PROCESS: "ALLOW_IN_GPU_PROCESS", + ProcessSelector.ALLOW_IN_VR_PROCESS: "ALLOW_IN_VR_PROCESS", + ProcessSelector.ALLOW_IN_SOCKET_PROCESS: "ALLOW_IN_SOCKET_PROCESS", + ProcessSelector.ALLOW_IN_RDD_PROCESS: "ALLOW_IN_RDD_PROCESS", + ProcessSelector.ALLOW_IN_GPU_AND_MAIN_PROCESS: "ALLOW_IN_GPU_AND_MAIN_PROCESS", + ProcessSelector.ALLOW_IN_GPU_AND_SOCKET_PROCESS: "ALLOW_IN_GPU_AND_SOCKET_PROCESS", + ProcessSelector.ALLOW_IN_GPU_AND_VR_PROCESS: "ALLOW_IN_GPU_AND_VR_PROCESS", + ProcessSelector.ALLOW_IN_GPU_VR_AND_SOCKET_PROCESS: "ALLOW_IN_GPU_VR_AND_SOCKET_PROCESS", + ProcessSelector.ALLOW_IN_RDD_AND_SOCKET_PROCESS: "ALLOW_IN_RDD_AND_SOCKET_PROCESS", + ProcessSelector.ALLOW_IN_GPU_RDD_AND_SOCKET_PROCESS: 
"ALLOW_IN_GPU_RDD_AND_SOCKET_PROCESS", + ProcessSelector.ALLOW_IN_GPU_RDD_VR_AND_SOCKET_PROCESS: "ALLOW_IN_GPU_RDD_VR_AND_SOCKET_PROCESS", # NOQA: E501 } # Emits the C++ symbolic constant corresponding to a ProcessSelector constant. def lower_processes(processes): - return 'Module::ProcessSelector::%s' % PROCESSES[processes] + return "Module::ProcessSelector::%s" % PROCESSES[processes] # Emits the C++ symbolic constant for a ModuleEntry's ModuleID enum entry. def lower_module_id(module): - return 'ModuleID::%s' % module.name + return "ModuleID::%s" % module.name # Represents a static string table, indexed by offset. This allows us to # reference strings from static data structures without requiring runtime # relocations. class StringTable(object): - def __init__(self): self.entries = {} self.entry_list = [] @@ -136,7 +128,7 @@ class StringTable(object): assert not self._serialized - assert len(string) == len(string.encode('utf-8')) + assert len(string) == len(string.encode("utf-8")) idx = self.size self.size += len(string) + 1 @@ -155,23 +147,20 @@ class StringTable(object): idx = 0 for entry in self.entry_list: - str_ = entry.replace('\\', '\\\\').replace('"', r'\"') \ - .replace('\n', r'\n') + str_ = entry.replace("\\", "\\\\").replace('"', r"\"").replace("\n", r"\n") lines.append(' /* 0x%x */ "%s\\0"\n' % (idx, str_)) idx += len(entry) + 1 - return ''.join(lines) + return "".join(lines) # Returns a `StringEntry` struct initializer for the string table entry # corresponding to the given string. If no matching entry exists, it is # first created. def entry_to_cxx(self, string): idx = self.get_idx(string) - return '{ 0x%x } /* %s */' % ( - idx, - pretty_string(string)) + return "{ 0x%x } /* %s */" % (idx, pretty_string(string)) strings = StringTable() @@ -183,7 +172,6 @@ interfaces = [] # sub-namespaces. This is used to generate pre-declarations for incomplete # types referenced in XPCOM manifests. 
class Namespace(object): - def __init__(self, name=None): self.name = name self.classes = set() @@ -202,16 +190,16 @@ class Namespace(object): def to_cxx(self): res = "" if self.name: - res += 'namespace %s {\n' % self.name + res += "namespace %s {\n" % self.name for clas in sorted(self.classes): - res += 'class %s;\n' % clas + res += "class %s;\n" % clas for ns in sorted(self.namespaces.keys()): res += self.namespaces[ns].to_cxx() if self.name: - res += '} // namespace %s\n' % self.name + res += "} // namespace %s\n" % self.name return res @@ -221,19 +209,20 @@ class ModuleEntry(object): next_anon_id = 0 def __init__(self, data, init_idx): - self.cid = UUIDRepr(UUID(data['cid'])) - self.contract_ids = data.get('contract_ids', []) - self.type = data.get('type', 'nsISupports') - self.categories = data.get('categories', {}) - self.processes = data.get('processes', 0) - self.headers = data.get('headers', []) + self.cid = UUIDRepr(UUID(data["cid"])) + self.contract_ids = data.get("contract_ids", []) + self.type = data.get("type", "nsISupports") + self.categories = data.get("categories", {}) + self.processes = data.get("processes", 0) + self.headers = data.get("headers", []) - self.js_name = data.get('js_name', None) - self.interfaces = data.get('interfaces', []) + self.js_name = data.get("js_name", None) + self.interfaces = data.get("interfaces", []) if len(self.interfaces) > 255: - raise Exception('JS service %s may not have more than 255 ' - 'interfaces' % self.js_name) + raise Exception( + "JS service %s may not have more than 255 " "interfaces" % self.js_name + ) self.interfaces_offset = len(interfaces) for iface in self.interfaces: @@ -247,56 +236,67 @@ class ModuleEntry(object): # module's constructor. self.init_idx = init_idx - self.constructor = data.get('constructor', None) - self.legacy_constructor = data.get('legacy_constructor', None) - self.init_method = data.get('init_method', []) + self.constructor = data.get("constructor", None) + self.legacy_constructor = data.get("legacy_constructor", None) + self.init_method = data.get("init_method", []) - self.jsm = data.get('jsm', None) + self.jsm = data.get("jsm", None) - self.external = data.get('external', not (self.headers or - self.legacy_constructor)) - self.singleton = data.get('singleton', False) - self.overridable = data.get('overridable', False) + self.external = data.get( + "external", not (self.headers or self.legacy_constructor) + ) + self.singleton = data.get("singleton", False) + self.overridable = data.get("overridable", False) - if 'name' in data: + if "name" in data: self.anonymous = False - self.name = data['name'] + self.name = data["name"] else: self.anonymous = True - self.name = 'Anonymous%03d' % ModuleEntry.next_anon_id + self.name = "Anonymous%03d" % ModuleEntry.next_anon_id ModuleEntry.next_anon_id += 1 def error(str_): - raise Exception("Error defining component %s (%s): %s" % ( - str(self.cid), ', '.join(map(repr, self.contract_ids)), - str_)) + raise Exception( + "Error defining component %s (%s): %s" + % (str(self.cid), ", ".join(map(repr, self.contract_ids)), str_) + ) if self.jsm: if not self.constructor: error("JavaScript components must specify a constructor") - for prop in ('init_method', 'legacy_constructor', 'headers'): + for prop in ("init_method", "legacy_constructor", "headers"): if getattr(self, prop): - error("JavaScript components may not specify a '%s' " - "property" % prop) + error( + "JavaScript components may not specify a '%s' " + "property" % prop + ) elif self.external: if self.constructor 
or self.legacy_constructor: - error("Externally-constructed components may not specify " - "'constructor' or 'legacy_constructor' properties") + error( + "Externally-constructed components may not specify " + "'constructor' or 'legacy_constructor' properties" + ) if self.init_method: - error("Externally-constructed components may not specify " - "'init_method' properties") - if self.type == 'nsISupports': - error("Externally-constructed components must specify a type " - "other than nsISupports") + error( + "Externally-constructed components may not specify " + "'init_method' properties" + ) + if self.type == "nsISupports": + error( + "Externally-constructed components must specify a type " + "other than nsISupports" + ) if self.constructor and self.legacy_constructor: - error("The 'constructor' and 'legacy_constructor' properties " - "are mutually exclusive") + error( + "The 'constructor' and 'legacy_constructor' properties " + "are mutually exclusive" + ) if self.overridable and not self.contract_ids: - error("Overridable components must specify at least one contract " - "ID") + error("Overridable components must specify at least one contract " "ID") @property def contract_id(self): @@ -305,9 +305,11 @@ class ModuleEntry(object): # Generates the C++ code for a StaticModule struct initializer # representing this component. def to_cxx(self): - contract_id = (strings.entry_to_cxx(self.contract_id) - if self.overridable - else '{ 0x%x }' % NO_CONTRACT_ID) + contract_id = ( + strings.entry_to_cxx(self.contract_id) + if self.overridable + else "{ 0x%x }" % NO_CONTRACT_ID + ) return """ /* {name} */ {{ @@ -315,10 +317,13 @@ class ModuleEntry(object): {cid}, {contract_id}, {processes}, - }}""".format(name=self.name, cid=self.cid.to_cxx(), - cid_string=str(self.cid), - contract_id=contract_id, - processes=lower_processes(self.processes)) + }}""".format( + name=self.name, + cid=self.cid.to_cxx(), + cid_string=str(self.cid), + contract_id=contract_id, + processes=lower_processes(self.processes), + ) # Generates the C++ code for a JSServiceEntry represengin this module. def lower_js_service(self): @@ -328,10 +333,12 @@ class ModuleEntry(object): ModuleID::{name}, {{ {iface_offset} }}, {iface_count} - }}""".format(js_name=strings.entry_to_cxx(self.js_name), - name=self.name, - iface_offset=self.interfaces_offset, - iface_count=len(self.interfaces)) + }}""".format( + js_name=strings.entry_to_cxx(self.js_name), + name=self.name, + iface_offset=self.interfaces_offset, + iface_count=len(self.interfaces), + ) # Generates the C++ code necessary to construct an instance of this # component. @@ -345,40 +352,45 @@ class ModuleEntry(object): # # And which returns an `nsresult` indicating success or failure. 
def lower_constructor(self): - res = '' + res = "" if self.init_idx is not None: - res += ' MOZ_TRY(CallInitFunc(%d));\n' % self.init_idx + res += " MOZ_TRY(CallInitFunc(%d));\n" % self.init_idx if self.legacy_constructor: - res += (' return /* legacy */ %s(nullptr, aIID, aResult);\n' - % self.legacy_constructor) + res += ( + " return /* legacy */ %s(nullptr, aIID, aResult);\n" + % self.legacy_constructor + ) return res if self.jsm: res += ( - ' nsCOMPtr<nsISupports> inst;\n' - ' MOZ_TRY(ConstructJSMComponent(nsLiteralCString(%s),\n' - ' %s,\n' - ' getter_AddRefs(inst)));' - '\n' % (json.dumps(self.jsm), json.dumps(self.constructor))) + " nsCOMPtr<nsISupports> inst;\n" + " MOZ_TRY(ConstructJSMComponent(nsLiteralCString(%s),\n" + " %s,\n" + " getter_AddRefs(inst)));" + "\n" % (json.dumps(self.jsm), json.dumps(self.constructor)) + ) elif self.external: - res += (' nsCOMPtr<nsISupports> inst = ' - 'mozCreateComponent<%s>();\n' % self.type) + res += ( + " nsCOMPtr<nsISupports> inst = " + "mozCreateComponent<%s>();\n" % self.type + ) # The custom constructor may return null, so check before calling # any methods. - res += ' NS_ENSURE_TRUE(inst, NS_ERROR_FAILURE);\n' + res += " NS_ENSURE_TRUE(inst, NS_ERROR_FAILURE);\n" else: - res += ' RefPtr<%s> inst = ' % self.type + res += " RefPtr<%s> inst = " % self.type if not self.constructor: - res += 'new %s();\n' % self.type + res += "new %s();\n" % self.type else: - res += '%s();\n' % self.constructor + res += "%s();\n" % self.constructor # The `new` operator is infallible, so we don't need to worry # about it returning null, but custom constructors may, so # check before calling any methods. - res += ' NS_ENSURE_TRUE(inst, NS_ERROR_OUT_OF_MEMORY);\n' + res += " NS_ENSURE_TRUE(inst, NS_ERROR_OUT_OF_MEMORY);\n" # Check that the constructor function returns an appropriate # `already_AddRefed` value for our declared type. @@ -392,12 +404,15 @@ class ModuleEntry(object): std::is_base_of<%(type)s, T>::value, "Singleton constructor must return correct already_AddRefed"); -""" % {'type': self.type, 'constructor': self.constructor} +""" % { + "type": self.type, + "constructor": self.constructor, + } if self.init_method: - res += ' MOZ_TRY(inst->%s());\n' % self.init_method + res += " MOZ_TRY(inst->%s());\n" % self.init_method - res += ' return inst->QueryInterface(aIID, aResult);\n' + res += " return inst->QueryInterface(aIID, aResult);\n" return res @@ -409,11 +424,12 @@ class ModuleEntry(object): assert not self.anonymous substs = { - 'name': self.name, - 'id': '::mozilla::xpcom::ModuleID::%s' % self.name, + "name": self.name, + "id": "::mozilla::xpcom::ModuleID::%s" % self.name, } - res = """ + res = ( + """ namespace %(name)s { static inline const nsID& CID() { return ::mozilla::xpcom::Components::GetCID(%(id)s); @@ -422,18 +438,26 @@ static inline const nsID& CID() { static inline ::mozilla::xpcom::GetServiceHelper Service(nsresult* aRv = nullptr) { return {%(id)s, aRv}; } -""" % substs +""" + % substs + ) if not self.singleton: - res += """ + res += ( + """ static inline ::mozilla::xpcom::CreateInstanceHelper Create(nsresult* aRv = nullptr) { return {%(id)s, aRv}; } -""" % substs +""" + % substs + ) - res += """\ + res += ( + """\ } // namespace %(name)s -""" % substs +""" + % substs + ) return res @@ -442,14 +466,12 @@ static inline ::mozilla::xpcom::CreateInstanceHelper Create(nsresult* aRv = null # certain special characters replaced so that it can be used in a C++-style # (/* ... */) comment. 
def pretty_string(string): - return (json.dumps(string).replace('*/', r'*\/') - .replace('/*', r'/\*')) + return json.dumps(string).replace("*/", r"*\/").replace("/*", r"/\*") # Represents a static contract ID entry, corresponding to a C++ ContractEntry # struct, mapping a contract ID to a static module entry. class ContractEntry(object): - def __init__(self, contract, module): self.contract = contract self.module = module @@ -459,8 +481,10 @@ class ContractEntry(object): {{ {contract}, {module_id}, - }}""".format(contract=strings.entry_to_cxx(self.contract), - module_id=lower_module_id(self.module)) + }}""".format( + contract=strings.entry_to_cxx(self.contract), + module_id=lower_module_id(self.module), + ) # Generates the C++ code for the StaticCategoryEntry and StaticCategory @@ -473,26 +497,30 @@ def gen_categories(substs, categories): for category, entries in sorted(categories.items()): entries.sort() - cats.append(' { %s,\n' - ' %d, %d },\n' - % (strings.entry_to_cxx(category), - count, len(entries))) + cats.append( + " { %s,\n" + " %d, %d },\n" % (strings.entry_to_cxx(category), count, len(entries)) + ) count += len(entries) - ents.append(' /* %s */\n' % pretty_string(category)) + ents.append(" /* %s */\n" % pretty_string(category)) for entry, value, processes in entries: - ents.append(' { %s,\n' - ' %s,\n' - ' %s },\n' - % (strings.entry_to_cxx(entry), - strings.entry_to_cxx(value), - lower_processes(processes))) - ents.append('\n') + ents.append( + " { %s,\n" + " %s,\n" + " %s },\n" + % ( + strings.entry_to_cxx(entry), + strings.entry_to_cxx(value), + lower_processes(processes), + ) + ) + ents.append("\n") ents.pop() - substs['category_count'] = len(cats) - substs['categories'] = ''.join(cats) - substs['category_entries'] = ''.join(ents) + substs["category_count"] = len(cats) + substs["categories"] = "".join(cats) + substs["category_entries"] = "".join(ents) # Generates the C++ code for all Init and Unload functions declared in XPCOM @@ -509,26 +537,29 @@ def gen_module_funcs(substs, funcs): """ for i, (init, unload) in enumerate(funcs): - init_code = '%s();' % init if init else '/* empty */' + init_code = "%s();" % init if init else "/* empty */" inits.append(template % (i, init_code)) if unload: - unloads.append("""\ + unloads.append( + """\ if (CalledInit(%d)) { %s(); } -""" % (i, unload)) +""" + % (i, unload) + ) - substs['init_funcs'] = ''.join(inits) - substs['unload_funcs'] = ''.join(unloads) - substs['init_count'] = len(funcs) + substs["init_funcs"] = "".join(inits) + substs["unload_funcs"] = "".join(unloads) + substs["init_count"] = len(funcs) def gen_interfaces(ifaces): res = [] for iface in ifaces: - res.append(' nsXPTInterface::%s,\n' % iface) - return ''.join(res) + res.append(" nsXPTInterface::%s,\n" % iface) + return "".join(res) # Generates class pre-declarations for any types referenced in `Classes` array @@ -538,7 +569,7 @@ def gen_decls(types): root_ns = Namespace() for type_ in sorted(types): - parts = type_.split('::') + parts = type_.split("::") ns = root_ns for part in parts[:-1]: @@ -554,14 +585,17 @@ def gen_decls(types): def gen_constructors(entries): constructors = [] for entry in entries: - constructors.append("""\ + constructors.append( + """\ case {id}: {{ {constructor}\ }} -""".format(id=lower_module_id(entry), - constructor=entry.lower_constructor())) +""".format( + id=lower_module_id(entry), constructor=entry.lower_constructor() + ) + ) - return ''.join(constructors) + return "".join(constructors) # Generates the getter code for each named 
component entry in the @@ -570,9 +604,7 @@ def gen_getters(entries): entries = list(entries) entries.sort(key=lambda e: e.name) - return ''.join(entry.lower_getters() - for entry in entries - if not entry.anonymous) + return "".join(entry.lower_getters() for entry in entries if not entry.anonymous) def gen_includes(substs, all_headers): @@ -580,23 +612,24 @@ def gen_includes(substs, all_headers): absolute_headers = set() for header in all_headers: - if header.startswith('/'): + if header.startswith("/"): absolute_headers.add(header) else: headers.add(header) includes = ['#include "%s"' % header for header in sorted(headers)] - substs['includes'] = '\n'.join(includes) + '\n' + substs["includes"] = "\n".join(includes) + "\n" - relative_includes = ['#include "../..%s"' % header - for header in sorted(absolute_headers)] - substs['relative_includes'] = '\n'.join(relative_includes) + '\n' + relative_includes = [ + '#include "../..%s"' % header for header in sorted(absolute_headers) + ] + substs["relative_includes"] = "\n".join(relative_includes) + "\n" def to_list(val): if isinstance(val, (list, tuple)): return val - return val, + return (val,) def gen_substs(manifests): @@ -608,19 +641,19 @@ def gen_substs(manifests): categories = defaultdict(list) for manifest in manifests: - headers |= set(manifest.get('Headers', [])) + headers |= set(manifest.get("Headers", [])) init_idx = None - init = manifest.get('InitFunc') - unload = manifest.get('UnloadFunc') + init = manifest.get("InitFunc") + unload = manifest.get("UnloadFunc") if init or unload: init_idx = len(module_funcs) module_funcs.append((init, unload)) - for clas in manifest['Classes']: + for clas in manifest["Classes"]: modules.append(ModuleEntry(clas, init_idx)) - for category, entries in manifest.get('Categories', {}).items(): + for category, entries in manifest.get("Categories", {}).items(): for key, entry in entries.items(): if isinstance(entry, tuple): value, process = entry @@ -642,7 +675,7 @@ def gen_substs(manifests): for contract_id in mod.contract_ids: if contract_id in contract_map: - raise Exception('Duplicate contract ID: %s' % contract_id) + raise Exception("Duplicate contract ID: %s" % contract_id) entry = ContractEntry(contract_id, mod) contracts.append(entry) @@ -650,8 +683,7 @@ def gen_substs(manifests): for category, entries in mod.categories.items(): for entry in to_list(entries): - categories[category].append((entry, mod.contract_id, - mod.processes)) + categories[category].append((entry, mod.contract_id, mod.processes)) if mod.type and not mod.headers: types.add(mod.type) @@ -661,90 +693,87 @@ def gen_substs(manifests): if mod.js_name: if mod.js_name in js_services: - raise Exception('Duplicate JS service name: %s' % mod.js_name) + raise Exception("Duplicate JS service name: %s" % mod.js_name) js_services[mod.js_name] = mod if str(mod.cid) in cids: - raise Exception('Duplicate cid: %s' % str(mod.cid)) + raise Exception("Duplicate cid: %s" % str(mod.cid)) cids.add(str(mod.cid)) - cid_phf = PerfectHash(modules, PHF_SIZE, - key=lambda module: module.cid.bytes) + cid_phf = PerfectHash(modules, PHF_SIZE, key=lambda module: module.cid.bytes) - contract_phf = PerfectHash(contracts, PHF_SIZE, - key=lambda entry: entry.contract) + contract_phf = PerfectHash(contracts, PHF_SIZE, key=lambda entry: entry.contract) - js_services_phf = PerfectHash(list(js_services.values()), PHF_SIZE, - key=lambda entry: entry.js_name) + js_services_phf = PerfectHash( + list(js_services.values()), PHF_SIZE, key=lambda entry: entry.js_name + ) substs 
= {} gen_categories(substs, categories) - substs['module_ids'] = ''.join(' %s,\n' % entry.name - for entry in cid_phf.entries) + substs["module_ids"] = "".join(" %s,\n" % entry.name for entry in cid_phf.entries) - substs['module_count'] = len(modules) - substs['contract_count'] = len(contracts) + substs["module_count"] = len(modules) + substs["contract_count"] = len(contracts) gen_module_funcs(substs, module_funcs) gen_includes(substs, headers) - substs['component_jsms'] = '\n'.join(' %s,' % strings.entry_to_cxx(jsm) - for jsm in sorted(jsms)) + '\n' + substs["component_jsms"] = ( + "\n".join(" %s," % strings.entry_to_cxx(jsm) for jsm in sorted(jsms)) + "\n" + ) - substs['interfaces'] = gen_interfaces(interfaces) + substs["interfaces"] = gen_interfaces(interfaces) - substs['decls'] = gen_decls(types) + substs["decls"] = gen_decls(types) - substs['constructors'] = gen_constructors(cid_phf.entries) + substs["constructors"] = gen_constructors(cid_phf.entries) - substs['component_getters'] = gen_getters(cid_phf.entries) + substs["component_getters"] = gen_getters(cid_phf.entries) - substs['module_cid_table'] = cid_phf.cxx_codegen( - name='ModuleByCID', - entry_type='StaticModule', - entries_name='gStaticModules', + substs["module_cid_table"] = cid_phf.cxx_codegen( + name="ModuleByCID", + entry_type="StaticModule", + entries_name="gStaticModules", lower_entry=lambda entry: entry.to_cxx(), + return_type="const StaticModule*", + return_entry=( + "return entry.CID().Equals(aKey) && entry.Active()" " ? &entry : nullptr;" + ), + key_type="const nsID&", + key_bytes="reinterpret_cast<const char*>(&aKey)", + key_length="sizeof(nsID)", + ) - return_type='const StaticModule*', - return_entry=('return entry.CID().Equals(aKey) && entry.Active()' - ' ? &entry : nullptr;'), - - key_type='const nsID&', - key_bytes='reinterpret_cast<const char*>(&aKey)', - key_length='sizeof(nsID)') - - substs['module_contract_id_table'] = contract_phf.cxx_codegen( - name='LookupContractID', - entry_type='ContractEntry', - entries_name='gContractEntries', + substs["module_contract_id_table"] = contract_phf.cxx_codegen( + name="LookupContractID", + entry_type="ContractEntry", + entries_name="gContractEntries", lower_entry=lambda entry: entry.to_cxx(), + return_type="const ContractEntry*", + return_entry="return entry.Matches(aKey) ? &entry : nullptr;", + key_type="const nsACString&", + key_bytes="aKey.BeginReading()", + key_length="aKey.Length()", + ) - return_type='const ContractEntry*', - return_entry='return entry.Matches(aKey) ? &entry : nullptr;', - - key_type='const nsACString&', - key_bytes='aKey.BeginReading()', - key_length='aKey.Length()') - - substs['js_services_table'] = js_services_phf.cxx_codegen( - name='LookupJSService', - entry_type='JSServiceEntry', - entries_name='gJSServices', + substs["js_services_table"] = js_services_phf.cxx_codegen( + name="LookupJSService", + entry_type="JSServiceEntry", + entries_name="gJSServices", lower_entry=lambda entry: entry.lower_js_service(), - - return_type='const JSServiceEntry*', - return_entry='return entry.Name() == aKey ? &entry : nullptr;', - - key_type='const nsACString&', - key_bytes='aKey.BeginReading()', - key_length='aKey.Length()') + return_type="const JSServiceEntry*", + return_entry="return entry.Name() == aKey ? &entry : nullptr;", + key_type="const nsACString&", + key_bytes="aKey.BeginReading()", + key_length="aKey.Length()", + ) # Do this only after everything else has been emitted so we're sure the # string table is complete. 
- substs['strings'] = strings.to_cxx() + substs["strings"] = strings.to_cxx() return substs @@ -754,9 +783,11 @@ def defined(subst): def read_manifest(filename): - glbl = {'buildconfig': buildconfig, - 'defined': defined, - 'ProcessSelector': ProcessSelector} + glbl = { + "buildconfig": buildconfig, + "defined": defined, + "ProcessSelector": ProcessSelector, + } exec(open(filename).read(), glbl) return glbl @@ -765,33 +796,34 @@ def main(fd, conf_file, template_file): def open_output(filename): return FileAvoidWrite(os.path.join(os.path.dirname(fd.name), filename)) - conf = json.load(open(conf_file, 'r')) + conf = json.load(open(conf_file, "r")) deps = set() manifests = [] - for filename in conf['manifests']: + for filename in conf["manifests"]: deps.add(filename) manifest = read_manifest(filename) manifests.append(manifest) - manifest.setdefault('Priority', 50) - manifest['__filename__'] = filename + manifest.setdefault("Priority", 50) + manifest["__filename__"] = filename - manifests.sort(key=lambda man: (man['Priority'], man['__filename__'])) + manifests.sort(key=lambda man: (man["Priority"], man["__filename__"])) substs = gen_substs(manifests) def replacer(match): return substs[match.group(1)] - with open_output('StaticComponents.cpp') as fh: - with open(template_file, 'r') as tfh: + with open_output("StaticComponents.cpp") as fh: + with open(template_file, "r") as tfh: template = tfh.read() - fh.write(re.sub(r'//# @([a-zA-Z_]+)@\n', replacer, template)) + fh.write(re.sub(r"//# @([a-zA-Z_]+)@\n", replacer, template)) - with open_output('StaticComponentData.h') as fh: - fh.write("""\ + with open_output("StaticComponentData.h") as fh: + fh.write( + """\ /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ /* vim: set ts=8 sts=2 et sw=2 tw=80: */ /* This Source Code Form is subject to the terms of the Mozilla Public @@ -816,9 +848,12 @@ static constexpr size_t kModuleInitCount = %(init_count)d; } // namespace mozilla #endif -""" % substs) +""" + % substs + ) - fd.write("""\ + fd.write( + """\ /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ /* vim: set ts=8 sts=2 et sw=2 tw=80: */ /* This Source Code Form is subject to the terms of the Mozilla Public @@ -893,6 +928,8 @@ namespace components { } // namespace mozilla #endif -""" % substs) +""" + % substs + ) return deps
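
A patch produced purely by a `black` autoformat should not change program behavior. One way to spot-check any of the reformatted files is to confirm that the before/after sources parse to the same AST, which is essentially the safety check `black` itself runs by default (skipped only with `--fast`). The sketch below is illustrative only and is not part of the patch; the script name and file paths are hypothetical.

import ast
import sys


def same_ast(before_path, after_path):
    # Parse both versions and compare their AST dumps; identical dumps mean
    # the reformat changed layout only, not program structure.
    with open(before_path) as f:
        before = ast.parse(f.read())
    with open(after_path) as f:
        after = ast.parse(f.read())
    return ast.dump(before) == ast.dump(after)


if __name__ == "__main__":
    # Usage (hypothetical paths): python check_reformat.py before.py after.py
    matches = same_ast(sys.argv[1], sys.argv[2])
    print("ASTs match" if matches else "ASTs differ")
    sys.exit(0 if matches else 1)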