Merge inbound to mozilla-central. a=merge

Ciure Andrei 2018-02-13 00:08:37 +02:00
commit 72080bd73c
154 changed files with 1915 additions and 853 deletions

View File

@ -20,9 +20,14 @@ var newTab;
function test() {
waitForExplicitFinish();
originalTab = gBrowser.selectedTab;
nextStep(step2);
// This test assumes that time passes between operations. But if the precision
// is low enough and the test fast enough, an operation and a successive call
// to Date.now() will have the same time value.
SpecialPowers.pushPrefEnv({"set": [["privacy.reduceTimerPrecision", false]]},
function() {
originalTab = gBrowser.selectedTab;
nextStep(step2);
});
}
function step2() {

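The race this comment describes is easy to see in isolation. A minimal sketch of the failure mode (illustrative only, not part of the patch; doSomethingFast is a hypothetical stand-in):

// With privacy.reduceTimerPrecision enabled and a coarse clamp, successive
// reads can land in the same clamp bucket:
let before = Date.now();  // e.g. clamped to 1518473200000
doSomethingFast();        // hypothetical operation that finishes within the bucket
let after = Date.now();   // may also report 1518473200000
// before === after is now possible, so "time advanced" assumptions break.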
View File

@ -25,6 +25,8 @@ add_task(async function() {
const TEST_NOTIFICATION_INTERVAL_MS = 2000;
await SpecialPowers.pushPrefEnv({set: [["browser.storageManager.enabled", true]]});
await SpecialPowers.pushPrefEnv({set: [["browser.storageManager.pressureNotification.minIntervalMS", TEST_NOTIFICATION_INTERVAL_MS]]});
// Commented out to see if we really need it
// await SpecialPowers.pushPrefEnv({set: [["privacy.reduceTimerPrecision", false]]});
await notifyStoragePressure();
let notificationbox = document.getElementById("high-priority-global-notificationbox");

View File

@ -18,6 +18,14 @@ add_task(async function setup() {
});
add_task(async function test_keyword() {
// This is set because we see (undiagnosed) test timeouts without it
let timerPrecision = Preferences.get("privacy.reduceTimerPrecision");
Preferences.set("privacy.reduceTimerPrecision", false);
registerCleanupFunction(function() {
Preferences.set("privacy.reduceTimerPrecision", timerPrecision);
});
await promiseAutocompleteResultPopup("keyword bear");
gURLBar.focus();
EventUtils.synthesizeKey("d", {});
@ -100,9 +108,17 @@ add_task(async function test_delay() {
// Set a large delay.
let delay = Preferences.get("browser.urlbar.delay");
Preferences.set("browser.urlbar.delay", TIMEOUT);
// This may be a real test regression from Bug 1283329, if we can get it to
// fail at a realistic 2ms.
let timerPrecision = Preferences.get("privacy.reduceTimerPrecision");
Preferences.set("privacy.reduceTimerPrecision", true);
let timerPrecisionUSec = Preferences.get("privacy.resistFingerprinting.reduceTimerPrecision.microseconds");
Preferences.set("privacy.resistFingerprinting.reduceTimerPrecision.microseconds", 2000);
registerCleanupFunction(function() {
Preferences.set("browser.urlbar.delay", delay);
Preferences.set("privacy.reduceTimerPrecision", timerPrecision);
Preferences.set("privacy.resistFingerprinting.reduceTimerPrecision.microseconds", timerPrecisionUSec);
});
// This is needed to clear the current value, otherwise autocomplete may think

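For reference, the clamping these prefs control behaves roughly like the following sketch (an approximation of what nsRFPService::ReduceTimePrecisionAsMSecs does, not the actual implementation):

// privacy.resistFingerprinting.reduceTimerPrecision.microseconds = 2000
// reduces timestamps to 2ms buckets, approximately:
function reduceTimePrecision(msec, precisionUSec = 2000) {
  const resolutionMs = precisionUSec / 1000;  // 2000us -> 2ms
  return Math.floor(msec / resolutionMs) * resolutionMs;
}
// reduceTimePrecision(1234.567) === 1234; the next bucket begins at 1236.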
View File

@ -21,6 +21,10 @@ function checkTabInfo(expected, actual) {
}
add_task(async function test_sessions_get_recently_closed_tabs() {
// Below, the test makes assumptions about the last accessed time of tabs that are
// not true if we execute fast and reduce the timer precision enough
await SpecialPowers.pushPrefEnv({set: [["privacy.reduceTimerPrecision", false]]});
async function background() {
browser.test.onMessage.addListener(async msg => {
if (msg == "check-sessions") {

View File

@ -83,6 +83,20 @@ add_task(async function testCache() {
});
add_task(async function testCookies() {
// Above in setUpCookies we create an 'old' cookie, wait 10ms, then log a timestamp.
// Here we ask the browser to delete all cookies after the timestamp, with the intention
// that the 'old' cookie is not removed. The issue arises when the timer precision is
// low enough such that the timestamp that gets logged is the same as the 'old' cookie's.
// We hardcode a precision value to ensure that there is time between the 'old' cookie
// and the timestamp generation.
Services.prefs.setBoolPref("privacy.reduceTimerPrecision", true);
Services.prefs.setIntPref("privacy.resistFingerprinting.reduceTimerPrecision.microseconds", 2000);
registerCleanupFunction(function() {
Services.prefs.clearUserPref("privacy.reduceTimerPrecision");
Services.prefs.clearUserPref("privacy.resistFingerprinting.reduceTimerPrecision.microseconds");
});
function background() {
browser.test.onMessage.addListener(async (msg, options) => {
if (msg == "removeCookies") {

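The 10ms wait mentioned above is what makes the hardcoded 2ms precision safe; a sketch of the arithmetic (values illustrative, not from the test):

// 'old' cookie created at t = 1000.0ms  -> clamps to 1000 (2ms buckets)
// wait ~10ms; timestamp logged at t = 1010.3ms -> clamps to 1010
// 1010 > 1000, so deleting cookies newer than the timestamp spares the 'old' one.
// With a coarse clamp (e.g. 100ms) both values can collapse into the same
// bucket, and the 'old' cookie no longer predates the timestamp.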
View File

@ -52,9 +52,11 @@
}
}
ok(false, "Looming Test Failure, Additional Debugging Info: Expected Precision: " + expectedPrecision + " Measured Value: " + x +
" Rounded Vaue: " + rounded + " Fuzzy1: " + Math.abs(rounded - x + expectedPrecision) +
" Fuzzy 2: " + Math.abs(rounded - x));
// We are temporarily disabling this extra debugging failure because we expect to return false in some instances.
// When we correct things we will re-enable it for debugging assistance.
// opener.ok(false, "Looming Test Failure, Additional Debugging Info: Expected Precision: " + expectedPrecision + " Measured Value: " + x +
// " Rounded Vaue: " + rounded + " Fuzzy1: " + Math.abs(rounded - x + expectedPrecision) +
// " Fuzzy 2: " + Math.abs(rounded - x));
return false;
};
@ -65,14 +67,24 @@
waitForCondition(
() => animation.currentTime > 100,
() => {
opener.ok(isRounded(animation.startTime),
// We have disabled Time Precision Reduction for CSS Animations, so we expect those tests to fail.
// If we are testing that preference, turn failures into successes and successes into failures
var maybeInvert = function(value) {
if (opener.prefName.includes("privacy.reduceTimerPrecision") &&
!opener.prefName.includes("privacy.resistFingerprinting"))
return !value;
return value;
};
opener.ok(maybeInvert(isRounded(animation.startTime)),
"pref: " + opener.prefName + " - animation.startTime with precision " + expectedPrecision + " is not rounded: " + animation.startTime);
opener.ok(isRounded(animation.currentTime),
opener.ok(maybeInvert(isRounded(animation.currentTime)),
"pref: " + opener.prefName + " - animation.currentTime with precision " + expectedPrecision + " is not rounded: " + animation.currentTime);
opener.ok(isRounded(animation.timeline.currentTime),
opener.ok(maybeInvert(isRounded(animation.timeline.currentTime)),
"pref: " + opener.prefName + " - animation.timeline.currentTime with precision " + expectedPrecision + " is not rounded: " + animation.timeline.currentTime);
if (document.timeline) {
opener.ok(isRounded(document.timeline.currentTime),
opener.ok(maybeInvert(isRounded(document.timeline.currentTime)),
"pref: " + opener.prefName + " - document.timeline.currentTime with precision " + expectedPrecision + " is not rounded: " + document.timeline.currentTime);
}
opener.done();

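isRounded itself is outside this hunk; a plausible shape for it, consistent with the fuzzy-match debugging output above (a sketch, not the verbatim test helper):

let isRounded = (x) => {
  let rounded = Math.floor(x / expectedPrecision) * expectedPrecision;
  if (rounded === x || x === 0)
    return true;
  // Tolerate floating-point error in the divide/multiply round trip.
  return Math.abs(rounded - x + expectedPrecision) < 0.0000001 ||
         Math.abs(rounded - x) < 0.0000001;
};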
View File

@ -38,6 +38,7 @@ add_task(async function test_cancelManageCreditCardsDialogWithESC() {
});
add_task(async function test_removingSingleAndMultipleCreditCards() {
await SpecialPowers.pushPrefEnv({"set": [["privacy.reduceTimerPrecision", false]]});
await saveCreditCard(TEST_CREDIT_CARD_1);
await saveCreditCard(TEST_CREDIT_CARD_2);
await saveCreditCard(TEST_CREDIT_CARD_3);
@ -90,6 +91,7 @@ add_task(async function test_creditCardsDialogWatchesStorageChanges() {
});
add_task(async function test_showCreditCards() {
await SpecialPowers.pushPrefEnv({"set": [["privacy.reduceTimerPrecision", false]]});
await saveCreditCard(TEST_CREDIT_CARD_1);
await saveCreditCard(TEST_CREDIT_CARD_2);
await saveCreditCard(TEST_CREDIT_CARD_3);

View File

@ -36,7 +36,7 @@ initPopupListener();
add_task(async function check_storage_after_form_submitted() {
// We already verified the first-time use case in the browser test
await SpecialPowers.pushPrefEnv({
set: [["extensions.formautofill.firstTimeUse", false]],
"set": [["extensions.formautofill.firstTimeUse", false]],
});
for (let key in TEST_ADDRESSES[0]) {
@ -56,6 +56,8 @@ add_task(async function check_storage_after_form_submitted() {
// Submit another new address.
add_task(async function check_storage_after_another_address_submitted() {
await SpecialPowers.pushPrefEnv({"set": [["privacy.reduceTimerPrecision", false]]});
document.querySelector("form").reset();
for (let key in TEST_ADDRESSES[1]) {
await setInput("#" + key, TEST_ADDRESSES[1][key]);

View File

@ -254,6 +254,9 @@ const MERGE_TESTCASES = [
},
];
ChromeUtils.defineModuleGetter(this, "Preferences",
"resource://gre/modules/Preferences.jsm");
let do_check_record_matches = (recordWithMeta, record) => {
for (let key in record) {
Assert.equal(recordWithMeta[key], record[key]);
@ -369,6 +372,16 @@ add_task(async function test_add() {
});
add_task(async function test_update() {
// Test assumes that when an entry is saved a second time, its last modified date will
// be different from the first. With high values of precision reduction, we execute too
// fast for that to be true.
let timerPrecision = Preferences.get("privacy.reduceTimerPrecision");
Preferences.set("privacy.reduceTimerPrecision", false);
registerCleanupFunction(function() {
Preferences.set("privacy.reduceTimerPrecision", timerPrecision);
});
let profileStorage = await initProfileStorage(TEST_STORE_FILE_NAME,
[TEST_ADDRESS_1, TEST_ADDRESS_2]);

View File

@ -5,6 +5,8 @@
"use strict";
const {FormAutofillStorage} = ChromeUtils.import("resource://formautofill/FormAutofillStorage.jsm", {});
ChromeUtils.defineModuleGetter(this, "Preferences",
"resource://gre/modules/Preferences.jsm");
const TEST_STORE_FILE_NAME = "test-credit-card.json";
const COLLECTION_NAME = "creditCards";
@ -290,6 +292,16 @@ add_task(async function test_add() {
});
add_task(async function test_update() {
// Test assumes that when an entry is saved a second time, its last modified date will
// be different from the first. With high values of precision reduction, we execute too
// fast for that to be true.
let timerPrecision = Preferences.get("privacy.reduceTimerPrecision");
Preferences.set("privacy.reduceTimerPrecision", false);
registerCleanupFunction(function() {
Preferences.set("privacy.reduceTimerPrecision", timerPrecision);
});
let path = getTempFile(TEST_STORE_FILE_NAME).path;
await prepareTestCreditCards(path);

View File

@ -10,6 +10,8 @@ ChromeUtils.import("resource://shield-recipe-client/lib/PreferenceExperiments.js
add_task(async function testTelemetry() {
// setup
await SpecialPowers.pushPrefEnv({set: [["privacy.reduceTimerPrecision", true]]});
await TelemetryController.submitExternalPing("testfoo", {foo: 1});
await TelemetryController.submitExternalPing("testbar", {bar: 2});
await TelemetryController.submitExternalPing("testfoo", {foo: 3});

View File

@ -6,11 +6,21 @@
"use strict";
requestLongerTimeout(2);
ChromeUtils.defineModuleGetter(this, "Preferences",
"resource://gre/modules/Preferences.jsm");
// Checks that the play/pause button goes to the right state when the scrubber has reached
// the end of the timeline but there are infinite animations playing.
add_task(function* () {
// TODO: see if this is needed.
// let timerPrecision = Preferences.get("privacy.reduceTimerPrecision");
// Preferences.set("privacy.reduceTimerPrecision", false);
// registerCleanupFunction(function () {
// Preferences.set("privacy.reduceTimerPrecision", timerPrecision);
// });
yield addTab(URL_ROOT + "doc_simple_animation.html");
let {panel, inspector} = yield openAnimationInspector();

View File

@ -37,7 +37,7 @@ function* ifTestingSupported() {
for (let i = 0; i < functionCalls.length - 1; i += 2) {
ok(functionCalls[i].timestamp > 0, "The timestamp of the called function is larger than 0.");
ok(functionCalls[i].timestamp < currentTime, "The timestamp has had the frame start time subtracted.");
ok(functionCalls[i + 1].timestamp > functionCalls[i].timestamp, "The timestamp of the called function is correct.");
ok(functionCalls[i + 1].timestamp >= functionCalls[i].timestamp, "The timestamp of the called function is correct.");
}
yield removeTab(target.tab);

View File

@ -82,6 +82,6 @@ function testFunctionCallTimestamp(functionCalls, currentTime) {
for ( let i = 0; i < functionCalls.length-1; i += 2 ) {
ok( functionCalls[i].timestamp > 0, "The timestamp of the called function is larger than 0." );
ok( functionCalls[i].timestamp < currentTime, "The timestamp has had the frame start time subtracted." );
ok( functionCalls[i+1].timestamp > functionCalls[i].timestamp, "The timestamp of the called function is correct." );
ok( functionCalls[i+1].timestamp >= functionCalls[i].timestamp, "The timestamp of the called function is correct." );
}
}

View File

@ -9,6 +9,8 @@
// Have to use the same timer functions used by the inspector.
const {clearTimeout} = ChromeUtils.import("resource://gre/modules/Timer.jsm", {});
ChromeUtils.defineModuleGetter(this, "Preferences",
"resource://gre/modules/Preferences.jsm");
const TEST_URL = URL_ROOT + "doc_markup_flashing.html";
@ -92,6 +94,13 @@ const TEST_DATA = [{
}];
add_task(function* () {
let timerPrecision = Preferences.get("privacy.reduceTimerPrecision");
Preferences.set("privacy.reduceTimerPrecision", false);
registerCleanupFunction(function () {
Preferences.set("privacy.reduceTimerPrecision", timerPrecision);
});
let {inspector, testActor} = yield openInspectorForURL(TEST_URL);
// Make sure mutated nodes flash for a very long time so we can more easily

View File

@ -43,8 +43,8 @@ add_task(function* () {
is(har.log.entries.length, 1, "There must be one request");
let page = har.log.pages[0];
ok(page.pageTimings.onContentLoad > 0, "There must be onContentLoad time");
ok(page.pageTimings.onLoad > 0, "There must be onLoad time");
ok("onContentLoad" in page.pageTimings, "There must be onContentLoad time");
ok("onLoad" in page.pageTimings, "There must be onLoad time");
let entry = har.log.entries[0];
is(entry.request.method, "GET", "Check the method");

View File

@ -15,6 +15,8 @@ add_task(async () => {
store.dispatch(Actions.batchEnable(false));
await SpecialPowers.pushPrefEnv({ "set": [["privacy.reduceTimerPrecision", false]]});
let requestsDone = waitForAllRequestsFinished(monitor);
let markersDone = waitForTimelineMarkers(monitor);
tab.linkedBrowser.reload();

View File

@ -12,7 +12,24 @@ function test() {
requestLongerTimeout(3);
waitForExplicitFinish();
addTab(URI).then(function (tab) {
runCodeMirrorTest(tab.linkedBrowser);
});
/*
* In devtools/client/sourceeditor/test/codemirror/search_test.js there is a test
* multilineInsensitiveSlow which assumes an operation takes less than 100ms.
* With a precision of 100ms, if we get unlucky and begin execution towards the
* end of one clamp interval (e.g. at 95 ms) we will clamp down, take (e.g.) 10ms to execute
* and it will appear to take 100ms.
*
* To avoid this, we hardcode to 2ms of precision.
*
* In theory we don't need to set the pref for all of CodeMirror, in practice
* it seems very difficult to set a pref for just one of the tests.
*/
SpecialPowers.pushPrefEnv(
{ set: [["privacy.reduceTimerPrecision", true],
["privacy.resistFingerprinting.reduceTimerPrecision.microseconds", 2000]]},
function () {
addTab(URI).then(function (tab) {
runCodeMirrorTest(tab.linkedBrowser);
});
});
}

View File

@ -81,7 +81,7 @@ function getCleanedPacket(key, packet) {
// Clean timer properties on the message.
// Those properties are found on console.time and console.timeEnd calls,
// and those time can vary, which is why we need to clean them.
if (res.message.timer.duration) {
if ("duration" in res.message.timer) {
res.message.timer.duration = existingPacket.message.timer.duration;
}
}

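The switch from a truthiness test to an "in" test matters because a console.time/timeEnd pair can legitimately report a duration of exactly 0 once timer precision is reduced; a small illustration:

let timer = { duration: 0 };
if (timer.duration) { /* skipped: 0 is falsy */ }
if ("duration" in timer) { /* taken: the property exists */ }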
View File

@ -14,6 +14,7 @@ const WAIT_TIME = 1000;
const { PerformanceFront } = require("devtools/shared/fronts/performance");
add_task(function* () {
yield SpecialPowers.pushPrefEnv({"set": [["privacy.reduceTimerPrecision", false]]});
yield addTab(MAIN_DOMAIN + "doc_perf.html");
initDebuggerServer();

View File

@ -12,8 +12,11 @@ const { MemoryFront } = require("devtools/shared/fronts/memory");
// Always log packets when running tests.
Services.prefs.setBoolPref("devtools.debugger.log", true);
var gReduceTimePrecision = Services.prefs.getBoolPref("privacy.reduceTimerPrecision");
Services.prefs.setBoolPref("privacy.reduceTimerPrecision", false);
SimpleTest.registerCleanupFunction(function () {
Services.prefs.clearUserPref("devtools.debugger.log");
Services.prefs.setBoolPref("privacy.reduceTimerPrecision", gReduceTimePrecision);
});
function startServerAndGetSelectedTabMemory() {

View File

@ -11,7 +11,17 @@ const { PromisesFront } = require("devtools/shared/fronts/promises");
var EventEmitter = require("devtools/shared/event-emitter");
ChromeUtils.defineModuleGetter(this, "Preferences",
"resource://gre/modules/Preferences.jsm");
add_task(function* () {
let timerPrecision = Preferences.get("privacy.reduceTimerPrecision");
Preferences.set("privacy.reduceTimerPrecision", false);
registerCleanupFunction(function () {
Preferences.set("privacy.reduceTimerPrecision", timerPrecision);
});
let client = yield startTestDebuggerServer("promises-object-test");
let chromeActors = yield getChromeActors(client);
@ -63,7 +73,8 @@ function* testPromiseCreationTimestamp(client, form, makePromise) {
let creationTimestamp = grip.promiseState.creationTimestamp;
ok(start - 1 <= creationTimestamp && creationTimestamp <= end + 1,
"Expect promise creation timestamp to be within elapsed time range.");
"Expect promise creation timestamp to be within elapsed time range: " +
(start - 1) + " <= " + creationTimestamp + " <= " + (end + 1));
yield front.detach();
// Appease eslint

View File

@ -60,7 +60,7 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=453650
}
info("times: " + start + ", " + end);
ok(start < end, "reflow start time lower than end time");
ok(start <= end, "reflow start time lower than end time");
done();
},
@ -72,7 +72,7 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=453650
}
info("times: " + start + ", " + end);
ok(start < end, "reflow start time lower than end time");
ok(start <= end, "reflow start time lower than end time");
done();
},

View File

@ -33,7 +33,7 @@ public:
if (!aTime.IsNull()) {
result.SetValue(
nsRFPService::ReduceTimePrecisionAsMSecs(aTime.Value().ToMilliseconds())
nsRFPService::ReduceTimePrecisionAsMSecs(aTime.Value().ToMilliseconds(), TimerPrecisionType::RFPOnly)
);
}

View File

@ -7,7 +7,9 @@
'use strict';
setup({explicit_done: true});
SpecialPowers.pushPrefEnv(
{ "set": [["dom.animations-api.core.enabled", true]]},
{ "set":
[["dom.animations-api.core.enabled", true],
["privacy.reduceTimerPrecision", false]]},
function() {
window.open("file_event-dispatch.html");
});

View File

@ -7,9 +7,7 @@
'use strict';
setup({explicit_done: true});
SpecialPowers.pushPrefEnv(
{ "set":
[["dom.animations-api.core.enabled", true],
["privacy.reduceTimerPrecision", false]]},
{ "set": [["dom.animations-api.core.enabled", true]]},
function() {
window.open("file_animation-starttime.html");
});

View File

@ -7,9 +7,7 @@
'use strict';
setup({explicit_done: true});
SpecialPowers.pushPrefEnv(
{ "set":
[["dom.animations-api.core.enabled", true],
["privacy.reduceTimerPrecision", false]]},
{ "set": [["dom.animations-api.core.enabled", true]]},
function() {
window.open("file_document-timeline.html");
});

View File

@ -6,8 +6,13 @@
<script>
'use strict';
setup({explicit_done: true});
// This test appears as if it might be racy and cause a timeout with too low a
// precision, so we hardcode the precision to something reasonable.
SpecialPowers.pushPrefEnv(
{ "set": [["dom.animations-api.core.enabled", true]]},
{ "set":
[["dom.animations-api.core.enabled", true],
["privacy.reduceTimerPrecision", true],
["privacy.resistFingerprinting.reduceTimerPrecision.microseconds", 2000]]},
function() {
window.open("file_transition_finish_on_compositor.html");
});

View File

@ -1933,7 +1933,7 @@ NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INTERNAL(FragmentOrElement)
}
nsAutoCString orphan;
if (!tmp->IsInUncomposedDoc() &&
if (!tmp->IsInComposedDoc() &&
// Ignore xbl:content, which is never in the document and hence always
// appears to be orphaned.
!tmp->NodeInfo()->Equals(nsGkAtoms::content, kNameSpaceID_XBL)) {

View File

@ -21,10 +21,15 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=403852
<script class="testbody" type="text/javascript">
SimpleTest.waitForExplicitFinish();
var url = SimpleTest.getTestFileURL("bug403852_fileOpener.js");
var script = SpecialPowers.loadChromeScript(url);
script.addMessageListener("file.opened", onOpened);
script.sendAsyncMessage("file.open");
var script = '';
SpecialPowers.pushPrefEnv({ "set":
[["privacy.reduceTimerPrecision", false]]},
function() {
var url = SimpleTest.getTestFileURL("bug403852_fileOpener.js");
script = SpecialPowers.loadChromeScript(url);
script.addMessageListener("file.opened", onOpened);
script.sendAsyncMessage("file.open");
});
function onOpened(message) {
var fileList = document.getElementById('fileList');

View File

@ -38,9 +38,16 @@ SimpleTest.requestFlakyTimeout("untriaged");
// We don't use SpecialPowers.pushPrefEnv since it can delay the test
// function until after the load event has fired, which means we can't
// test the timestamp of the load event.
const kPrefName = "dom.event.highrestimestamp.enabled";
var prevPrefValue = SpecialPowers.getBoolPref(kPrefName);
SpecialPowers.setBoolPref(kPrefName, true);
const kHighResTimestampsPrefName = "dom.event.highrestimestamp.enabled";
var highRestimerPrevPrefValue = SpecialPowers.getBoolPref(kHighResTimestampsPrefName);
SpecialPowers.setBoolPref(kHighResTimestampsPrefName, true);
// This file performs tests that normalize the timeOrigin within a worker
// and compare it to the page. When this occurs, time can appear to go backwards.
// This is a known (and accepted) regression.
const kReduceTimePrecisionPrefName = "privacy.reduceTimerPrecision";
var reduceTimePrecisionPrevPrefValue = SpecialPowers.getBoolPref(kReduceTimePrecisionPrefName);
SpecialPowers.setBoolPref(kReduceTimePrecisionPrefName, false);
testRegularEvents();
// Event.timeStamp should be relative to the time origin which is:
@ -111,7 +118,8 @@ function testSharedWorkerEvents() {
}
var finishTests = function() {
SpecialPowers.setBoolPref(kPrefName, prevPrefValue);
SpecialPowers.setBoolPref(kHighResTimestampsPrefName, highRestimerPrevPrefValue);
SpecialPowers.setBoolPref(kReduceTimePrecisionPrefName, reduceTimePrecisionPrevPrefValue);
SimpleTest.finish();
};

View File

@ -23,9 +23,10 @@ startTest({
prefs: [
[ "media.test.video-suspend", true ],
[ "media.suspend-bkgnd-video.enabled", true ],
// User a short delay to ensure video decode suspend happens before end
// Use a short delay to ensure video decode suspend happens before end
// of video.
[ "media.suspend-bkgnd-video.delay-ms", MIN_DELAY ]
[ "media.suspend-bkgnd-video.delay-ms", MIN_DELAY ],
[ "privacy.reduceTimerPrecision", false ]
],
tests: gDecodeSuspendTests,
runTest: (test, token) => {

View File

@ -120,7 +120,10 @@
[testGetContributingSources]);
test.setMediaConstraints([{audio: true}], [{audio: true}]);
test.pcLocal.audioElementsOnly = true;
test.run();
SpecialPowers.pushPrefEnv(
{ "set": [["privacy.reduceTimerPrecision", false]]}, function() {
test.run();
});
});
</script>
</pre>

View File

@ -34,9 +34,13 @@
}
}
SpecialPowers.setBoolPref("privacy.reduceTimerPrecision", reduceTimePrecisionPrevPrefValue);
SimpleTest.finish();
}
var reduceTimePrecisionPrevPrefValue = SpecialPowers.getBoolPref("privacy.reduceTimerPrecision");
SpecialPowers.setBoolPref("privacy.reduceTimerPrecision", false);
SimpleTest.waitForExplicitFinish();
addLoadEvent(next);
</script>

View File

@ -15,12 +15,15 @@
var sw = new SharedWorker('sharedworker_performance_user_timing.js');
sw.port.onmessage = function(event) {
if (event.data.type == 'finish') {
SpecialPowers.setBoolPref("privacy.reduceTimerPrecision", reduceTimePrecisionPrevPrefValue);
SimpleTest.finish();
} else if (event.data.type == 'status') {
ok(event.data.status, event.data.msg);
}
}
var reduceTimePrecisionPrevPrefValue = SpecialPowers.getBoolPref("privacy.reduceTimerPrecision");
SpecialPowers.setBoolPref("privacy.reduceTimerPrecision", false);
SimpleTest.waitForExplicitFinish();
</script>
</body>

View File

@ -4,7 +4,6 @@
<title>Test for performance.timeOrigin</title>
<meta http-equiv="content-type" content="text/html; charset=UTF-8">
<script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
<script type="text/javascript" src="test_performance_user_timing.js"></script>
</head>
<body>
<script type="text/js-worker" id="worker-src">
@ -52,6 +51,7 @@ function testSharedWorker() {
var tests = [ testBasic, testWorker, testSharedWorker ];
function next() {
if (!tests.length) {
SpecialPowers.setBoolPref("privacy.reduceTimerPrecision", reduceTimePrecisionPrevPrefValue);
SimpleTest.finish();
return;
}
@ -61,6 +61,14 @@ function next() {
}
SimpleTest.waitForExplicitFinish();
// It is a known issue that comparing time between a worker and a window
// when timer clamping is in effect may cause time to go backwards.
// Do not run this test with this preference set. For large values of
// clamping you will see failures. For small values, it is intermittent.
var reduceTimePrecisionPrevPrefValue = SpecialPowers.getBoolPref("privacy.reduceTimerPrecision");
SpecialPowers.setBoolPref("privacy.reduceTimerPrecision", false);
addLoadEvent(next);
</script>
</pre>

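The backwards-time effect described in the comment comes from normalizing a worker timestamp against the window's separately clamped time origin; a sketch of the comparison (names illustrative):

// Worker and window clamp their time origins independently, so:
let normalized = workerNow + workerTimeOrigin - performance.timeOrigin;
// normalized can come out slightly smaller than an earlier performance.now()
// reading taken in the window, i.e. time appears to go backwards.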
View File

@ -11,6 +11,13 @@
<script class="testbody" type="text/javascript">
SimpleTest.waitForExplicitFinish();
// The worker assumes it will take some amount of time to load a resource.
// With a low enough precision, the duration to load a resource may clamp
// down to zero.
var reduceTimePrecisionPrevPrefValue = SpecialPowers.getBoolPref("privacy.reduceTimerPrecision");
SpecialPowers.setBoolPref("privacy.reduceTimerPrecision", false);
var worker = new Worker('test_worker_performance_entries.js');
worker.onmessage = function(event) {
if (event.data.type == "check") {
@ -19,6 +26,7 @@ worker.onmessage = function(event) {
}
if (event.data.type == "finish") {
SpecialPowers.setBoolPref("privacy.reduceTimerPrecision", reduceTimePrecisionPrevPrefValue);
SimpleTest.finish();
return;
}

View File

@ -11,9 +11,14 @@
<script class="testbody" type="text/javascript">
SimpleTest.waitForExplicitFinish();
var reduceTimePrecisionPrevPrefValue = SpecialPowers.getBoolPref("privacy.reduceTimerPrecision");
SpecialPowers.setBoolPref("privacy.reduceTimerPrecision", false);
var worker = new Worker('test_worker_performance_now.js');
worker.onmessage = function(event) {
if (event.data.type == 'finish') {
SpecialPowers.setBoolPref("privacy.reduceTimerPrecision", reduceTimePrecisionPrevPrefValue);
SimpleTest.finish();
} else if (event.data.type == 'status') {

View File

@ -15,12 +15,15 @@
var worker = new Worker('worker_performance_user_timing.js');
worker.onmessage = function(event) {
if (event.data.type == 'finish') {
SpecialPowers.setBoolPref("privacy.reduceTimerPrecision", reduceTimePrecisionPrevPrefValue);
SimpleTest.finish();
} else if (event.data.type == 'status') {
ok(event.data.status, event.data.msg);
}
}
var reduceTimePrecisionPrevPrefValue = SpecialPowers.getBoolPref("privacy.reduceTimerPrecision");
SpecialPowers.setBoolPref("privacy.reduceTimerPrecision", false);
SimpleTest.waitForExplicitFinish();
</script>
</body>

View File

@ -237,8 +237,13 @@ function sanityCheckEvent(evt)
is(evt.bubbles, false, "Event should not bubble");
is(evt.cancelable, false, "Event should not be cancelable");
if (SpecialPowers.getBoolPref("dom.event.highrestimestamp.enabled")) {
var lessThanOrEqualsAllowed = SpecialPowers.getBoolPref("privacy.reduceTimerPrecision");
var now = window.performance.now();
ok(evt.timeStamp > 0 && evt.timeStamp < now,
ok(evt.timeStamp > 0 &&
(
(evt.timeStamp < now && !lessThanOrEqualsAllowed) ||
(evt.timeStamp <= now && lessThanOrEqualsAllowed)
),
"Event timeStamp (" + evt.timeStamp + ") should be > 0 but " +
"before the current time (" + now + ")");
} else {

View File

@ -5,6 +5,15 @@
<script src="/MochiKit/Base.js"></script>
<script src="/MochiKit/Async.js"></script>
<script src="/tests/SimpleTest/SimpleTest.js"></script>
<script type="text/javascript">
// On Linux 64 with Stylo disabled, we observed a failure in the
// test 'stop() - several in queue' in fx.js with a low precision value.
// We specify a value that seems safe. The root cause of this issue is
// believed to be jQuery's use of new Date as a mechanism to advance animations.
SpecialPowers.pushPrefEnv({set:
[["privacy.reduceTimerPrecision", true],
["privacy.resistFingerprinting.reduceTimerPrecision.microseconds", 2000]]});
</script>
<script type="text/javascript" src="../lib/AJAX_setup.js"></script>
<link rel="stylesheet" type="text/css" href="../lib/test.css" />
</head>

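A sketch of the suspected failure mode (assumed from the comment above, not taken from jQuery's sources): animation steps that measure progress with new Date can observe zero elapsed time under coarse clamping.

var start = +new Date;           // clamped, e.g. into a 100ms bucket
// ... an animation tick fires ~10ms later ...
var elapsed = +new Date - start; // can still be 0 under a 100ms clamp
// progress computed from 'elapsed' never advances, so queued fx stall.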
View File

@ -14,6 +14,8 @@
ok(n >= 0, "The value of now() should be equal to or greater than 0.");
ok(window.performance.now() >= n, "The value of now() should monotonically increase.");
SimpleTest.waitForExplicitFinish();
var reduceTimePrecisionPrevPrefValue = SpecialPowers.getBoolPref("privacy.reduceTimerPrecision");
SpecialPowers.setBoolPref("privacy.reduceTimerPrecision", false);
SimpleTest.requestFlakyTimeout("untriaged");
// The spec says performance.now() should have microsecond resolution, but allows 1ms if the platform doesn't support it.
@ -58,6 +60,7 @@
", iters: " + checks +
", dt: " + (d2 - d) +
", now(): " + n2 + ").");
SpecialPowers.setBoolPref("privacy.reduceTimerPrecision", reduceTimePrecisionPrevPrefValue);
SimpleTest.finish();
};
setTimeout(checkAfterTimeout, 1);

View File

@ -20,7 +20,9 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=822480
SimpleTest.waitForExplicitFinish();
// Resource timing is prefed off by default, so we had to use this workaround
SpecialPowers.pushPrefEnv({"set": [["dom.enable_resource_timing", true]]}, start);
SpecialPowers.pushPrefEnv({"set": [
["dom.enable_resource_timing", true],
["privacy.reduceTimerPrecision", false]]}, start);
var subwindow = null;
function start() {

View File

@ -21,7 +21,9 @@ SimpleTest.waitForExplicitFinish();
// Resource timing is prefed off by default, so we had to use this workaround
var subwindow = null;
SpecialPowers.pushPrefEnv({"set": [["dom.enable_resource_timing", true]]}, start);
SpecialPowers.pushPrefEnv({"set": [
["dom.enable_resource_timing", true],
["privacy.reduceTimerPrecision", false]]}, start);
function start() {
subwindow = window.open("resource_timing_cross_origin.html");

View File

@ -63,7 +63,10 @@ fn main() {
)
.unwrap();
let register_data = rx.recv().unwrap();
let register_data = try_or!(rx.recv(), |_| {
panic!("Problem receiving, unable to continue");
return;
});
println!("Register result: {}", base64::encode(&register_data));
println!("Asking a security key to sign now, with the data from the register...");
let credential = u2f_get_key_handle_from_register_response(&register_data).unwrap();
@ -85,7 +88,9 @@ fn main() {
)
.unwrap();
let (_, sign_data) = rx.recv().unwrap();
let (_, sign_data) = try_or!(rx.recv(), |_| {
println!("Problem receiving");
});
println!("Sign result: {}", base64::encode(&sign_data));
println!("Done.");
}

View File

@ -178,11 +178,6 @@ pub unsafe extern "C" fn rust_u2f_mgr_sign(
return 0;
}
// Need at least one key handle.
if (*khs).len() < 1 {
return 0;
}
let flags = ::SignFlags::from_bits_truncate(flags);
let challenge = from_raw(challenge_ptr, challenge_len);
let application = from_raw(application_ptr, application_len);

View File

@ -165,13 +165,6 @@ impl U2FManager {
));
}
if key_handles.len() < 1 {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"No key handles given",
));
}
for key_handle in &key_handles {
if key_handle.credential.len() > 256 {
return Err(io::Error::new(

View File

@ -1464,6 +1464,8 @@ class DrawTargetCapture : public DrawTarget
public:
virtual bool IsCaptureDT() const override { return true; }
virtual void Dump() = 0;
/**
* Returns true if the recording only contains FillGlyph calls with
* a single font and color. Returns the list of Glyphs along with

View File

@ -12,6 +12,7 @@
#include <vector>
#include "DrawCommand.h"
#include "Logging.h"
namespace mozilla {
namespace gfx {
@ -87,6 +88,15 @@ public:
uint8_t* mEnd;
};
void Log(TreeLog& aStream)
{
for (iterator iter(*this); !iter.Done(); iter.Next()) {
DrawingCommand* cmd = iter.Get();
cmd->Log(aStream);
aStream << "\n";
}
}
private:
CaptureCommandList(const CaptureCommandList& aOther) = delete;
void operator =(const CaptureCommandList& aOther) = delete;

View File

@ -14,6 +14,7 @@
#include "Filters.h"
#include <vector>
#include "FilterNodeCapture.h"
#include "Logging.h"
namespace mozilla {
namespace gfx {
@ -55,6 +56,7 @@ public:
virtual void ExecuteOnDT(DrawTarget* aDT, const Matrix* aTransform = nullptr) const = 0;
virtual bool GetAffectedRect(Rect& aDeviceRect, const Matrix& aTransform) const { return false; }
virtual void CloneInto(CaptureCommandList* aList) = 0;
virtual void Log(TreeLog& aLog) const = 0;
CommandType GetType() { return mType; }

View File

@ -16,6 +16,7 @@
#include "CaptureCommandList.h"
#include "DrawCommand.h"
#include "FilterNodeCapture.h"
#include "Logging.h"
namespace mozilla {
namespace gfx {
@ -81,6 +82,16 @@ public:
reinterpret_cast<Pattern*>(mPattern)->~Pattern();
}
Pattern* Get()
{
return reinterpret_cast<Pattern*>(mPattern);
}
const Pattern* Get() const
{
return reinterpret_cast<const Pattern*>(mPattern);
}
operator Pattern&()
{
return *reinterpret_cast<Pattern*>(mPattern);
@ -135,6 +146,16 @@ public:
aDT->DrawSurface(mSurface, mDest, mSource, mSurfOptions, mOptions);
}
void Log(TreeLog& aStream) const override
{
aStream << "[DrawSurface surf=" << mSurface;
aStream << " dest=" << mDest;
aStream << " src=" << mSource;
aStream << " surfOpt=" << mSurfOptions;
aStream << " opt=" << mOptions;
aStream << "]";
}
static const bool AffectsSnapshot = true;
static const CommandType Type = CommandType::DRAWSURFACE;
@ -175,6 +196,17 @@ public:
aDT->DrawSurfaceWithShadow(mSurface, mDest, mColor, mOffset, mSigma, mOperator);
}
void Log(TreeLog& aStream) const override
{
aStream << "[DrawSurfaceWithShadow surf=" << mSurface;
aStream << " dest=" << mDest;
aStream << " color=" << mColor;
aStream << " offset=" << mOffset;
aStream << " sigma=" << mSigma;
aStream << " op=" << mOperator;
aStream << "]";
}
static const bool AffectsSnapshot = true;
static const CommandType Type = CommandType::DRAWSURFACEWITHSHADOW;
@ -212,6 +244,15 @@ public:
aDT->DrawFilter(filter, mSourceRect, mDestPoint, mOptions);
}
void Log(TreeLog& aStream) const override
{
aStream << "[DrawFilter surf=" << mFilter;
aStream << " src=" << mSourceRect;
aStream << " dest=" << mDestPoint;
aStream << " opt=" << mOptions;
aStream << "]";
}
static const bool AffectsSnapshot = true;
static const CommandType Type = CommandType::DRAWFILTER;
@ -241,6 +282,11 @@ public:
aDT->ClearRect(mRect);
}
void Log(TreeLog& aStream) const override
{
aStream << "[ClearRect rect=" << mRect << "]";
}
static const bool AffectsSnapshot = true;
static const CommandType Type = CommandType::CLEARRECT;
@ -276,6 +322,14 @@ public:
aDT->CopySurface(mSurface, mSourceRect, IntPoint(uint32_t(dest.x), uint32_t(dest.y)));
}
void Log(TreeLog& aStream) const override
{
aStream << "[CopySurface surf=" << mSurface;
aStream << " src=" << mSourceRect;
aStream << " dest=" << mDestination;
aStream << "]";
}
static const bool AffectsSnapshot = true;
static const CommandType Type = CommandType::COPYSURFACE;
@ -314,6 +368,14 @@ public:
return true;
}
void Log(TreeLog& aStream) const override
{
aStream << "[FillRect rect=" << mRect;
aStream << " pattern=" << mPattern.Get();
aStream << " opt=" << mOptions;
aStream << "]";
}
static const bool AffectsSnapshot = true;
static const CommandType Type = CommandType::FILLRECT;
@ -347,6 +409,14 @@ public:
aDT->StrokeRect(mRect, mPattern, mStrokeOptions, mOptions);
}
void Log(TreeLog& aStream) const override
{
aStream << "[StrokeRect rect=" << mRect;
aStream << " pattern=" << mPattern.Get();
aStream << " opt=" << mOptions;
aStream << "]";
}
static const bool AffectsSnapshot = true;
static const CommandType Type = CommandType::STROKERECT;
@ -382,6 +452,15 @@ public:
aDT->StrokeLine(mStart, mEnd, mPattern, mStrokeOptions, mOptions);
}
void Log(TreeLog& aStream) const override
{
aStream << "[StrokeLine start=" << mStart;
aStream << " end=" << mEnd;
aStream << " pattern=" << mPattern.Get();
aStream << " opt=" << mOptions;
aStream << "]";
}
static const bool AffectsSnapshot = true;
static const CommandType Type = CommandType::STROKELINE;
@ -421,6 +500,14 @@ public:
return true;
}
void Log(TreeLog& aStream) const override
{
aStream << "[FillCommand path=" << mPath;
aStream << " pattern=" << mPattern.Get();
aStream << " opt=" << mOptions;
aStream << "]";
}
static const bool AffectsSnapshot = true;
static const CommandType Type = CommandType::FILL;
@ -501,6 +588,14 @@ public:
return true;
}
void Log(TreeLog& aStream) const override
{
aStream << "[Stroke path=" << mPath;
aStream << " pattern=" << mPattern.Get();
aStream << " opt=" << mOptions;
aStream << "]";
}
static const bool AffectsSnapshot = true;
static const CommandType Type = CommandType::STROKE;
@ -544,6 +639,15 @@ public:
aDT->FillGlyphs(mFont, buf, mPattern, mOptions);
}
void Log(TreeLog& aStream) const override
{
aStream << "[FillGlyphs font=" << mFont;
aStream << " glyphCount=" << mGlyphs.size();
aStream << " pattern=" << mPattern.Get();
aStream << " opt=" << mOptions;
aStream << "]";
}
static const bool AffectsSnapshot = true;
static const CommandType Type = CommandType::FILLGLYPHS;
@ -589,6 +693,15 @@ public:
aDT->StrokeGlyphs(mFont, buf, mPattern, mStrokeOptions, mOptions);
}
void Log(TreeLog& aStream) const override
{
aStream << "[StrokeGlyphs font=" << mFont;
aStream << " glyphCount=" << mGlyphs.size();
aStream << " pattern=" << mPattern.Get();
aStream << " opt=" << mOptions;
aStream << "]";
}
static const bool AffectsSnapshot = true;
static const CommandType Type = CommandType::STROKEGLYPHS;
@ -622,6 +735,14 @@ public:
aDT->Mask(mSource, mMask, mOptions);
}
void Log(TreeLog& aStream) const override
{
aStream << "[Mask source=" << mSource.Get();
aStream << " mask=" << mMask.Get();
aStream << " opt=" << mOptions;
aStream << "]";
}
static const bool AffectsSnapshot = true;
static const CommandType Type = CommandType::MASK;
@ -656,6 +777,15 @@ public:
aDT->MaskSurface(mSource, mMask, mOffset, mOptions);
}
void Log(TreeLog& aStream) const override
{
aStream << "[Mask source=" << mSource.Get();
aStream << " mask=" << mMask;
aStream << " offset=" << &mOffset;
aStream << " opt=" << mOptions;
aStream << "]";
}
static const bool AffectsSnapshot = true;
static const CommandType Type = CommandType::MASKSURFACE;
@ -685,6 +815,11 @@ public:
aDT->PushClip(mPath);
}
void Log(TreeLog& aStream) const override
{
aStream << "[PushClip path=" << mPath << "]";
}
static const bool AffectsSnapshot = false;
static const CommandType Type = CommandType::PUSHCLIP;
@ -711,6 +846,11 @@ public:
aDT->PushClipRect(mRect);
}
void Log(TreeLog& aStream) const override
{
aStream << "[PushClipRect rect=" << mRect << "]";
}
static const bool AffectsSnapshot = false;
static const CommandType Type = CommandType::PUSHCLIPRECT;
@ -748,6 +888,17 @@ public:
mMaskTransform, mBounds, mCopyBackground);
}
void Log(TreeLog& aStream) const override
{
aStream << "[PushLayer opaque=" << mOpaque;
aStream << " opacity=" << mOpacity;
aStream << " mask=" << mMask;
aStream << " maskTransform=" << mMaskTransform;
aStream << " bounds=" << mBounds;
aStream << " copyBackground=" << mCopyBackground;
aStream << "]";
}
static const bool AffectsSnapshot = false;
static const CommandType Type = CommandType::PUSHLAYER;
@ -778,6 +929,11 @@ public:
aDT->PopClip();
}
void Log(TreeLog& aStream) const override
{
aStream << "[PopClip]";
}
static const bool AffectsSnapshot = false;
static const CommandType Type = CommandType::POPCLIP;
};
@ -800,6 +956,11 @@ public:
aDT->PopLayer();
}
void Log(TreeLog& aStream) const override
{
aStream << "[PopLayer]";
}
static const bool AffectsSnapshot = true;
static const CommandType Type = CommandType::POPLAYER;
};
@ -828,6 +989,11 @@ public:
}
}
void Log(TreeLog& aStream) const override
{
aStream << "[SetTransform transform=" << mTransform << "]";
}
static const bool AffectsSnapshot = false;
static const CommandType Type = CommandType::SETTRANSFORM;
@ -855,6 +1021,11 @@ public:
aDT->SetPermitSubpixelAA(mPermitSubpixelAA);
}
void Log(TreeLog& aStream) const override
{
aStream << "[SetPermitSubpixelAA permitSubpixelAA=" << mPermitSubpixelAA << "]";
}
static const bool AffectsSnapshot = false;
static const CommandType Type = CommandType::SETPERMITSUBPIXELAA;
@ -880,6 +1051,11 @@ public:
aDT->Flush();
}
void Log(TreeLog& aStream) const override
{
aStream << "[Flush]";
}
static const bool AffectsSnapshot = false;
static const CommandType Type = CommandType::FLUSH;
};
@ -902,6 +1078,11 @@ public:
aDT->Blur(mBlur);
}
void Log(TreeLog& aStream) const override
{
aStream << "[Blur]";
}
static const bool AffectsSnapshot = true;
static const CommandType Type = CommandType::BLUR;

View File

@ -443,5 +443,15 @@ DrawTargetCaptureImpl::CreateFilter(FilterType aType)
}
}
void
DrawTargetCaptureImpl::Dump()
{
TreeLog output;
output << "DrawTargetCapture(" << (void*)(this) << ")\n";
TreeAutoIndent indent(output);
mCommands.Log(output);
output << "\n";
}
} // namespace gfx
} // namespace mozilla

View File

@ -149,6 +149,8 @@ public:
bool ContainsOnlyColoredGlyphs(RefPtr<ScaledFont>& aScaledFont, Color& aColor, std::vector<Glyph>& aGlyphs) override;
void Dump() override;
protected:
virtual ~DrawTargetCaptureImpl();

View File

@ -20,6 +20,7 @@
#if defined(MOZ_WIDGET_ANDROID)
#include "nsDebug.h"
#endif
#include "2D.h"
#include "Point.h"
#include "BaseRect.h"
#include "Matrix.h"
@ -357,6 +358,12 @@ public:
}
return *this;
}
Log &operator <<(const Color& aColor) {
if (MOZ_UNLIKELY(LogIt())) {
mMessage << "Color(" << aColor.r << ", " << aColor.g << ", " << aColor.b << ", " << aColor.a << ")";
}
return *this;
}
template <typename T, typename Sub, typename Coord>
Log &operator <<(const BasePoint<T, Sub, Coord>& aPoint) {
if (MOZ_UNLIKELY(LogIt())) {
@ -394,6 +401,205 @@ public:
return *this;
}
Log &operator<<(const SourceSurface* aSurface) {
if (MOZ_UNLIKELY(LogIt())) {
mMessage << "SourceSurface(" << (void*)(aSurface) << ")";
}
return *this;
}
Log &operator<<(const Path* aPath) {
if (MOZ_UNLIKELY(LogIt())) {
mMessage << "Path(" << (void*)(aPath) << ")";
}
return *this;
}
Log &operator<<(const Pattern* aPattern) {
if (MOZ_UNLIKELY(LogIt())) {
mMessage << "Pattern(" << (void*)(aPattern) << ")";
}
return *this;
}
Log &operator<<(const ScaledFont* aFont) {
if (MOZ_UNLIKELY(LogIt())) {
mMessage << "ScaledFont(" << (void*)(aFont) << ")";
}
return *this;
}
Log &operator<<(const FilterNode* aFilter) {
if (MOZ_UNLIKELY(LogIt())) {
mMessage << "FilterNode(" << (void*)(aFilter) << ")";
}
return *this;
}
Log &operator<<(const DrawOptions& aOptions) {
if (MOZ_UNLIKELY(LogIt())) {
mMessage << "DrawOptions(" << aOptions.mAlpha << ", ";
(*this) << aOptions.mCompositionOp;
mMessage << ", ";
(*this) << aOptions.mAntialiasMode;
mMessage << ")";
}
return *this;
}
Log &operator<<(const DrawSurfaceOptions& aOptions) {
if (MOZ_UNLIKELY(LogIt())) {
mMessage << "DrawSurfaceOptions(";
(*this) << aOptions.mSamplingFilter;
mMessage << ", ";
(*this) << aOptions.mSamplingBounds;
mMessage << ")";
}
return *this;
}
Log& operator<<(SamplingBounds aBounds) {
if (MOZ_UNLIKELY(LogIt())) {
switch(aBounds) {
case SamplingBounds::UNBOUNDED:
mMessage << "SamplingBounds::UNBOUNDED";
break;
case SamplingBounds::BOUNDED:
mMessage << "SamplingBounds::BOUNDED";
break;
default:
mMessage << "Invalid SamplingBounds (" << (int)aBounds << ")";
break;
}
}
return *this;
}
Log& operator<<(SamplingFilter aFilter) {
if (MOZ_UNLIKELY(LogIt())) {
switch(aFilter) {
case SamplingFilter::GOOD:
mMessage << "SamplingFilter::GOOD";
break;
case SamplingFilter::LINEAR:
mMessage << "SamplingFilter::LINEAR";
break;
case SamplingFilter::POINT:
mMessage << "SamplingFilter::POINT";
break;
default:
mMessage << "Invalid SamplingFilter (" << (int)aFilter << ")";
break;
}
}
return *this;
}
Log& operator<<(AntialiasMode aMode) {
if (MOZ_UNLIKELY(LogIt())) {
switch(aMode) {
case AntialiasMode::NONE:
mMessage << "AntialiasMode::NONE";
break;
case AntialiasMode::GRAY:
mMessage << "AntialiasMode::GRAY";
break;
case AntialiasMode::SUBPIXEL:
mMessage << "AntialiasMode::SUBPIXEL";
break;
case AntialiasMode::DEFAULT:
mMessage << "AntialiasMode::DEFAULT";
break;
default:
mMessage << "Invalid AntialiasMode (" << (int)aMode << ")";
break;
}
}
return *this;
}
Log& operator<<(CompositionOp aOp) {
if (MOZ_UNLIKELY(LogIt())) {
switch(aOp) {
case CompositionOp::OP_OVER:
mMessage << "CompositionOp::OP_OVER";
break;
case CompositionOp::OP_ADD:
mMessage << "CompositionOp::OP_ADD";
break;
case CompositionOp::OP_ATOP:
mMessage << "CompositionOp::OP_ATOP";
break;
case CompositionOp::OP_OUT:
mMessage << "CompositionOp::OP_OUT";
break;
case CompositionOp::OP_IN:
mMessage << "CompositionOp::OP_IN";
break;
case CompositionOp::OP_SOURCE:
mMessage << "CompositionOp::OP_SOURCE";
break;
case CompositionOp::OP_DEST_IN:
mMessage << "CompositionOp::OP_DEST_IN";
break;
case CompositionOp::OP_DEST_OUT:
mMessage << "CompositionOp::OP_DEST_OUT";
break;
case CompositionOp::OP_DEST_OVER:
mMessage << "CompositionOp::OP_DEST_OVER";
break;
case CompositionOp::OP_DEST_ATOP:
mMessage << "CompositionOp::OP_DEST_ATOP";
break;
case CompositionOp::OP_XOR:
mMessage << "CompositionOp::OP_XOR";
break;
case CompositionOp::OP_MULTIPLY:
mMessage << "CompositionOp::OP_MULTIPLY";
break;
case CompositionOp::OP_SCREEN:
mMessage << "CompositionOp::OP_SCREEN";
break;
case CompositionOp::OP_OVERLAY:
mMessage << "CompositionOp::OP_OVERLAY";
break;
case CompositionOp::OP_DARKEN:
mMessage << "CompositionOp::OP_DARKEN";
break;
case CompositionOp::OP_LIGHTEN:
mMessage << "CompositionOp::OP_LIGHTEN";
break;
case CompositionOp::OP_COLOR_DODGE:
mMessage << "CompositionOp::OP_COLOR_DODGE";
break;
case CompositionOp::OP_COLOR_BURN:
mMessage << "CompositionOp::OP_COLOR_BURN";
break;
case CompositionOp::OP_HARD_LIGHT:
mMessage << "CompositionOp::OP_HARD_LIGHT";
break;
case CompositionOp::OP_SOFT_LIGHT:
mMessage << "CompositionOp::OP_SOFT_LIGHT";
break;
case CompositionOp::OP_DIFFERENCE:
mMessage << "CompositionOp::OP_DIFFERENCE";
break;
case CompositionOp::OP_EXCLUSION:
mMessage << "CompositionOp::OP_EXCLUSION";
break;
case CompositionOp::OP_HUE:
mMessage << "CompositionOp::OP_HUE";
break;
case CompositionOp::OP_SATURATION:
mMessage << "CompositionOp::OP_SATURATION";
break;
case CompositionOp::OP_COLOR:
mMessage << "CompositionOp::OP_COLOR";
break;
case CompositionOp::OP_LUMINOSITY:
mMessage << "CompositionOp::OP_LUMINOSITY";
break;
case CompositionOp::OP_COUNT:
mMessage << "CompositionOp::OP_COUNT";
break;
default:
mMessage << "Invalid CompositionOp (" << (int)aOp << ")";
break;
}
}
return *this;
}
Log& operator<<(SurfaceFormat aFormat) {
if (MOZ_UNLIKELY(LogIt())) {
switch(aFormat) {
@ -637,7 +843,10 @@ public:
return *this;
}
if (mStartOfLine) {
mLog << '[' << mPrefix << "] " << std::string(mDepth * INDENT_PER_LEVEL, ' ');
if (!mPrefix.empty()) {
mLog << '[' << mPrefix << "] ";
}
mLog << std::string(mDepth * INDENT_PER_LEVEL, ' ');
mStartOfLine = false;
}
mLog << aObject;

View File

@ -291,6 +291,10 @@ PaintThread::PaintContents(CapturedPaintState* aState,
MOZ_ASSERT(NS_IsMainThread());
MOZ_ASSERT(aState);
if (gfxPrefs::LayersOMTPDumpCapture() && aState->mCapture) {
aState->mCapture->Dump();
}
RefPtr<CompositorBridgeChild> cbc(CompositorBridgeChild::Get());
RefPtr<CapturedPaintState> state(aState);
@ -361,6 +365,10 @@ PaintThread::PaintTiledContents(CapturedTiledPaintState* aState)
MOZ_ASSERT(NS_IsMainThread());
MOZ_ASSERT(aState);
if (gfxPrefs::LayersOMTPDumpCapture() && aState->mCapture) {
aState->mCapture->Dump();
}
RefPtr<CompositorBridgeChild> cbc(CompositorBridgeChild::Get());
RefPtr<CapturedTiledPaintState> state(aState);

View File

@ -8,6 +8,17 @@
#include "mozilla/Likely.h"
#include "mozilla/Types.h" // for decltype
#include "mozilla/layers/SharedSurfacesChild.h"
#ifdef DEBUG
/**
* If defined, this makes SourceSurfaceSharedData::Finalize memory protect the
* underlying shared buffer in the producing process (the content or UI
* process). Given flushing the page table is expensive, and its utility is
* predominantly diagnostic (in case of overrun), turn it off by default.
*/
#define SHARED_SURFACE_PROTECT_FINALIZED
#endif
namespace mozilla {
namespace gfx {
@ -52,7 +63,8 @@ SourceSurfaceSharedDataWrapper::Init(SourceSurfaceSharedData* aSurface)
bool
SourceSurfaceSharedData::Init(const IntSize &aSize,
int32_t aStride,
SurfaceFormat aFormat)
SurfaceFormat aFormat,
bool aShare /* = true */)
{
mSize = aSize;
mStride = aStride;
@ -66,6 +78,10 @@ SourceSurfaceSharedData::Init(const IntSize &aSize,
return false;
}
if (aShare) {
layers::SharedSurfacesChild::Share(this);
}
return true;
}
@ -126,12 +142,11 @@ SourceSurfaceSharedData::CloseHandleInternal()
if (mClosed) {
MOZ_ASSERT(mHandleCount == 0);
MOZ_ASSERT(mFinalized);
MOZ_ASSERT(mShared);
return;
}
if (mFinalized && mShared) {
if (mShared) {
mBuf->CloseHandle();
mClosed = true;
}
@ -143,7 +158,14 @@ SourceSurfaceSharedData::ReallocHandle()
MutexAutoLock lock(mMutex);
MOZ_ASSERT(mHandleCount > 0);
MOZ_ASSERT(mClosed);
MOZ_ASSERT(mFinalized);
if (NS_WARN_IF(!mFinalized)) {
// We haven't finished populating the surface data yet, which means we are
// out of luck, as we have no means of synchronizing with the producer to
// write new data to a new buffer. This should be fairly rare, caused by a
// crash in the GPU process, while we were decoding an image.
return false;
}
size_t len = GetAlignedDataLength();
RefPtr<SharedMemoryBasic> buf = new SharedMemoryBasic();
@ -154,7 +176,9 @@ SourceSurfaceSharedData::ReallocHandle()
size_t copyLen = GetDataLength();
memcpy(buf->memory(), mBuf->memory(), copyLen);
#ifdef SHARED_SURFACE_PROTECT_FINALIZED
buf->Protect(static_cast<char*>(buf->memory()), len, RightsRead);
#endif
if (mMapCount > 0 && !mOldBuf) {
mOldBuf = Move(mBuf);
@ -169,14 +193,14 @@ void
SourceSurfaceSharedData::Finalize()
{
MutexAutoLock lock(mMutex);
MOZ_ASSERT(!mClosed);
MOZ_ASSERT(!mFinalized);
#ifdef SHARED_SURFACE_PROTECT_FINALIZED
size_t len = GetAlignedDataLength();
mBuf->Protect(static_cast<char*>(mBuf->memory()), len, RightsRead);
#endif
mFinalized = true;
CloseHandleInternal();
}
} // namespace gfx

View File

@ -136,9 +136,16 @@ public:
{
}
/**
* Initialize the surface by creating a shared memory buffer with a size
* determined by aSize, aStride and aFormat. If aShare is true, it will also
* immediately attempt to share the surface with the GPU process via
* SharedSurfacesChild.
*/
bool Init(const IntSize& aSize,
int32_t aStride,
SurfaceFormat aFormat);
SurfaceFormat aFormat,
bool aShare = true);
uint8_t* GetData() override
{
@ -235,7 +242,7 @@ public:
/**
* Signals we have finished writing to the buffer and it may be marked as
* read only. May release the handle if possible (see CloseHandleInternal).
* read only.
*/
void Finalize();

View File

@ -184,26 +184,23 @@ SharedSurfacesChild::DestroySharedUserData(void* aClosure)
}
/* static */ nsresult
SharedSurfacesChild::Share(SourceSurfaceSharedData* aSurface,
WebRenderLayerManager* aManager,
wr::IpcResourceUpdateQueue& aResources,
wr::ImageKey& aKey)
SharedSurfacesChild::ShareInternal(SourceSurfaceSharedData* aSurface,
SharedUserData** aUserData)
{
MOZ_ASSERT(NS_IsMainThread());
MOZ_ASSERT(aSurface);
MOZ_ASSERT(aManager);
MOZ_ASSERT(aUserData);
CompositorManagerChild* manager = CompositorManagerChild::GetInstance();
if (NS_WARN_IF(!manager || !manager->CanSend())) {
// We cannot try to share the surface, most likely because the GPU process
// crashed. Ideally, we would retry when it is ready, but the handles may be
// a scarce resource, which can cause much more serious problems if we run
// out. Better to copy into a fresh buffer later.
aSurface->FinishedSharing();
return NS_ERROR_NOT_INITIALIZED;
}
// The producers of SourceSurfaceSharedData surfaces promise to increment the
// invalidation counter each time the surface has changed. We can use this
// counter to determine whether or not we should update our paired ImageKey.
int32_t invalidations = aSurface->Invalidations();
static UserDataKey sSharedKey;
SharedUserData* data =
static_cast<SharedUserData*>(aSurface->GetUserData(&sSharedKey));
@ -215,8 +212,8 @@ SharedSurfacesChild::Share(SourceSurfaceSharedData* aSurface,
// to the GPU process crashing. All previous mappings have been released.
data->SetId(manager->GetNextExternalImageId());
} else if (data->IsShared()) {
// It has already been shared with the GPU process, reuse the id.
aKey = data->UpdateKey(aManager, aResources, invalidations);
// It has already been shared with the GPU process.
*aUserData = data;
return NS_OK;
}
@ -234,7 +231,7 @@ SharedSurfacesChild::Share(SourceSurfaceSharedData* aSurface,
if (pid == base::GetCurrentProcId()) {
SharedSurfacesParent::AddSameProcess(data->Id(), aSurface);
data->MarkShared();
aKey = data->UpdateKey(aManager, aResources, invalidations);
*aUserData = data;
return NS_OK;
}
@ -269,10 +266,73 @@ SharedSurfacesChild::Share(SourceSurfaceSharedData* aSurface,
SurfaceDescriptorShared(aSurface->GetSize(),
aSurface->Stride(),
format, handle));
aKey = data->UpdateKey(aManager, aResources, invalidations);
*aUserData = data;
return NS_OK;
}
/* static */ void
SharedSurfacesChild::Share(SourceSurfaceSharedData* aSurface)
{
MOZ_ASSERT(aSurface);
// The IPDL actor used for sharing can only be accessed on the main thread, so
// we need to dispatch if we are off the main thread. However, there is no real
// danger if we end up racing, because if the surface is already shared, this
// method will do nothing.
if (!NS_IsMainThread()) {
class ShareRunnable final : public Runnable
{
public:
explicit ShareRunnable(SourceSurfaceSharedData* aSurface)
: Runnable("SharedSurfacesChild::Share")
, mSurface(aSurface)
{ }
NS_IMETHOD Run() override
{
SharedUserData* unused = nullptr;
SharedSurfacesChild::ShareInternal(mSurface, &unused);
return NS_OK;
}
private:
RefPtr<SourceSurfaceSharedData> mSurface;
};
SystemGroup::Dispatch(TaskCategory::Other,
MakeAndAddRef<ShareRunnable>(aSurface));
return;
}
SharedUserData* unused = nullptr;
SharedSurfacesChild::ShareInternal(aSurface, &unused);
}
/* static */ nsresult
SharedSurfacesChild::Share(SourceSurfaceSharedData* aSurface,
WebRenderLayerManager* aManager,
wr::IpcResourceUpdateQueue& aResources,
wr::ImageKey& aKey)
{
MOZ_ASSERT(NS_IsMainThread());
MOZ_ASSERT(aSurface);
MOZ_ASSERT(aManager);
// The producers of SourceSurfaceSharedData surfaces promise to increment the
// invalidation counter each time the surface has changed. We can use this
// counter to determine whether or not we should update our paired ImageKey.
int32_t invalidations = aSurface->Invalidations();
SharedUserData* data = nullptr;
nsresult rv = SharedSurfacesChild::ShareInternal(aSurface, &data);
if (NS_SUCCEEDED(rv)) {
MOZ_ASSERT(data);
aKey = data->UpdateKey(aManager, aResources, invalidations);
}
return rv;
}
/* static */ nsresult
SharedSurfacesChild::Share(ImageContainer* aContainer,
WebRenderLayerManager* aManager,

View File

@ -31,11 +31,32 @@ class WebRenderLayerManager;
class SharedSurfacesChild final
{
public:
/**
* Request that the surface be mapped into the compositor thread's memory
* space. This is useful when the caller itself has no present need for
* the surface to be mapped, but knows there will be such a need in the
* future. This may be called from any thread, but it may cause a dispatch to
* the main thread.
*/
static void Share(gfx::SourceSurfaceSharedData* aSurface);
/**
* Request that the surface be mapped into the compositor thread's memory
* space, and a valid ImageKey be generated for it for use with WebRender.
* This must be called from the main thread.
*/
static nsresult Share(gfx::SourceSurfaceSharedData* aSurface,
WebRenderLayerManager* aManager,
wr::IpcResourceUpdateQueue& aResources,
wr::ImageKey& aKey);
/**
* Request that the first surface in the image container's current images be
* mapped into the compositor thread's memory space, and a valid ImageKey be
* generated for it for use with WebRender. If a different method should be
* used to share the image data for this particular container, it will return
* NS_ERROR_NOT_IMPLEMENTED. This must be called from the main thread.
*/
static nsresult Share(ImageContainer* aContainer,
WebRenderLayerManager* aManager,
wr::IpcResourceUpdateQueue& aResources,
@ -48,6 +69,9 @@ private:
class ImageKeyData;
class SharedUserData;
static nsresult ShareInternal(gfx::SourceSurfaceSharedData* aSurface,
SharedUserData** aUserData);
static void Unshare(const wr::ExternalImageId& aId, nsTArray<ImageKeyData>& aKeys);
static void DestroySharedUserData(void* aClosure);
};

View File

@ -615,6 +615,7 @@ private:
DECL_GFX_PREF(Once, "layers.mlgpu.enable-container-resizing", AdvancedLayersEnableContainerResizing, bool, true);
DECL_GFX_PREF(Once, "layers.offmainthreadcomposition.force-disabled", LayersOffMainThreadCompositionForceDisabled, bool, false);
DECL_GFX_PREF(Live, "layers.offmainthreadcomposition.frame-rate", LayersCompositionFrameRate, int32_t,-1);
DECL_GFX_PREF(Live, "layers.omtp.dump-capture", LayersOMTPDumpCapture, bool, false);
DECL_GFX_PREF(Live, "layers.omtp.paint-workers", LayersOMTPPaintWorkers, int32_t, 1);
DECL_GFX_PREF(Live, "layers.omtp.release-capture-on-main-thread", LayersOMTPReleaseCaptureOnMainThread, bool, false);
DECL_GFX_PREF(Live, "layers.orientation.sync.timeout", OrientationSyncMillis, uint32_t, (uint32_t)0);

View File

@ -2912,13 +2912,7 @@ gfxFontGroup::FindFontForChar(uint32_t aCh, uint32_t aPrevCh, uint32_t aNextCh,
// fallback within the family to handle cases where some faces
// such as Italic or Black have reduced character sets compared
// to the family's Regular face.
gfxFontEntry* fe = firstFont->GetFontEntry();
if (!fe->IsNormalStyle()) {
// If style/weight/stretch was not Normal, see if we can
// fall back to a next-best face (e.g. Arial Black -> Bold,
// or Arial Narrow -> Regular).
font = FindFallbackFaceForChar(mFonts[0].Family(), aCh);
}
font = FindFallbackFaceForChar(mFonts[0].Family(), aCh);
}
if (font) {
*aMatchType = gfxTextRange::kFontGroup;
@ -3029,8 +3023,7 @@ gfxFontGroup::FindFontForChar(uint32_t aCh, uint32_t aPrevCh, uint32_t aNextCh,
// fallback to handle styles with reduced character sets (see
// also above).
fe = ff.FontEntry();
if (!fe->mIsUserFontContainer && !fe->IsUserFont() &&
!fe->IsNormalStyle()) {
if (!fe->mIsUserFontContainer && !fe->IsUserFont()) {
font = FindFallbackFaceForChar(ff.Family(), aCh);
if (font) {
*aMatchType = gfxTextRange::kFontGroup;
@ -3423,20 +3416,15 @@ gfxFontGroup::WhichPrefFontSupportsChar(uint32_t aCh, uint32_t aNextCh)
return prefFont;
}
// If we requested a styled font (bold and/or italic), and the char
// was not available, check the regular face as well.
if (!fe->IsNormalStyle()) {
// If style/weight/stretch was not Normal, see if we can
// fall back to a next-best face (e.g. Arial Black -> Bold,
// or Arial Narrow -> Regular).
gfxFont* prefFont = FindFallbackFaceForChar(family, aCh);
if (prefFont) {
mLastPrefFamily = family;
mLastPrefFont = prefFont;
mLastPrefLang = charLang;
mLastPrefFirstFont = (i == 0 && j == 0);
return prefFont;
}
// If the char was not available, see if we can fall back to an
// alternative face in the same family.
gfxFont* prefFont = FindFallbackFaceForChar(family, aCh);
if (prefFont) {
mLastPrefFamily = family;
mLastPrefFont = prefFont;
mLastPrefLang = charLang;
mLastPrefFirstFont = (i == 0 && j == 0);
return prefFont;
}
}
}

View File

@ -577,12 +577,7 @@ RasterImage::GetFrameAtSize(const IntSize& aSize,
#endif
auto result = GetFrameInternal(aSize, Nothing(), aWhichFrame, aFlags);
RefPtr<SourceSurface> surf = mozilla::Get<2>(result).forget();
// If we are here, it suggests the image is embedded in a canvas or some
// other path besides layers, and we won't need the file handle.
MarkSurfaceShared(surf);
return surf.forget();
return mozilla::Get<2>(result).forget();
}
Tuple<ImgDrawResult, IntSize, RefPtr<SourceSurface>>

View File

@ -776,12 +776,7 @@ VectorImage::GetFrameAtSize(const IntSize& aSize,
#endif
auto result = GetFrameInternal(aSize, Nothing(), aWhichFrame, aFlags);
RefPtr<SourceSurface> surf = Get<2>(result).forget();
// If we are here, it suggests the image is embedded in a canvas or some
// other path besides layers, and we won't need the file handle.
MarkSurfaceShared(surf);
return surf.forget();
return Get<2>(result).forget();
}
Tuple<ImgDrawResult, IntSize, RefPtr<SourceSurface>>
@ -1042,10 +1037,6 @@ VectorImage::Draw(gfxContext* aContext,
new gfxSurfaceDrawable(sourceSurface, params.size);
Show(drawable, params);
SendFrameComplete(didCache, params.flags);
// Image got put into a painted layer, it will not be shared with another
// process.
MarkSurfaceShared(sourceSurface);
return ImgDrawResult::SUCCESS;
}

View File

@ -138,19 +138,6 @@ ClearSurface(DataSourceSurface* aSurface, const IntSize& aSize, SurfaceFormat aF
return true;
}
void
MarkSurfaceShared(SourceSurface* aSurface)
{
// Depending on what requested the image decoding, the buffer may or may not
// end up being shared with another process (e.g. put in a painted layer,
// used inside a canvas). If not shared, we should ensure we are not keeping
// the handle only because we have yet to share it.
if (aSurface && aSurface->GetType() == SurfaceType::DATA_SHARED) {
auto sharedSurface = static_cast<SourceSurfaceSharedData*>(aSurface);
sharedSurface->FinishedSharing();
}
}
// Returns true if an image of aWidth x aHeight is allowed and legal.
static bool
AllowedImageSize(int32_t aWidth, int32_t aHeight)
@ -586,9 +573,6 @@ bool imgFrame::Draw(gfxContext* aContext, const ImageRegion& aRegion,
aSamplingFilter, aImageFlags, aOpacity);
}
// Image got put into a painted layer, it will not be shared with another
// process.
MarkSurfaceShared(surf);
return true;
}

View File

@ -527,8 +527,6 @@ private:
RefPtr<imgFrame> mFrame;
};
void MarkSurfaceShared(gfx::SourceSurface* aSurface);
} // namespace image
} // namespace mozilla

View File

@ -224,8 +224,39 @@ class BumpChunk : public SingleLinkedListElement<BumpChunk>
sizeof(uintptr_t) == 4 ? uintptr_t(0x4c69666f) : uintptr_t(0x4c69666f42756d70);
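// (The 32-bit magic spells "Lifo" in ASCII; the 64-bit magic spells
// "LifoBump".)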
#endif
// Poison the memory with memset in order to catch use-after-free errors
// (the undefinedChunkMemory pattern) and use-before-init errors (the
// uninitializedChunkMemory pattern).
#if defined(DEBUG)
# define LIFO_HAVE_MEM_CHECKS 1
// Byte used for poisoning unused memory after releasing memory.
static constexpr int undefinedChunkMemory = 0xcd;
// Byte used for poisoning uninitialized memory after reserving memory.
static constexpr int uninitializedChunkMemory = 0xce;
# define LIFO_MAKE_MEM_NOACCESS(addr, size) \
do { \
uint8_t* base = (addr); \
size_t sz = (size); \
memset(base, undefinedChunkMemory, sz); \
MOZ_MAKE_MEM_NOACCESS(base, sz); \
} while (0)
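// In LIFO_MAKE_MEM_UNDEFINED below, the first MOZ_MAKE_MEM_UNDEFINED lifts
// any no-access poisoning so that the memset is permitted to write; the
// second re-marks the range as undefined so memory checkers still report
// reads that happen before initialization.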
# define LIFO_MAKE_MEM_UNDEFINED(addr, size) \
do { \
uint8_t* base = (addr); \
size_t sz = (size); \
MOZ_MAKE_MEM_UNDEFINED(base, sz); \
memset(base, uninitializedChunkMemory, sz); \
MOZ_MAKE_MEM_UNDEFINED(base, sz); \
} while (0)
#elif defined(MOZ_HAVE_MEM_CHECKS)
# define LIFO_HAVE_MEM_CHECKS 1
# define LIFO_MAKE_MEM_NOACCESS(addr, size) MOZ_MAKE_MEM_NOACCESS((addr), (size))
# define LIFO_MAKE_MEM_UNDEFINED(addr, size) MOZ_MAKE_MEM_UNDEFINED((addr), (size))
#endif
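// Editorial sketch (not part of this patch) of what the poisoning buys,
// assuming LifoAlloc's mark()/release() API in a DEBUG build:
//
//   LifoAlloc lifo(4096);
//   LifoAlloc::Mark mark = lifo.mark();
//   auto* p = static_cast<uint8_t*>(lifo.alloc(16)); // filled with 0xce
//   lifo.release(mark);                              // repoisoned to 0xcd
//
// Any later read through |p| is a use-after-free: plain DEBUG builds expose
// the 0xcd pattern, and Valgrind/ASan flag the access itself.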
void assertInvariants() {
MOZ_DIAGNOSTIC_ASSERT(magic_ == magicNumber);
@ -253,6 +284,12 @@ class BumpChunk : public SingleLinkedListElement<BumpChunk>
"Checked that the baked-in value correspond to computed value");
assertInvariants();
#if defined(LIFO_HAVE_MEM_CHECKS)
// The memory is freshly allocated and marked as undefined by the
// allocator of the BumpChunk. Instead, we mark this memory as
// no-access, as it has not been allocated within the BumpChunk.
LIFO_MAKE_MEM_NOACCESS(bump_, capacity_ - bump_);
#endif
}
// Cast |this| into a uint8_t* pointer.
@ -274,22 +311,14 @@ class BumpChunk : public SingleLinkedListElement<BumpChunk>
assertInvariants();
MOZ_ASSERT(begin() <= newBump);
MOZ_ASSERT(newBump <= capacity_);
#if defined(DEBUG) || defined(MOZ_HAVE_MEM_CHECKS)
uint8_t* prev = bump_;
#if defined(LIFO_HAVE_MEM_CHECKS)
// Poison/Unpoison memory that we just free'd/allocated.
if (bump_ > newBump)
LIFO_MAKE_MEM_NOACCESS(newBump, bump_ - newBump);
else if (newBump > bump_)
LIFO_MAKE_MEM_UNDEFINED(bump_, newBump - bump_);
#endif
bump_ = newBump;
#ifdef DEBUG
// Clobber the now-free space.
if (prev > bump_)
memset(bump_, undefinedChunkMemory, prev - bump_);
#endif
#if defined(MOZ_HAVE_MEM_CHECKS)
// Poison/Unpoison memory that we just free'd/allocated.
if (prev > bump_)
MOZ_MAKE_MEM_NOACCESS(bump_, prev - bump_);
else if (bump_ > prev)
MOZ_MAKE_MEM_UNDEFINED(prev, bump_ - prev);
#endif
}
public:

View File

@ -178,14 +178,14 @@ class GCSchedulingTunables
*
* Fraction of threshold.gcBytes() which triggers an incremental GC.
*/
UnprotectedData<float> allocThresholdFactor_;
UnprotectedData<double> allocThresholdFactor_;
/*
* JSGC_ALLOCATION_THRESHOLD_FACTOR_AVOID_INTERRUPT
*
* The same except when doing so would interrupt an already running GC.
*/
UnprotectedData<float> allocThresholdFactorAvoidInterrupt_;
UnprotectedData<double> allocThresholdFactorAvoidInterrupt_;
/*
* Number of bytes to allocate between incremental slices in GCs triggered
@ -256,8 +256,8 @@ class GCSchedulingTunables
size_t maxMallocBytes() const { return maxMallocBytes_; }
size_t gcMaxNurseryBytes() const { return gcMaxNurseryBytes_; }
size_t gcZoneAllocThresholdBase() const { return gcZoneAllocThresholdBase_; }
float allocThresholdFactor() const { return allocThresholdFactor_; }
float allocThresholdFactorAvoidInterrupt() const { return allocThresholdFactorAvoidInterrupt_; }
double allocThresholdFactor() const { return allocThresholdFactor_; }
double allocThresholdFactorAvoidInterrupt() const { return allocThresholdFactorAvoidInterrupt_; }
size_t zoneAllocDelayBytes() const { return zoneAllocDelayBytes_; }
bool isDynamicHeapGrowthEnabled() const { return dynamicHeapGrowthEnabled_; }
uint64_t highFrequencyThresholdUsec() const { return highFrequencyThresholdUsec_; }

View File

@ -49,7 +49,7 @@ class ZoneHeapThreshold
double gcHeapGrowthFactor() const { return gcHeapGrowthFactor_; }
size_t gcTriggerBytes() const { return gcTriggerBytes_; }
double allocTrigger(bool highFrequencyGC) const;
double eagerAllocTrigger(bool highFrequencyGC) const;
void updateAfterGC(size_t lastBytes, JSGCInvocationKind gckind,
const GCSchedulingTunables& tunables, const GCSchedulingState& state,

View File

@ -35,7 +35,8 @@ var template = function (set) {
// If we bailout in the inner loop, then x will have a smaller value
// than the number of iterations.
cont = assertEqIf(lastX > 0, x, set.length);
lastX = x;
if (inIon())
lastX = x;
x = 0;
}
return y;

View File

@ -9838,6 +9838,11 @@ CodeGenerator::linkSharedStubs(JSContext* cx)
bool
CodeGenerator::link(JSContext* cx, CompilerConstraintList* constraints)
{
// We cancel off-thread Ion compilations in a few places during GC, but if
// this compilation was performed off-thread it will already have been
// removed from the relevant lists by this point. Don't allow GC here.
JS::AutoAssertNoGC nogc(cx);
RootedScript script(cx, gen->info().script());
OptimizationLevel optimizationLevel = gen->optimizationInfo().level();
@ -9918,7 +9923,7 @@ CodeGenerator::link(JSContext* cx, CompilerConstraintList* constraints)
// read barriers which were skipped while compiling the script off thread.
Linker linker(masm);
AutoFlushICache afc("IonLink");
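// Using NoGC below matches the AutoAssertNoGC at the top of this function:
// if the allocation cannot be satisfied without collecting, it fails
// instead of triggering a GC.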
JitCode* code = linker.newCode<CanGC>(cx, ION_CODE, !patchableBackedges_.empty());
JitCode* code = linker.newCode<NoGC>(cx, ION_CODE, !patchableBackedges_.empty());
if (!code)
return false;

View File

@ -1855,7 +1855,7 @@ IonBuilder::inspectOpcode(JSOp op)
return jsop_deflexical(GET_UINT32_INDEX(pc));
case JSOP_DEFFUN:
return jsop_deffun(GET_UINT32_INDEX(pc));
return jsop_deffun();
case JSOP_EQ:
case JSOP_NE:
@ -12447,7 +12447,7 @@ IonBuilder::jsop_deflexical(uint32_t index)
}
AbortReasonOr<Ok>
IonBuilder::jsop_deffun(uint32_t index)
IonBuilder::jsop_deffun()
{
MOZ_ASSERT(usesEnvironmentChain());

View File

@ -504,7 +504,7 @@ class IonBuilder
AbortReasonOr<Ok> jsop_setarg(uint32_t arg);
AbortReasonOr<Ok> jsop_defvar(uint32_t index);
AbortReasonOr<Ok> jsop_deflexical(uint32_t index);
AbortReasonOr<Ok> jsop_deffun(uint32_t index);
AbortReasonOr<Ok> jsop_deffun();
AbortReasonOr<Ok> jsop_notearg();
AbortReasonOr<Ok> jsop_throwsetconst();
AbortReasonOr<Ok> jsop_checklexical();

View File

@ -738,8 +738,7 @@ class LNode
size_t numTemps() const {
return numTemps_;
}
virtual LDefinition* getTemp(size_t index) = 0;
virtual void setTemp(size_t index, const LDefinition& a) = 0;
inline LDefinition* getTemp(size_t index);
// Returns the number of successors of this instruction, if it is a control
// transfer instruction, or zero otherwise.
@ -968,12 +967,11 @@ class LPhi final : public LNode
MOZ_ASSERT(index < numOperands());
inputs_[index] = a;
}
LDefinition* getTemp(size_t index) override {
MOZ_CRASH("no temps");
}
void setTemp(size_t index, const LDefinition& temp) override {
MOZ_CRASH("no temps");
}
// Phis don't have temps, so calling numTemps/getTemp is pointless.
size_t numTemps() const = delete;
LDefinition* getTemp(size_t index) = delete;
size_t numSuccessors() const override {
return 0;
}
@ -1086,8 +1084,7 @@ namespace details {
template <size_t Defs, size_t Temps>
class LInstructionFixedDefsTempsHelper : public LInstruction
{
mozilla::Array<LDefinition, Defs> defs_;
mozilla::Array<LDefinition, Temps> temps_;
mozilla::Array<LDefinition, Defs + Temps> defsAndTemps_;
protected:
explicit LInstructionFixedDefsTempsHelper(uint32_t numOperands)
@ -1096,24 +1093,28 @@ namespace details {
public:
LDefinition* getDef(size_t index) final override {
return &defs_[index];
MOZ_ASSERT(index < Defs);
return &defsAndTemps_[index];
}
LDefinition* getTemp(size_t index) final override {
return &temps_[index];
LDefinition* getTemp(size_t index) {
MOZ_ASSERT(index < Temps);
return &defsAndTemps_[Defs + index];
}
void setDef(size_t index, const LDefinition& def) final override {
defs_[index] = def;
MOZ_ASSERT(index < Defs);
defsAndTemps_[index] = def;
}
void setTemp(size_t index, const LDefinition& a) final override {
temps_[index] = a;
void setTemp(size_t index, const LDefinition& a) {
MOZ_ASSERT(index < Temps);
defsAndTemps_[Defs + index] = a;
}
void setInt64Temp(size_t index, const LInt64Definition& a) {
#if JS_BITS_PER_WORD == 32
temps_[index] = a.low();
temps_[index + 1] = a.high();
setTemp(index, a.low());
setTemp(index + 1, a.high());
#else
temps_[index] = a.value();
setTemp(index, a.value());
#endif
}
@ -1121,11 +1122,10 @@ namespace details {
return 0;
}
MBasicBlock* getSuccessor(size_t i) const override {
MOZ_ASSERT(false);
return nullptr;
MOZ_CRASH("no successors");
}
void setSuccessor(size_t i, MBasicBlock* successor) override {
MOZ_ASSERT(false);
MOZ_CRASH("no successors");
}
// Default accessors, assuming a single input and output, respectively.
@ -1137,9 +1137,22 @@ namespace details {
MOZ_ASSERT(numDefs() == 1);
return getDef(0);
}
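// Computes the offset of the temp at |index| for an instruction with
// |numDefs| defs. Instantiating with <0, 0> works for any instruction
// because defsAndTemps_ sits at the same offset in every instantiation;
// this is what lets LNode::getTemp below avoid a virtual call.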
static size_t offsetOfTemp(uint32_t numDefs, uint32_t index) {
using T = LInstructionFixedDefsTempsHelper<0, 0>;
return offsetof(T, defsAndTemps_) + (numDefs + index) * sizeof(LDefinition);
}
};
} // namespace details
inline LDefinition*
LNode::getTemp(size_t index)
{
MOZ_ASSERT(index < numTemps());
using T = details::LInstructionFixedDefsTempsHelper<0, 0>;
uint8_t* p = reinterpret_cast<uint8_t*>(this) + T::offsetOfTemp(numDefs(), index);
return reinterpret_cast<LDefinition*>(p);
}
template <size_t Defs, size_t Operands, size_t Temps>
class LInstructionHelper : public details::LInstructionFixedDefsTempsHelper<Defs, Temps>
{

View File

@ -4659,12 +4659,12 @@ LIRGenerator::visitWasmCall(MWasmCall* ins)
}
LInstruction* lir;
if (ins->type() == MIRType::Int64) {
if (ins->type() == MIRType::Int64)
lir = new(alloc()) LWasmCallI64(args, ins->numOperands(), needsBoundsCheck);
} else {
uint32_t numDefs = (ins->type() != MIRType::None) ? 1 : 0;
lir = new(alloc()) LWasmCall(args, ins->numOperands(), numDefs, needsBoundsCheck);
}
else if (ins->type() == MIRType::None)
lir = new(alloc()) LWasmCallVoid(args, ins->numOperands(), needsBoundsCheck);
else
lir = new(alloc()) LWasmCall(args, ins->numOperands(), needsBoundsCheck);
if (ins->type() == MIRType::None)
add(lir, ins);

View File

@ -236,7 +236,6 @@ class CodeGeneratorARM : public CodeGeneratorShared
void visitAtomicExchangeTypedArrayElement(LAtomicExchangeTypedArrayElement* lir) override;
void visitWasmSelect(LWasmSelect* ins) override;
void visitWasmReinterpret(LWasmReinterpret* ins) override;
void emitWasmCall(LWasmCallBase* ins);
void visitWasmLoad(LWasmLoad* ins) override;
void visitWasmLoadI64(LWasmLoadI64* ins) override;
void visitWasmUnalignedLoad(LWasmUnalignedLoad* ins) override;

View File

@ -1604,14 +1604,14 @@ Simulator::handleWasmInterrupt()
uint8_t* pc = (uint8_t*)get_pc();
const wasm::CodeSegment* cs = nullptr;
if (!wasm::InInterruptibleCode(cx_, pc, &cs))
const wasm::ModuleSegment* ms = nullptr;
if (!wasm::InInterruptibleCode(cx_, pc, &ms))
return;
if (!startWasmInterrupt(cx_->activation()->asJit()))
return;
set_pc(int32_t(cs->interruptCode()));
set_pc(int32_t(ms->interruptCode()));
}
static inline JitActivation*
@ -1641,10 +1641,11 @@ Simulator::handleWasmSegFault(int32_t addr, unsigned numBytes)
uint8_t* fp = reinterpret_cast<uint8_t*>(get_register(r11));
const wasm::CodeSegment* segment = wasm::LookupCodeSegment(pc);
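// Only ModuleSegments carry the out-of-bounds and trap stubs used below;
// any other CodeSegment kind (see the Kind enum in WasmCode.h) must bail.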
if (!segment)
if (!segment || !segment->isModule())
return false;
const wasm::ModuleSegment* moduleSegment = segment->asModule();
wasm::Instance* instance = wasm::LookupFaultingInstance(*segment, pc, fp);
wasm::Instance* instance = wasm::LookupFaultingInstance(*moduleSegment, pc, fp);
if (!instance || !instance->memoryAccessInGuardRegion((uint8_t*)addr, numBytes))
return false;
@ -1653,12 +1654,12 @@ Simulator::handleWasmSegFault(int32_t addr, unsigned numBytes)
MOZ_ALWAYS_TRUE(startWasmInterrupt(act));
if (!instance->code().containsCodePC(pc))
MOZ_CRASH("Cannot map PC to trap handler");
set_pc(int32_t(segment->outOfBoundsCode()));
set_pc(int32_t(moduleSegment->outOfBoundsCode()));
return true;
}
MOZ_ASSERT(memoryAccess->hasTrapOutOfLineCode());
set_pc(int32_t(memoryAccess->trapOutOfLineCode(segment->base())));
set_pc(int32_t(memoryAccess->trapOutOfLineCode(moduleSegment->base())));
return true;
}
@ -1673,16 +1674,17 @@ Simulator::handleWasmIllFault()
uint8_t* fp = reinterpret_cast<uint8_t*>(get_register(r11));
const wasm::CodeSegment* segment = wasm::LookupCodeSegment(pc);
if (!segment)
if (!segment || !segment->isModule())
return false;
const wasm::ModuleSegment* moduleSegment = segment->asModule();
wasm::Trap trap;
wasm::BytecodeOffset bytecode;
if (!segment->code().lookupTrap(pc, &trap, &bytecode))
if (!moduleSegment->code().lookupTrap(pc, &trap, &bytecode))
return false;
act->startWasmTrap(trap, bytecode.offset, pc, fp);
set_pc(int32_t(segment->trapCode()));
set_pc(int32_t(moduleSegment->trapCode()));
return true;
}
@ -4920,8 +4922,8 @@ FakeInterruptHandler()
JSContext* cx = TlsContext.get();
uint8_t* pc = cx->simulator()->get_pc_as<uint8_t*>();
const wasm::CodeSegment* cs = nullptr;
if (!wasm::InInterruptibleCode(cx, pc, &cs))
const wasm::ModuleSegment* ms = nullptr;
if (!wasm::InInterruptibleCode(cx, pc, &ms))
return;
cx->simulator()->trigger_wasm_interrupt();

View File

@ -244,8 +244,8 @@ void Simulator::handle_wasm_interrupt() {
uint8_t* pc = (uint8_t*)get_pc();
uint8_t* fp = (uint8_t*)xreg(30);
const js::wasm::CodeSegment* cs = nullptr;
if (!js::wasm::InInterruptibleCode(cx_, pc, &cs))
const js::wasm::ModuleSegment* ms = nullptr;
if (!js::wasm::InInterruptibleCode(cx_, pc, &ms))
return;
JS::ProfilingFrameIterator::RegisterState state;
@ -257,7 +257,7 @@ void Simulator::handle_wasm_interrupt() {
if (!cx_->activation_->asJit()->startWasmInterrupt(state))
return;
set_pc((Instruction*)cs->interruptCode());
set_pc((Instruction*)ms->interruptCode());
}

View File

@ -1646,11 +1646,11 @@ Simulator::handleWasmInterrupt()
JitActivation* activation = TlsContext.get()->activation()->asJit();
const wasm::CodeSegment* segment = wasm::LookupCodeSegment(pc);
if (!segment || !segment->containsCodePC(pc))
if (!segment || !segment->isModule() || !segment->containsCodePC(pc))
return;
startInterrupt(activation);
set_pc(int32_t(segment->interruptCode()));
set_pc(int32_t(segment->asModule()->interruptCode()));
}
@ -1675,10 +1675,11 @@ Simulator::handleWasmFault(int32_t addr, unsigned numBytes)
uint8_t* fp = reinterpret_cast<uint8_t*>(getRegister(Register::fp));
const wasm::CodeSegment* segment = wasm::LookupCodeSegment(pc);
if (!segment)
if (!segment || !segment->isModule())
return false;
const wasm::ModuleSegment* moduleSegment = segment->asModule();
wasm::Instance* instance = wasm::LookupFaultingInstance(*segment, pc, fp);
wasm::Instance* instance = wasm::LookupFaultingInstance(*moduleSegment, pc, fp);
if (!instance || !instance->memoryAccessInGuardRegion((uint8_t*)addr, numBytes))
return false;
@ -1689,12 +1690,12 @@ Simulator::handleWasmFault(int32_t addr, unsigned numBytes)
startInterrupt(act);
if (!instance->code().containsCodePC(pc))
MOZ_CRASH("Cannot map PC to trap handler");
set_pc(int32_t(segment->outOfBoundsCode()));
set_pc(int32_t(moduleSegment->outOfBoundsCode()));
return true;
}
MOZ_ASSERT(memoryAccess->hasTrapOutOfLineCode());
set_pc(int32_t(memoryAccess->trapOutOfLineCode(segment->base())));
set_pc(int32_t(memoryAccess->trapOutOfLineCode(moduleSegment->base())));
return true;
}
@ -1713,16 +1714,17 @@ Simulator::handleWasmTrapFault()
uint8_t* fp = reinterpret_cast<uint8_t*>(getRegister(Register::fp));
const wasm::CodeSegment* segment = wasm::LookupCodeSegment(pc);
if (!segment)
if (!segment || !segment->isModule())
return false;
const wasm::ModuleSegment* moduleSegment = segment->asModule();
wasm::Trap trap;
wasm::BytecodeOffset bytecode;
if (!segment->code().lookupTrap(pc, &trap, &bytecode))
if (!moduleSegment->code().lookupTrap(pc, &trap, &bytecode))
return false;
act->startWasmTrap(trap, bytecode.offset, pc, fp);
set_pc(int32_t(segment->trapCode()));
set_pc(int32_t(moduleSegment->trapCode()));
return true;
}
@ -3639,8 +3641,8 @@ FakeInterruptHandler()
JSContext* cx = TlsContext.get();
uint8_t* pc = cx->simulator()->get_pc_as<uint8_t*>();
const wasm::CodeSegment* cs = nullptr;
if (!wasm::InInterruptibleCode(cx, pc, &cs))
const wasm::ModuleSegment* ms = nullptr;
if (!wasm::InInterruptibleCode(cx, pc, &ms))
return;
cx->simulator()->trigger_wasm_interrupt();

View File

@ -1644,7 +1644,7 @@ Simulator::handleWasmInterrupt()
JitActivation* activation = TlsContext.get()->activation()->asJit();
const wasm::CodeSegment* segment = wasm::LookupCodeSegment(pc);
if (!segment || !segment->containsCodePC(pc))
if (!segment || !segment->isModule() || !segment->containsCodePC(pc))
return;
// fp can be null during the prologue/epilogue of the entry function.
@ -1652,7 +1652,7 @@ Simulator::handleWasmInterrupt()
return;
startInterrupt(activation);
set_pc(int64_t(segment->interruptCode()));
set_pc(int64_t(segment->asModule()->interruptCode()));
}
// WebAssembly memories contain an extra region of guard pages (see
@ -1676,10 +1676,11 @@ Simulator::handleWasmFault(uint64_t addr, unsigned numBytes)
uint8_t* fp = reinterpret_cast<uint8_t*>(getRegister(Register::fp));
const wasm::CodeSegment* segment = wasm::LookupCodeSegment(pc);
if (!segment)
if (!segment || !segment->isModule())
return false;
const wasm::ModuleSegment* moduleSegment = segment->asModule();
wasm::Instance* instance = wasm::LookupFaultingInstance(*segment, pc, fp);
wasm::Instance* instance = wasm::LookupFaultingInstance(*moduleSegment, pc, fp);
if (!instance || !instance->memoryAccessInGuardRegion((uint8_t*)addr, numBytes))
return false;
@ -1690,12 +1691,12 @@ Simulator::handleWasmFault(uint64_t addr, unsigned numBytes)
startInterrupt(act);
if (!instance->code().containsCodePC(pc))
MOZ_CRASH("Cannot map PC to trap handler");
set_pc(int64_t(segment->outOfBoundsCode()));
set_pc(int64_t(moduleSegment->outOfBoundsCode()));
return true;
}
MOZ_ASSERT(memoryAccess->hasTrapOutOfLineCode());
set_pc(int64_t(memoryAccess->trapOutOfLineCode(segment->base())));
set_pc(int64_t(memoryAccess->trapOutOfLineCode(moduleSegment->base())));
return true;
}
@ -1714,16 +1715,17 @@ Simulator::handleWasmTrapFault()
uint8_t* fp = reinterpret_cast<uint8_t*>(getRegister(Register::fp));
const wasm::CodeSegment* segment = wasm::LookupCodeSegment(pc);
if (!segment)
if (!segment || !segment->isModule())
return false;
const wasm::ModuleSegment* moduleSegment = segment->asModule();
wasm::Trap trap;
wasm::BytecodeOffset bytecode;
if (!segment->code().lookupTrap(pc, &trap, &bytecode))
if (!moduleSegment->code().lookupTrap(pc, &trap, &bytecode))
return false;
act->startWasmTrap(trap, bytecode.offset, pc, fp);
set_pc(int64_t(segment->trapCode()));
set_pc(int64_t(moduleSegment->trapCode()));
return true;
}
@ -4046,8 +4048,8 @@ FakeInterruptHandler()
JSContext* cx = TlsContext.get();
uint8_t* pc = cx->simulator()->get_pc_as<uint8_t*>();
const wasm::CodeSegment* cs = nullptr;
if (!wasm::InInterruptibleCode(cx, pc, &cs))
const wasm::ModuleSegment* ms = nullptr;
if (!wasm::InInterruptibleCode(cx, pc, &ms))
return;
cx->simulator()->trigger_wasm_interrupt();

View File

@ -1492,10 +1492,8 @@ CodeGeneratorShared::omitOverRecursedCheck() const
}
void
CodeGeneratorShared::emitWasmCallBase(LWasmCallBase* ins)
CodeGeneratorShared::emitWasmCallBase(MWasmCall* mir, bool needsBoundsCheck)
{
MWasmCall* mir = ins->mir();
if (mir->spIncrement())
masm.freeStack(mir->spIncrement());
@ -1528,7 +1526,7 @@ CodeGeneratorShared::emitWasmCallBase(LWasmCallBase* ins)
break;
case wasm::CalleeDesc::AsmJSTable:
case wasm::CalleeDesc::WasmTable:
masm.wasmCallIndirect(desc, callee, ins->needsBoundsCheck());
masm.wasmCallIndirect(desc, callee, needsBoundsCheck);
reloadRegs = callee.which() == wasm::CalleeDesc::WasmTable && callee.wasmTableIsExternal();
break;
case wasm::CalleeDesc::Builtin:

View File

@ -345,9 +345,16 @@ class CodeGeneratorShared : public LElementVisitor
void emitTruncateDouble(FloatRegister src, Register dest, MTruncateToInt32* mir);
void emitTruncateFloat32(FloatRegister src, Register dest, MTruncateToInt32* mir);
void emitWasmCallBase(LWasmCallBase* ins);
void visitWasmCall(LWasmCall* ins) override { emitWasmCallBase(ins); }
void visitWasmCallI64(LWasmCallI64* ins) override { emitWasmCallBase(ins); }
void emitWasmCallBase(MWasmCall* mir, bool needsBoundsCheck);
void visitWasmCall(LWasmCall* ins) override {
emitWasmCallBase(ins->mir(), ins->needsBoundsCheck());
}
void visitWasmCallVoid(LWasmCallVoid* ins) override {
emitWasmCallBase(ins->mir(), ins->needsBoundsCheck());
}
void visitWasmCallI64(LWasmCallI64* ins) override {
emitWasmCallBase(ins->mir(), ins->needsBoundsCheck());
}
void visitWasmLoadGlobalVar(LWasmLoadGlobalVar* ins) override;
void visitWasmStoreGlobalVar(LWasmStoreGlobalVar* ins) override;

View File

@ -9010,24 +9010,25 @@ class LWasmStackArgI64 : public LInstructionHelper<0, INT64_PIECES, 0>
}
};
class LWasmCallBase : public LInstruction
template <size_t Defs>
class LWasmCallBase : public details::LInstructionFixedDefsTempsHelper<Defs, 0>
{
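// Defs is the number of fixed definitions: 0 for LWasmCallVoid, 1 for
// LWasmCall's single result, and INT64_PIECES for LWasmCallI64.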
using Base = details::LInstructionFixedDefsTempsHelper<Defs, 0>;
LAllocation* operands_;
uint32_t needsBoundsCheck_;
public:
LWasmCallBase(LAllocation* operands, uint32_t numOperands, uint32_t numDefs,
bool needsBoundsCheck)
: LInstruction(numOperands, numDefs, /* numTemps = */ 0),
LWasmCallBase(LAllocation* operands, uint32_t numOperands, bool needsBoundsCheck)
: Base(numOperands),
operands_(operands),
needsBoundsCheck_(needsBoundsCheck)
{
setIsCall();
this->setIsCall();
}
MWasmCall* mir() const {
return mir_->toWasmCall();
return this->mir_->toWasmCall();
}
static bool isCallPreserved(AnyRegister reg) {
@ -9041,79 +9042,48 @@ class LWasmCallBase : public LInstruction
// LInstruction interface
LAllocation* getOperand(size_t index) override {
MOZ_ASSERT(index < numOperands());
MOZ_ASSERT(index < this->numOperands());
return &operands_[index];
}
void setOperand(size_t index, const LAllocation& a) override {
MOZ_ASSERT(index < numOperands());
MOZ_ASSERT(index < this->numOperands());
operands_[index] = a;
}
LDefinition* getTemp(size_t index) override {
MOZ_CRASH("no temps");
}
void setTemp(size_t index, const LDefinition& a) override {
MOZ_CRASH("no temps");
}
size_t numSuccessors() const override {
return 0;
}
MBasicBlock* getSuccessor(size_t i) const override {
MOZ_CRASH("no successors");
}
void setSuccessor(size_t i, MBasicBlock*) override {
MOZ_CRASH("no successors");
}
bool needsBoundsCheck() const {
return needsBoundsCheck_;
}
};
class LWasmCall : public LWasmCallBase
class LWasmCall : public LWasmCallBase<1>
{
LDefinition def_;
public:
LIR_HEADER(WasmCall);
LWasmCall(LAllocation* operands, uint32_t numOperands, uint32_t numDefs, bool needsBoundsCheck)
: LWasmCallBase(operands, numOperands, numDefs, needsBoundsCheck),
def_(LDefinition::BogusTemp())
{}
// LInstruction interface
LDefinition* getDef(size_t index) override {
MOZ_ASSERT(numDefs() == 1);
MOZ_ASSERT(index == 0);
return &def_;
}
void setDef(size_t index, const LDefinition& def) override {
MOZ_ASSERT(index == 0);
def_ = def;
LWasmCall(LAllocation* operands, uint32_t numOperands, bool needsBoundsCheck)
: LWasmCallBase(operands, numOperands, needsBoundsCheck)
{
}
};
class LWasmCallI64 : public LWasmCallBase
class LWasmCallVoid : public LWasmCallBase<0>
{
LDefinition defs_[INT64_PIECES];
public:
LIR_HEADER(WasmCallVoid);
LWasmCallVoid(LAllocation* operands, uint32_t numOperands, bool needsBoundsCheck)
: LWasmCallBase(operands, numOperands, needsBoundsCheck)
{
}
};
class LWasmCallI64 : public LWasmCallBase<INT64_PIECES>
{
public:
LIR_HEADER(WasmCallI64);
LWasmCallI64(LAllocation* operands, uint32_t numOperands, bool needsBoundsCheck)
: LWasmCallBase(operands, numOperands, INT64_PIECES, needsBoundsCheck)
: LWasmCallBase(operands, numOperands, needsBoundsCheck)
{
for (size_t i = 0; i < numDefs(); i++)
defs_[i] = LDefinition::BogusTemp();
}
// LInstruction interface
LDefinition* getDef(size_t index) override {
MOZ_ASSERT(index < numDefs());
return &defs_[index];
}
void setDef(size_t index, const LDefinition& def) override {
MOZ_ASSERT(index < numDefs());
defs_[index] = def;
}
};
@ -9121,9 +9091,10 @@ inline bool
LNode::isCallPreserved(AnyRegister reg) const
{
switch (op()) {
case LOp_WasmCallI64:
case LOp_WasmCall:
return LWasmCallBase::isCallPreserved(reg);
case LOp_WasmCallVoid:
case LOp_WasmCallI64:
return LWasmCallBase<0>::isCallPreserved(reg);
default:
return false;
}

View File

@ -460,6 +460,7 @@
_(WasmStackArg) \
_(WasmStackArgI64) \
_(WasmCall) \
_(WasmCallVoid) \
_(WasmCallI64) \
_(WasmUint32ToDouble) \
_(WasmUint32ToFloat32)

View File

@ -285,16 +285,16 @@ namespace TuningDefaults {
static const size_t MaxMallocBytes = 128 * 1024 * 1024;
/* JSGC_ALLOCATION_THRESHOLD_FACTOR */
static const float AllocThresholdFactor = 0.9f;
static const double AllocThresholdFactor = 0.9;
/* JSGC_ALLOCATION_THRESHOLD_FACTOR_AVOID_INTERRUPT */
static const float AllocThresholdFactorAvoidInterrupt = 0.9f;
static const double AllocThresholdFactorAvoidInterrupt = 0.9;
/* no parameter */
static const float MallocThresholdGrowFactor = 1.5f;
static const double MallocThresholdGrowFactor = 1.5;
/* no parameter */
static const float MallocThresholdShrinkFactor = 0.9f;
static const double MallocThresholdShrinkFactor = 0.9;
/* no parameter */
static const size_t MallocThresholdLimit = 1024 * 1024 * 1024;
@ -343,6 +343,31 @@ namespace TuningDefaults {
}}} // namespace js::gc::TuningDefaults
/*
* We start an incremental collection for a zone when a proportion of its
* threshold is reached. This is configured by the
* JSGC_ALLOCATION_THRESHOLD_FACTOR and
* JSGC_ALLOCATION_THRESHOLD_FACTOR_AVOID_INTERRUPT parameters.
*/
static const double MinAllocationThresholdFactor = 0.9;
/*
* We may start to collect a zone before its trigger threshold is reached if
* GCRuntime::maybeGC() is called for that zone or we start collecting other
* zones. These eager threshold factors are not configurable.
*/
static const double HighFrequencyEagerAllocTriggerFactor = 0.85;
static const double LowFrequencyEagerAllocTriggerFactor = 0.9;
/*
* Don't allow heap growth factors to be set so low that collections could
* reduce the trigger threshold.
*/
static const double MinHighFrequencyHeapGrowthFactor =
1.0 / Min(HighFrequencyEagerAllocTriggerFactor, MinAllocationThresholdFactor);
static const double MinLowFrequencyHeapGrowthFactor =
1.0 / Min(LowFrequencyEagerAllocTriggerFactor, MinAllocationThresholdFactor);
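// Worked example (editorial, using the constants above): in high-frequency
// mode Min(0.85, 0.9) = 0.85, so growth factors must be at least
// 1 / 0.85 ~= 1.18. A GC that ends at lastBytes then sets the new trigger
// to at least 1.18 * lastBytes, whose eager trigger 0.85 * 1.18 * lastBytes
// is ~= lastBytes, so a collection can never pull either threshold below
// the heap size it just observed.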
/* Increase the IGC marking slice time if we are in highFrequencyGC mode. */
static const int IGC_MARK_SLICE_MULTIPLIER = 2;
@ -1322,21 +1347,21 @@ GCSchedulingTunables::setParameter(JSGCParamKey key, uint32_t value, const AutoL
}
case JSGC_HIGH_FREQUENCY_HEAP_GROWTH_MAX: {
double newGrowth = value / 100.0;
if (newGrowth <= 0.85 || newGrowth > MaxHeapGrowthFactor)
if (newGrowth < MinHighFrequencyHeapGrowthFactor || newGrowth > MaxHeapGrowthFactor)
return false;
setHighFrequencyHeapGrowthMax(newGrowth);
break;
}
case JSGC_HIGH_FREQUENCY_HEAP_GROWTH_MIN: {
double newGrowth = value / 100.0;
if (newGrowth <= 0.85 || newGrowth > MaxHeapGrowthFactor)
if (newGrowth < MinHighFrequencyHeapGrowthFactor || newGrowth > MaxHeapGrowthFactor)
return false;
setHighFrequencyHeapGrowthMin(newGrowth);
break;
}
case JSGC_LOW_FREQUENCY_HEAP_GROWTH: {
double newGrowth = value / 100.0;
if (newGrowth <= 0.9 || newGrowth > MaxHeapGrowthFactor)
if (newGrowth < MinLowFrequencyHeapGrowthFactor || newGrowth > MaxHeapGrowthFactor)
return false;
setLowFrequencyHeapGrowth(newGrowth);
break;
@ -1351,16 +1376,20 @@ GCSchedulingTunables::setParameter(JSGCParamKey key, uint32_t value, const AutoL
gcZoneAllocThresholdBase_ = value * 1024 * 1024;
break;
case JSGC_ALLOCATION_THRESHOLD_FACTOR: {
float newFactor = value / 100.0;
if (newFactor <= 0.1 || newFactor > 1.0)
double newFactor = value / 100.0;
if (newFactor < MinAllocationThresholdFactor || newFactor > 1.0)
    return false;
allocThresholdFactor_ = newFactor;
break;
}
case JSGC_ALLOCATION_THRESHOLD_FACTOR_AVOID_INTERRUPT: {
float newFactor = value / 100.0;
if (newFactor <= 0.1 || newFactor > 1.0)
double newFactor = value / 100.0;
if (newFactor < MinAllocationThresholdFactor || newFactor > 1.0)
    return false;
allocThresholdFactorAvoidInterrupt_ = newFactor;
break;
}
@ -1407,7 +1436,7 @@ GCSchedulingTunables::setHighFrequencyHeapGrowthMin(double value)
highFrequencyHeapGrowthMin_ = value;
if (highFrequencyHeapGrowthMin_ > highFrequencyHeapGrowthMax_)
highFrequencyHeapGrowthMax_ = highFrequencyHeapGrowthMin_;
MOZ_ASSERT(highFrequencyHeapGrowthMin_ / 0.85 > 1.0);
MOZ_ASSERT(highFrequencyHeapGrowthMin_ >= MinHighFrequencyHeapGrowthFactor);
MOZ_ASSERT(highFrequencyHeapGrowthMin_ <= highFrequencyHeapGrowthMax_);
}
@ -1417,7 +1446,7 @@ GCSchedulingTunables::setHighFrequencyHeapGrowthMax(double value)
highFrequencyHeapGrowthMax_ = value;
if (highFrequencyHeapGrowthMax_ < highFrequencyHeapGrowthMin_)
highFrequencyHeapGrowthMin_ = highFrequencyHeapGrowthMax_;
MOZ_ASSERT(highFrequencyHeapGrowthMax_ / 0.85 > 1.0);
MOZ_ASSERT(highFrequencyHeapGrowthMin_ >= MinHighFrequencyHeapGrowthFactor);
MOZ_ASSERT(highFrequencyHeapGrowthMin_ <= highFrequencyHeapGrowthMax_);
}
@ -1425,7 +1454,7 @@ void
GCSchedulingTunables::setLowFrequencyHeapGrowth(double value)
{
lowFrequencyHeapGrowth_ = value;
MOZ_ASSERT(lowFrequencyHeapGrowth_ / 0.9 > 1.0);
MOZ_ASSERT(lowFrequencyHeapGrowth_ >= MinLowFrequencyHeapGrowthFactor);
}
void
@ -1826,9 +1855,11 @@ GCRuntime::setMaxMallocBytes(size_t value, const AutoLockGC& lock)
}
double
ZoneHeapThreshold::allocTrigger(bool highFrequencyGC) const
ZoneHeapThreshold::eagerAllocTrigger(bool highFrequencyGC) const
{
return (highFrequencyGC ? 0.85 : 0.9) * gcTriggerBytes();
double eagerTriggerFactor = highFrequencyGC ? HighFrequencyEagerAllocTriggerFactor
: LowFrequencyEagerAllocTriggerFactor;
return eagerTriggerFactor * gcTriggerBytes();
}
/* static */ double
@ -3229,7 +3260,7 @@ GCRuntime::maybeAllocTriggerZoneGC(Zone* zone, const AutoLockGC& lock)
}
bool wouldInterruptCollection = isIncrementalGCInProgress() && !zone->isCollecting();
float zoneGCThresholdFactor =
double zoneGCThresholdFactor =
wouldInterruptCollection ? tunables.allocThresholdFactorAvoidInterrupt()
: tunables.allocThresholdFactor();
@ -3308,7 +3339,7 @@ GCRuntime::maybeGC(Zone* zone)
if (gcIfRequested())
return;
double threshold = zone->threshold.allocTrigger(schedulingState.inHighFrequencyGCMode());
double threshold = zone->threshold.eagerAllocTrigger(schedulingState.inHighFrequencyGCMode());
double usedBytes = zone->usage.gcBytes();
if (usedBytes > 1024 * 1024 && usedBytes >= threshold &&
!isIncrementalGCInProgress() && !isBackgroundSweeping())
@ -7198,11 +7229,9 @@ class AutoScheduleZonesForGC
zone->scheduleGC();
// This is a heuristic to reduce the total number of collections.
if (zone->usage.gcBytes() >=
zone->threshold.allocTrigger(rt->gc.schedulingState.inHighFrequencyGCMode()))
{
bool inHighFrequencyMode = rt->gc.schedulingState.inHighFrequencyGCMode();
if (zone->usage.gcBytes() >= zone->threshold.eagerAllocTrigger(inHighFrequencyMode))
zone->scheduleGC();
}
// This ensures we collect zones that have reached the malloc limit.
if (zone->shouldTriggerGCForTooMuchMalloc())
@ -8757,7 +8786,7 @@ ZoneGCAllocTriggerGetter(JSContext* cx, unsigned argc, Value* vp)
{
CallArgs args = CallArgsFromVp(argc, vp);
bool highFrequency = cx->runtime()->gc.schedulingState.inHighFrequencyGCMode();
args.rval().setNumber(double(cx->zone()->threshold.allocTrigger(highFrequency)));
args.rval().setNumber(double(cx->zone()->threshold.eagerAllocTrigger(highFrequency)));
return true;
}

View File

@ -39,6 +39,21 @@ using mozilla::BinarySearch;
using mozilla::MakeEnumeratedRange;
using JS::GenericNaN;
bool
CodeSegment::registerInProcessMap()
{
if (!RegisterCodeSegment(this))
return false;
registered_ = true;
return true;
}
CodeSegment::~CodeSegment()
{
if (registered_)
UnregisterCodeSegment(this);
}
static uint32_t
RoundupCodeLength(uint32_t codeLength)
{
@ -48,7 +63,7 @@ RoundupCodeLength(uint32_t codeLength)
return JS_ROUNDUP(codeLength, ExecutableCodePageSize);
}
/* static */ CodeSegment::UniqueCodeBytes
/* static */ UniqueCodeBytes
CodeSegment::AllocateCodeBytes(uint32_t codeLength)
{
codeLength = RoundupCodeLength(codeLength);
@ -75,7 +90,7 @@ CodeSegment::AllocateCodeBytes(uint32_t codeLength)
}
void
CodeSegment::FreeCode::operator()(uint8_t* bytes)
FreeCode::operator()(uint8_t* bytes)
{
MOZ_ASSERT(codeLength);
MOZ_ASSERT(codeLength == RoundupCodeLength(codeLength));
@ -87,12 +102,12 @@ CodeSegment::FreeCode::operator()(uint8_t* bytes)
}
static bool
StaticallyLink(const CodeSegment& cs, const LinkDataTier& linkData)
StaticallyLink(const ModuleSegment& ms, const LinkDataTier& linkData)
{
for (LinkDataTier::InternalLink link : linkData.internalLinks) {
CodeOffset patchAt(link.patchAtOffset);
CodeOffset target(link.targetOffset);
Assembler::Bind(cs.base(), patchAt, target);
Assembler::Bind(ms.base(), patchAt, target);
}
if (!EnsureBuiltinThunksInitialized())
@ -105,7 +120,7 @@ StaticallyLink(const CodeSegment& cs, const LinkDataTier& linkData)
void* target = SymbolicAddressTarget(imm);
for (uint32_t offset : offsets) {
uint8_t* patchAt = cs.base() + offset;
uint8_t* patchAt = ms.base() + offset;
Assembler::PatchDataWithValueCheck(CodeLocationLabel(patchAt),
PatchedImmPtr(target),
PatchedImmPtr((void*)-1));
@ -148,7 +163,7 @@ AppendToString(const char* str, UTF8Bytes* bytes)
#endif
static void
SendCodeRangesToProfiler(const CodeSegment& cs, const Bytes& bytecode, const Metadata& metadata)
SendCodeRangesToProfiler(const ModuleSegment& ms, const Bytes& bytecode, const Metadata& metadata)
{
bool enabled = false;
#ifdef JS_ION_PERF
@ -160,11 +175,11 @@ SendCodeRangesToProfiler(const CodeSegment& cs, const Bytes& bytecode, const Met
if (!enabled)
return;
for (const CodeRange& codeRange : metadata.metadata(cs.tier()).codeRanges) {
for (const CodeRange& codeRange : metadata.metadata(ms.tier()).codeRanges) {
if (!codeRange.hasFuncIndex())
continue;
uintptr_t start = uintptr_t(cs.base() + codeRange.begin());
uintptr_t start = uintptr_t(ms.base() + codeRange.begin());
uintptr_t size = codeRange.end() - codeRange.begin();
UTF8Bytes name;
@ -216,12 +231,12 @@ SendCodeRangesToProfiler(const CodeSegment& cs, const Bytes& bytecode, const Met
}
}
/* static */ UniqueCodeSegment
CodeSegment::create(Tier tier,
MacroAssembler& masm,
const ShareableBytes& bytecode,
const LinkDataTier& linkData,
const Metadata& metadata)
/* static */ UniqueModuleSegment
ModuleSegment::create(Tier tier,
MacroAssembler& masm,
const ShareableBytes& bytecode,
const LinkDataTier& linkData,
const Metadata& metadata)
{
// Round up the code size to page size since this is eventually required by
// the executable-code allocator and for setting memory protection.
@ -242,12 +257,12 @@ CodeSegment::create(Tier tier,
return create(tier, Move(codeBytes), codeLength, bytecode, linkData, metadata);
}
/* static */ UniqueCodeSegment
CodeSegment::create(Tier tier,
const Bytes& unlinkedBytes,
const ShareableBytes& bytecode,
const LinkDataTier& linkData,
const Metadata& metadata)
/* static */ UniqueModuleSegment
ModuleSegment::create(Tier tier,
const Bytes& unlinkedBytes,
const ShareableBytes& bytecode,
const LinkDataTier& linkData,
const Metadata& metadata)
{
// The unlinked bytes are a snapshot of the MacroAssembler's contents so
// round up just like in the MacroAssembler overload above.
@ -264,33 +279,33 @@ CodeSegment::create(Tier tier,
return create(tier, Move(codeBytes), codeLength, bytecode, linkData, metadata);
}
/* static */ UniqueCodeSegment
CodeSegment::create(Tier tier,
UniqueCodeBytes codeBytes,
uint32_t codeLength,
const ShareableBytes& bytecode,
const LinkDataTier& linkData,
const Metadata& metadata)
/* static */ UniqueModuleSegment
ModuleSegment::create(Tier tier,
UniqueCodeBytes codeBytes,
uint32_t codeLength,
const ShareableBytes& bytecode,
const LinkDataTier& linkData,
const Metadata& metadata)
{
// These should always exist and should never be first in the code segment.
auto cs = js::MakeUnique<CodeSegment>();
if (!cs)
auto ms = js::MakeUnique<ModuleSegment>();
if (!ms)
return nullptr;
if (!cs->initialize(tier, Move(codeBytes), codeLength, bytecode, linkData, metadata))
if (!ms->initialize(tier, Move(codeBytes), codeLength, bytecode, linkData, metadata))
return nullptr;
return UniqueCodeSegment(cs.release());
return UniqueModuleSegment(ms.release());
}
bool
CodeSegment::initialize(Tier tier,
UniqueCodeBytes codeBytes,
uint32_t codeLength,
const ShareableBytes& bytecode,
const LinkDataTier& linkData,
const Metadata& metadata)
ModuleSegment::initialize(Tier tier,
UniqueCodeBytes codeBytes,
uint32_t codeLength,
const ShareableBytes& bytecode,
const LinkDataTier& linkData,
const Metadata& metadata)
{
MOZ_ASSERT(bytes_ == nullptr);
MOZ_ASSERT(linkData.interruptOffset);
@ -315,36 +330,29 @@ CodeSegment::initialize(Tier tier,
if (!ExecutableAllocator::makeExecutable(bytes_.get(), RoundupCodeLength(codeLength)))
return false;
if (!RegisterCodeSegment(this))
if (!registerInProcessMap())
return false;
registered_ = true;
SendCodeRangesToProfiler(*this, bytecode.bytes, metadata);
return true;
}
CodeSegment::~CodeSegment()
{
if (registered_)
UnregisterCodeSegment(this);
}
size_t
CodeSegment::serializedSize() const
ModuleSegment::serializedSize() const
{
return sizeof(uint32_t) + length_;
}
void
CodeSegment::addSizeOfMisc(mozilla::MallocSizeOf mallocSizeOf, size_t* code, size_t* data) const
ModuleSegment::addSizeOfMisc(mozilla::MallocSizeOf mallocSizeOf, size_t* code, size_t* data) const
{
*data += mallocSizeOf(this);
*code += RoundupCodeLength(length_);
}
uint8_t*
CodeSegment::serialize(uint8_t* cursor, const LinkDataTier& linkData) const
ModuleSegment::serialize(uint8_t* cursor, const LinkDataTier& linkData) const
{
MOZ_ASSERT(tier() == Tier::Serialized);
@ -356,8 +364,8 @@ CodeSegment::serialize(uint8_t* cursor, const LinkDataTier& linkData) const
}
const uint8_t*
CodeSegment::deserialize(const uint8_t* cursor, const ShareableBytes& bytecode,
const LinkDataTier& linkData, const Metadata& metadata)
ModuleSegment::deserialize(const uint8_t* cursor, const ShareableBytes& bytecode,
const LinkDataTier& linkData, const Metadata& metadata)
{
uint32_t length;
cursor = ReadScalar<uint32_t>(cursor, &length);
@ -720,7 +728,7 @@ Metadata::getFuncName(const Bytes* maybeBytecode, uint32_t funcIndex, UTF8Bytes*
}
bool
JumpTables::init(CompileMode mode, const CodeSegment& cs, const CodeRangeVector& codeRanges)
JumpTables::init(CompileMode mode, const ModuleSegment& ms, const CodeRangeVector& codeRanges)
{
// Note a fast jit entry has two addresses, to be compatible with
// ion/baseline functions which have the raw vs checked args entries,
@ -759,7 +767,7 @@ JumpTables::init(CompileMode mode, const CodeSegment& cs, const CodeRangeVector&
if (!jit_)
return false;
uint8_t* codeBase = cs.base();
uint8_t* codeBase = ms.base();
for (const CodeRange& cr : codeRanges) {
if (cr.isFunction())
setTieringEntry(cr.funcIndex(), codeBase + cr.funcTierEntry());
@ -769,7 +777,7 @@ JumpTables::init(CompileMode mode, const CodeSegment& cs, const CodeRangeVector&
return true;
}
Code::Code(UniqueCodeSegment tier, const Metadata& metadata, JumpTables&& maybeJumpTables)
Code::Code(UniqueModuleSegment tier, const Metadata& metadata, JumpTables&& maybeJumpTables)
: metadata_(&metadata),
profilingLabels_(mutexid::WasmCodeProfilingLabels, CacheableCharsVector()),
jumpTables_(Move(maybeJumpTables))
@ -783,7 +791,7 @@ Code::Code()
}
void
Code::setTier2(UniqueCodeSegment segment) const
Code::setTier2(UniqueModuleSegment segment) const
{
MOZ_RELEASE_ASSERT(segment->tier() == Tier::Ion && segment1_->tier() != Tier::Ion);
MOZ_RELEASE_ASSERT(!segment2_.get());
@ -828,7 +836,7 @@ Code::bestTier() const
return segment1_->tier();
}
const CodeSegment&
const ModuleSegment&
Code::segment(Tier tier) const
{
switch (tier) {
@ -851,8 +859,8 @@ bool
Code::containsCodePC(const void* pc) const
{
for (Tier t : tiers()) {
const CodeSegment& cs = segment(t);
if (cs.containsCodePC(pc))
const ModuleSegment& ms = segment(t);
if (ms.containsCodePC(pc))
return true;
}
return false;
@ -1080,16 +1088,16 @@ Code::deserialize(const uint8_t* cursor, const SharedBytes& bytecode, const Link
if (!cursor)
return nullptr;
UniqueCodeSegment codeSegment = js::MakeUnique<CodeSegment>();
if (!codeSegment)
UniqueModuleSegment moduleSegment = js::MakeUnique<ModuleSegment>();
if (!moduleSegment)
return nullptr;
cursor = codeSegment->deserialize(cursor, *bytecode, linkData.linkData(Tier::Serialized),
metadata);
cursor = moduleSegment->deserialize(cursor, *bytecode, linkData.linkData(Tier::Serialized),
metadata);
if (!cursor)
return nullptr;
segment1_ = takeOwnership(Move(codeSegment));
segment1_ = takeOwnership(Move(moduleSegment));
metadata_ = &metadata;
if (!jumpTables_.init(CompileMode::Once, *segment1_,

View File

@ -53,28 +53,82 @@ struct ShareableBytes : ShareableBase<ShareableBytes>
typedef RefPtr<ShareableBytes> MutableBytes;
typedef RefPtr<const ShareableBytes> SharedBytes;
// A wasm CodeSegment owns the allocated executable code for a wasm module.
// Executable code must be deallocated specially.
class CodeSegment;
typedef UniquePtr<CodeSegment> UniqueCodeSegment;
typedef UniquePtr<const CodeSegment> UniqueConstCodeSegment;
struct FreeCode {
uint32_t codeLength;
FreeCode() : codeLength(0) {}
explicit FreeCode(uint32_t codeLength) : codeLength(codeLength) {}
void operator()(uint8_t* codeBytes);
};
using UniqueCodeBytes = UniquePtr<uint8_t, FreeCode>;
class ModuleSegment;
// CodeSegment contains common helpers for determining the base and length of a
// code segment and whether a pc belongs to this segment. It is inherited by:
// - ModuleSegment, i.e. the code segment of a Module, generated
//   eagerly when a Module is instantiated.
// - LazyCodeSegment, i.e. the code segment of entry stubs that are lazily
// generated.
class CodeSegment
{
// Executable code must be deallocated specially.
struct FreeCode {
uint32_t codeLength;
FreeCode() : codeLength(0) {}
explicit FreeCode(uint32_t codeLength) : codeLength(codeLength) {}
void operator()(uint8_t* codeBytes);
};
typedef UniquePtr<uint8_t, FreeCode> UniqueCodeBytes;
protected:
static UniqueCodeBytes AllocateCodeBytes(uint32_t codeLength);
const Code* code_;
Tier tier_;
// A back reference to the owning code.
const Code* code_;
UniqueCodeBytes bytes_;
uint32_t length_;
uint32_t length_;
enum class Kind {
LazyStubs,
Module
} kind_;
bool registerInProcessMap();
private:
bool registered_;
public:
CodeSegment()
: code_(nullptr),
length_(0),
kind_(Kind::Module),
registered_(false)
{}
~CodeSegment();
bool isLazyStubs() const { return kind_ == Kind::LazyStubs; }
bool isModule() const { return kind_ == Kind::Module; }
const ModuleSegment* asModule() const { MOZ_ASSERT(isModule()); return (ModuleSegment*) this; }
uint8_t* base() const { return bytes_.get(); }
uint32_t length() const { return length_; }
bool containsCodePC(const void* pc) const {
return pc >= base() && pc < (base() + length_);
}
void initCode(const Code* code) {
MOZ_ASSERT(!code_);
code_ = code;
}
const Code& code() const { MOZ_ASSERT(code_); return *code_; }
};
// A wasm ModuleSegment owns the allocated executable code for a wasm module.
typedef UniquePtr<ModuleSegment> UniqueModuleSegment;
typedef UniquePtr<const ModuleSegment> UniqueConstModuleSegment;
class ModuleSegment : public CodeSegment
{
Tier tier_;
// These are pointers into code for stubs used for asynchronous
// signal-handler control-flow transfer.
@ -83,8 +137,6 @@ class CodeSegment
uint8_t* unalignedAccessCode_;
uint8_t* trapCode_;
bool registered_;
bool initialize(Tier tier,
UniqueCodeBytes bytes,
uint32_t codeLength,
@ -92,61 +144,44 @@ class CodeSegment
const LinkDataTier& linkData,
const Metadata& metadata);
static UniqueCodeSegment create(Tier tier,
UniqueCodeBytes bytes,
uint32_t codeLength,
const ShareableBytes& bytecode,
const LinkDataTier& linkData,
const Metadata& metadata);
static UniqueModuleSegment create(Tier tier,
UniqueCodeBytes bytes,
uint32_t codeLength,
const ShareableBytes& bytecode,
const LinkDataTier& linkData,
const Metadata& metadata);
public:
CodeSegment(const CodeSegment&) = delete;
void operator=(const CodeSegment&) = delete;
ModuleSegment(const ModuleSegment&) = delete;
void operator=(const ModuleSegment&) = delete;
CodeSegment()
: code_(nullptr),
ModuleSegment()
: CodeSegment(),
tier_(Tier(-1)),
length_(0),
interruptCode_(nullptr),
outOfBoundsCode_(nullptr),
unalignedAccessCode_(nullptr),
trapCode_(nullptr),
registered_(false)
trapCode_(nullptr)
{}
~CodeSegment();
static UniqueModuleSegment create(Tier tier,
jit::MacroAssembler& masm,
const ShareableBytes& bytecode,
const LinkDataTier& linkData,
const Metadata& metadata);
static UniqueCodeSegment create(Tier tier,
jit::MacroAssembler& masm,
const ShareableBytes& bytecode,
const LinkDataTier& linkData,
const Metadata& metadata);
static UniqueModuleSegment create(Tier tier,
const Bytes& unlinkedBytes,
const ShareableBytes& bytecode,
const LinkDataTier& linkData,
const Metadata& metadata);
static UniqueCodeSegment create(Tier tier,
const Bytes& unlinkedBytes,
const ShareableBytes& bytecode,
const LinkDataTier& linkData,
const Metadata& metadata);
void initCode(const Code* code) {
MOZ_ASSERT(!code_);
code_ = code;
}
const Code& code() const { MOZ_ASSERT(code_); return *code_; }
Tier tier() const { return tier_; }
uint8_t* base() const { return bytes_.get(); }
uint32_t length() const { return length_; }
uint8_t* interruptCode() const { return interruptCode_; }
uint8_t* outOfBoundsCode() const { return outOfBoundsCode_; }
uint8_t* unalignedAccessCode() const { return unalignedAccessCode_; }
uint8_t* trapCode() const { return trapCode_; }
bool containsCodePC(const void* pc) const {
return pc >= base() && pc < (base() + length_);
}
// Structured clone support:
size_t serializedSize() const;
@ -463,7 +498,7 @@ class JumpTables
size_t numFuncs_;
public:
bool init(CompileMode mode, const CodeSegment& cs, const CodeRangeVector& codeRanges);
bool init(CompileMode mode, const ModuleSegment& ms, const CodeRangeVector& codeRanges);
void setJitEntry(size_t i, void* target) const {
// See comment in wasm::Module::finishTier2 and JumpTables::init.
@ -501,20 +536,20 @@ class JumpTables
class Code : public ShareableBase<Code>
{
UniqueConstCodeSegment segment1_;
mutable UniqueConstCodeSegment segment2_; // Access only when hasTier2() is true
UniqueConstModuleSegment segment1_;
mutable UniqueConstModuleSegment segment2_; // Access only when hasTier2() is true
SharedMetadata metadata_;
ExclusiveData<CacheableCharsVector> profilingLabels_;
JumpTables jumpTables_;
UniqueConstCodeSegment takeOwnership(UniqueCodeSegment segment) const {
UniqueConstModuleSegment takeOwnership(UniqueModuleSegment segment) const {
segment->initCode(this);
return UniqueConstCodeSegment(segment.release());
return UniqueConstModuleSegment(segment.release());
}
public:
Code();
Code(UniqueCodeSegment tier, const Metadata& metadata, JumpTables&& maybeJumpTables);
Code(UniqueModuleSegment tier, const Metadata& metadata, JumpTables&& maybeJumpTables);
void setTieringEntry(size_t i, void* target) const { jumpTables_.setTieringEntry(i, target); }
void** tieringJumpTable() const { return jumpTables_.tiering(); }
@ -524,14 +559,14 @@ class Code : public ShareableBase<Code>
uint32_t lookupFuncIndex(JSFunction* fun) const;
bool hasTier2() const { return metadata_->hasTier2(); }
void setTier2(UniqueCodeSegment segment) const;
void setTier2(UniqueModuleSegment segment) const;
Tiers tiers() const;
bool hasTier(Tier t) const;
Tier stableTier() const; // This is stable during a run
Tier bestTier() const; // This may transition from Baseline -> Ion at any time
const CodeSegment& segment(Tier tier) const;
const ModuleSegment& segment(Tier tier) const;
const MetadataTier& metadata(Tier tier) const { return metadata_->metadata(tier); }
const Metadata& metadata() const { return *metadata_; }

View File

@ -24,7 +24,6 @@
namespace js {
namespace wasm {
class CodeSegment;
typedef Vector<Instance*, 0, SystemAllocPolicy> InstanceVector;
// wasm::Compartment lives in JSCompartment and contains the wasm-related

View File

@ -394,7 +394,7 @@ DebugState::toggleBreakpointTrap(JSRuntime* rt, uint32_t offset, bool enabled)
return;
size_t debugTrapOffset = callSite->returnAddressOffset();
const CodeSegment& codeSegment = code_->segment(Tier::Debug);
const ModuleSegment& codeSegment = code_->segment(Tier::Debug);
const CodeRange* codeRange = code_->lookupRange(codeSegment.base() + debugTrapOffset);
MOZ_ASSERT(codeRange && codeRange->isFunction());
@ -513,7 +513,7 @@ DebugState::adjustEnterAndLeaveFrameTrapsState(JSContext* cx, bool enabled)
if (wasEnabled == stillEnabled)
return;
const CodeSegment& codeSegment = code_->segment(Tier::Debug);
const ModuleSegment& codeSegment = code_->segment(Tier::Debug);
AutoWritableJitCode awjc(cx->runtime(), codeSegment.base(), codeSegment.length());
AutoFlushICache afc("Code::adjustEnterAndLeaveFrameTrapsState");
AutoFlushICache::setRange(uintptr_t(codeSegment.base()), codeSegment.length());

View File

@ -1213,7 +1213,7 @@ ProfilingFrameIterator::label() const
}
Instance*
wasm::LookupFaultingInstance(const CodeSegment& codeSegment, void* pc, void* fp)
wasm::LookupFaultingInstance(const ModuleSegment& codeSegment, void* pc, void* fp)
{
// Assume bug-caused faults can be raised at any PC and apply the logic of
// ProfilingFrameIterator to reject any pc outside the (post-prologue,

View File

@ -34,12 +34,10 @@ class Label;
namespace wasm {
class CallSite;
class Code;
class CodeRange;
class CodeSegment;
class ModuleSegment;
class DebugFrame;
class DebugState;
class Instance;
class SigIdDesc;
struct Frame;
@ -229,7 +227,7 @@ GenerateFunctionEpilogue(jit::MacroAssembler& masm, unsigned framePushed, FuncOf
// is such a plausible instance, and otherwise null.
Instance*
LookupFaultingInstance(const CodeSegment& codeSegment, void* pc, void* fp);
LookupFaultingInstance(const ModuleSegment& codeSegment, void* pc, void* fp);
// Return whether the given PC is in wasm code.

View File

@ -937,7 +937,7 @@ ModuleGenerator::finishMetadata(const ShareableBytes& bytecode)
return true;
}
UniqueCodeSegment
UniqueModuleSegment
ModuleGenerator::finish(const ShareableBytes& bytecode)
{
MOZ_ASSERT(finishedFuncDefs_);
@ -972,7 +972,7 @@ ModuleGenerator::finish(const ShareableBytes& bytecode)
if (!finishMetadata(bytecode))
return nullptr;
return CodeSegment::create(tier(), masm_, bytecode, *linkDataTier_, *metadata_);
return ModuleSegment::create(tier(), masm_, bytecode, *linkDataTier_, *metadata_);
}
SharedModule
@ -980,12 +980,12 @@ ModuleGenerator::finishModule(const ShareableBytes& bytecode)
{
MOZ_ASSERT(mode() == CompileMode::Once || mode() == CompileMode::Tier1);
UniqueCodeSegment codeSegment = finish(bytecode);
if (!codeSegment)
UniqueModuleSegment moduleSegment = finish(bytecode);
if (!moduleSegment)
return nullptr;
JumpTables jumpTables;
if (!jumpTables.init(mode(), *codeSegment, metadataTier_->codeRanges))
if (!jumpTables.init(mode(), *moduleSegment, metadataTier_->codeRanges))
return nullptr;
UniqueConstBytes maybeDebuggingBytes;
@ -1000,7 +1000,7 @@ ModuleGenerator::finishModule(const ShareableBytes& bytecode)
return nullptr;
}
SharedCode code = js_new<Code>(Move(codeSegment), *metadata_, Move(jumpTables));
SharedCode code = js_new<Code>(Move(moduleSegment), *metadata_, Move(jumpTables));
if (!code)
return nullptr;
@ -1032,13 +1032,13 @@ ModuleGenerator::finishTier2(Module& module)
if (cancelled_ && *cancelled_)
return false;
UniqueCodeSegment codeSegment = finish(module.bytecode());
if (!codeSegment)
UniqueModuleSegment moduleSegment = finish(module.bytecode());
if (!moduleSegment)
return false;
module.finishTier2(linkData_.takeLinkData(tier()),
metadata_->takeMetadata(tier()),
Move(codeSegment),
Move(moduleSegment),
env_);
return true;
}

View File

@ -203,7 +203,7 @@ class MOZ_STACK_CLASS ModuleGenerator
bool finishOutstandingTask();
bool finishCode();
bool finishMetadata(const ShareableBytes& bytecode);
UniqueCodeSegment finish(const ShareableBytes& bytecode);
UniqueModuleSegment finish(const ShareableBytes& bytecode);
bool isAsmJS() const { return env_->isAsmJS(); }
Tier tier() const { return env_->tier; }

View File

@ -84,7 +84,7 @@ class Instance
const Code& code() const { return *code_; }
DebugState& debug() { return *debug_; }
const DebugState& debug() const { return *debug_; }
const CodeSegment& codeSegment(Tier t) const { return code_->segment(t); }
const ModuleSegment& moduleSegment(Tier t) const { return code_->segment(t); }
TlsData* tlsData() const { return tlsData_.get(); }
uint8_t* globalData() const { return (uint8_t*)&tlsData_->globalArea; }
uint8_t* codeBase(Tier t) const { return code_->segment(t).base(); }

View File

@ -279,7 +279,7 @@ Module::notifyCompilationListeners()
void
Module::finishTier2(UniqueLinkDataTier linkData2, UniqueMetadataTier metadata2,
UniqueCodeSegment code2, ModuleEnvironment* env2)
UniqueModuleSegment code2, ModuleEnvironment* env2)
{
// Install the data in the data structures. They will not be visible yet.
@ -648,12 +648,12 @@ Module::extractCode(JSContext* cx, Tier tier, MutableHandleValue vp) const
return true;
}
const CodeSegment& codeSegment = code_->segment(tier);
RootedObject code(cx, JS_NewUint8Array(cx, codeSegment.length()));
const ModuleSegment& moduleSegment = code_->segment(tier);
RootedObject code(cx, JS_NewUint8Array(cx, moduleSegment.length()));
if (!code)
return false;
memcpy(code->as<TypedArrayObject>().viewDataUnshared(), codeSegment.base(), codeSegment.length());
memcpy(code->as<TypedArrayObject>().viewDataUnshared(), moduleSegment.base(), moduleSegment.length());
RootedValue value(cx, ObjectValue(*code));
if (!JS_DefineProperty(cx, result, "code", value, JSPROP_ENUMERATE))
@ -1167,21 +1167,21 @@ Module::instantiate(JSContext* cx,
// bytes that we keep around for debugging instead, because the debugger
// may patch the pre-linked code at any time.
if (!codeIsBusy_.compareExchange(false, true)) {
auto codeSegment = CodeSegment::create(Tier::Baseline,
*unlinkedCodeForDebugging_,
*bytecode_,
linkData_.linkData(Tier::Baseline),
metadata());
if (!codeSegment) {
auto moduleSegment = ModuleSegment::create(Tier::Baseline,
*unlinkedCodeForDebugging_,
*bytecode_,
linkData_.linkData(Tier::Baseline),
metadata());
if (!moduleSegment) {
ReportOutOfMemory(cx);
return false;
}
JumpTables jumpTables;
if (!jumpTables.init(CompileMode::Once, *codeSegment, metadata(Tier::Baseline).codeRanges))
if (!jumpTables.init(CompileMode::Once, *moduleSegment, metadata(Tier::Baseline).codeRanges))
return false;
code = js_new<Code>(Move(codeSegment), metadata(), Move(jumpTables));
code = js_new<Code>(Move(moduleSegment), metadata(), Move(jumpTables));
if (!code) {
ReportOutOfMemory(cx);
return false;

View File

@ -33,7 +33,7 @@ namespace wasm {
struct CompileArgs;
// LinkData contains all the metadata necessary to patch all the locations
// that depend on the absolute address of a CodeSegment.
// that depend on the absolute address of a ModuleSegment.
//
// LinkData is built incrementally by ModuleGenerator and then stored immutably
// in Module. LinkData is distinct from Metadata in that LinkData is owned and
@ -126,7 +126,7 @@ typedef ExclusiveWaitableData<Tiering> ExclusiveTiering;
// to produce a new, equivalent Module.
//
// Fully linked-and-instantiated code (represented by Code and its owned
// CodeSegment) can be shared between instances, provided none of those
// ModuleSegment) can be shared between instances, provided none of those
// instances are being debugged. If patchable code is needed then each instance
// must have its own Code. Module eagerly creates a new Code and gives it to the
// first instance; it then instantiates new Code objects from a copy of the
@ -193,7 +193,7 @@ class Module : public JS::WasmModule
~Module() override { /* Note: can be called on any thread */ }
const Code& code() const { return *code_; }
const CodeSegment& codeSegment(Tier t) const { return code_->segment(t); }
const ModuleSegment& moduleSegment(Tier t) const { return code_->segment(t); }
const Metadata& metadata() const { return code_->metadata(); }
const MetadataTier& metadata(Tier t) const { return code_->metadata(t); }
const LinkData& linkData() const { return linkData_; }
@ -221,7 +221,7 @@ class Module : public JS::WasmModule
void startTier2(const CompileArgs& args);
void finishTier2(UniqueLinkDataTier linkData2, UniqueMetadataTier metadata2,
UniqueCodeSegment code2, ModuleEnvironment* env2);
UniqueModuleSegment code2, ModuleEnvironment* env2);
void blockOnTier2Complete() const;
// JS API and JS::WasmModule implementation:
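
A note on the "patch all the locations" phrasing in the LinkData comment above: the idea is that every code location embedding an absolute address into the segment is recorded as an offset pair and resolved once the segment is placed in memory. A minimal sketch follows; the InternalLink shape and the StaticallyLink name are assumptions for illustration, not taken from this patch.

#include <cstddef>
#include <cstdint>
#include <cstring>

// Assumed record shape: one entry per code location whose bytes embed an
// absolute address somewhere inside the (not yet placed) module segment.
struct InternalLink {
    uint32_t patchAtOffset;  // offset of the pointer-sized field to rewrite
    uint32_t targetOffset;   // offset the rewritten pointer should refer to
};

// Once the segment's base address is known, each recorded link is resolved
// by writing (base + targetOffset) into (base + patchAtOffset).
static void StaticallyLink(uint8_t* base, const InternalLink* links, size_t n)
{
    for (size_t i = 0; i < n; i++) {
        uintptr_t target = uintptr_t(base + links[i].targetOffset);
        memcpy(base + links[i].patchAtOffset, &target, sizeof(target));
    }
}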

View File

@ -793,7 +793,7 @@ ComputeAccessAddress(EMULATOR_CONTEXT* context, const Disassembler::ComplexAddre
MOZ_COLD static void
HandleMemoryAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddress,
const CodeSegment* segment, const Instance& instance, JitActivation* activation,
const ModuleSegment* segment, const Instance& instance, JitActivation* activation,
uint8_t** ppc)
{
MOZ_RELEASE_ASSERT(instance.code().containsCodePC(pc));
@ -949,7 +949,7 @@ HandleMemoryAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddr
MOZ_COLD static void
HandleMemoryAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddress,
const CodeSegment* segment, const Instance& instance, JitActivation* activation,
const ModuleSegment* segment, const Instance& instance, JitActivation* activation,
uint8_t** ppc)
{
MOZ_RELEASE_ASSERT(instance.code().containsCodePC(pc));
@ -996,31 +996,33 @@ HandleFault(PEXCEPTION_POINTERS exception)
uint8_t* pc = *ppc;
const CodeSegment* codeSegment = LookupCodeSegment(pc);
if (!codeSegment)
if (!codeSegment || !codeSegment->isModule())
return false;
const ModuleSegment* moduleSegment = codeSegment->asModule();
JitActivation* activation = TlsContext.get()->activation()->asJit();
MOZ_ASSERT(activation);
const Instance* instance = LookupFaultingInstance(*codeSegment, pc, ContextToFP(context));
const Instance* instance = LookupFaultingInstance(*moduleSegment, pc, ContextToFP(context));
if (!instance) {
// On Windows, it is possible for InterruptRunningJitCode to execute
// between a faulting instruction and the handling of the fault due
// to InterruptRunningJitCode's use of SuspendThread. When this happens,
// after ResumeThread, the exception handler is called with pc equal to
// CodeSegment.interrupt, which is logically wrong. The Right Thing would
// ModuleSegment.interrupt, which is logically wrong. The Right Thing would
// be for the OS to make fault-handling atomic (so that CONTEXT.pc was
// always the logically-faulting pc). Fortunately, we can detect this
// case and silence the exception ourselves (the exception will
// retrigger after the interrupt jumps back to resumePC).
return activation->isWasmInterrupted() &&
pc == codeSegment->interruptCode() &&
codeSegment->containsCodePC(activation->wasmInterruptResumePC());
pc == moduleSegment->interruptCode() &&
moduleSegment->containsCodePC(activation->wasmInterruptResumePC());
}
// In the same race-with-interrupt situation above, it's *also* possible
// that the reported 'pc' is the pre-interrupt pc, not post-interrupt
// codeSegment->interruptCode (this may be windows-version-specific). In
// moduleSegment->interruptCode (this may be windows-version-specific). In
// this case, lookupTrap()/lookupMemoryAccess() will all succeed causing the
// pc to be redirected *again* (to a trap stub), leading to the interrupt
// stub never being called. Since the goal of the async interrupt is to break
@ -1034,11 +1036,11 @@ HandleFault(PEXCEPTION_POINTERS exception)
if (record->ExceptionCode == EXCEPTION_ILLEGAL_INSTRUCTION) {
Trap trap;
BytecodeOffset bytecode;
if (!codeSegment->code().lookupTrap(pc, &trap, &bytecode))
if (!moduleSegment->code().lookupTrap(pc, &trap, &bytecode))
return false;
activation->startWasmTrap(trap, bytecode.offset, pc, ContextToFP(context));
*ppc = codeSegment->trapCode();
*ppc = moduleSegment->trapCode();
return true;
}
@ -1054,7 +1056,7 @@ HandleFault(PEXCEPTION_POINTERS exception)
MOZ_ASSERT(activation->compartment() == instance->compartment());
HandleMemoryAccess(context, pc, faultingAddress, codeSegment, *instance, activation, ppc);
HandleMemoryAccess(context, pc, faultingAddress, moduleSegment, *instance, activation, ppc);
return true;
}
@ -1150,10 +1152,12 @@ HandleMachException(JSContext* cx, const ExceptionRequest& request)
AutoNoteSingleThreadedRegion anstr;
const CodeSegment* codeSegment = LookupCodeSegment(pc);
if (!codeSegment)
if (!codeSegment || !codeSegment->isModule())
return false;
const Instance* instance = LookupFaultingInstance(*codeSegment, pc, ContextToFP(&context));
const ModuleSegment* moduleSegment = codeSegment->asModule();
const Instance* instance = LookupFaultingInstance(*moduleSegment, pc, ContextToFP(&context));
if (!instance)
return false;
@ -1163,11 +1167,11 @@ HandleMachException(JSContext* cx, const ExceptionRequest& request)
if (request.body.exception == EXC_BAD_INSTRUCTION) {
Trap trap;
BytecodeOffset bytecode;
if (!codeSegment->code().lookupTrap(pc, &trap, &bytecode))
if (!moduleSegment->code().lookupTrap(pc, &trap, &bytecode))
return false;
activation->startWasmTrap(trap, bytecode.offset, pc, ContextToFP(&context));
*ppc = codeSegment->trapCode();
*ppc = moduleSegment->trapCode();
} else {
MOZ_ASSERT(request.body.exception == EXC_BAD_ACCESS);
if (request.body.codeCnt != 2)
@ -1180,7 +1184,7 @@ HandleMachException(JSContext* cx, const ExceptionRequest& request)
if (!IsHeapAccessAddress(*instance, faultingAddress))
return false;
HandleMemoryAccess(&context, pc, faultingAddress, codeSegment, *instance, activation, ppc);
HandleMemoryAccess(&context, pc, faultingAddress, moduleSegment, *instance, activation, ppc);
}
// Update the thread state with the new pc and register values.
@ -1367,10 +1371,12 @@ HandleFault(int signum, siginfo_t* info, void* ctx)
uint8_t* pc = *ppc;
const CodeSegment* segment = LookupCodeSegment(pc);
if (!segment)
if (!segment || !segment->isModule())
return false;
const Instance* instance = LookupFaultingInstance(*segment, pc, ContextToFP(context));
const ModuleSegment* moduleSegment = segment->asModule();
const Instance* instance = LookupFaultingInstance(*moduleSegment, pc, ContextToFP(context));
if (!instance)
return false;
@ -1385,11 +1391,11 @@ HandleFault(int signum, siginfo_t* info, void* ctx)
#endif
Trap trap;
BytecodeOffset bytecode;
if (!segment->code().lookupTrap(pc, &trap, &bytecode))
if (!moduleSegment->code().lookupTrap(pc, &trap, &bytecode))
return false;
activation->startWasmTrap(trap, bytecode.offset, pc, ContextToFP(context));
*ppc = segment->trapCode();
*ppc = moduleSegment->trapCode();
return true;
}
@ -1421,12 +1427,12 @@ HandleFault(int signum, siginfo_t* info, void* ctx)
// error and we should signal that properly, but to do so we must inspect
// the operand of the failed access.
MOZ_ALWAYS_TRUE(activation->startWasmInterrupt(ToRegisterState(context)));
*ppc = segment->unalignedAccessCode();
*ppc = moduleSegment->unalignedAccessCode();
return true;
}
#endif
HandleMemoryAccess(context, pc, faultingAddress, segment, *instance, activation, ppc);
HandleMemoryAccess(context, pc, faultingAddress, moduleSegment, *instance, activation, ppc);
return true;
}
@ -1489,7 +1495,7 @@ RedirectIonBackedgesToInterruptCheck(JSContext* cx)
}
bool
wasm::InInterruptibleCode(JSContext* cx, uint8_t* pc, const CodeSegment** cs)
wasm::InInterruptibleCode(JSContext* cx, uint8_t* pc, const ModuleSegment** ms)
{
// Only interrupt in function code so that the frame iterators have the
// invariant that resumePC always has a function CodeRange and we can't
@ -1498,11 +1504,12 @@ wasm::InInterruptibleCode(JSContext* cx, uint8_t* pc, const CodeSegment** cs)
if (!cx->compartment())
return false;
*cs = LookupCodeSegment(pc);
if (!*cs)
const CodeSegment* cs = LookupCodeSegment(pc);
if (!cs || !cs->isModule())
return false;
const CodeRange* codeRange = (*cs)->code().lookupRange(pc);
*ms = cs->asModule();
const CodeRange* codeRange = (*ms)->code().lookupRange(pc);
return codeRange && codeRange->isFunction();
}
@ -1527,8 +1534,8 @@ RedirectJitCodeToInterruptCheck(JSContext* cx, CONTEXT* context)
uint8_t* pc = *ContextToPC(context);
#endif
const CodeSegment* codeSegment = nullptr;
if (!InInterruptibleCode(cx, pc, &codeSegment))
const ModuleSegment* moduleSegment = nullptr;
if (!InInterruptibleCode(cx, pc, &moduleSegment))
return false;
#ifdef JS_SIMULATOR
@ -1549,7 +1556,7 @@ RedirectJitCodeToInterruptCheck(JSContext* cx, CONTEXT* context)
if (!activation->startWasmInterrupt(ToRegisterState(context)))
return false;
*ContextToPC(context) = codeSegment->interruptCode();
*ContextToPC(context) = moduleSegment->interruptCode();
#endif
return true;
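
The isModule()/asModule() checks added to these handlers imply that CodeSegment is now a base class and ModuleSegment the downcastable subclass carrying the module's stub entry points. A rough sketch of that relationship; only isModule(), asModule(), and the stub accessors are visible in the diff, so the Kind enum and the stored stub pointers below are assumptions for illustration.

#include <cassert>
#include <cstdint>

class ModuleSegment;

class CodeSegment {
  protected:
    enum class Kind { Module, Other };   // Kind names are assumed
    explicit CodeSegment(Kind kind) : kind_(kind) {}
    Kind kind_;

  public:
    bool isModule() const { return kind_ == Kind::Module; }
    const ModuleSegment* asModule() const {
        assert(isModule());              // callers test isModule() first
        return reinterpret_cast<const ModuleSegment*>(this);
    }
};

// ModuleSegment holds the per-module stub entry points that the fault
// handlers above redirect pc to.
class ModuleSegment : public CodeSegment {
  public:
    ModuleSegment() : CodeSegment(Kind::Module) {}
    uint8_t* interruptCode() const { return interruptCode_; }
    uint8_t* trapCode() const { return trapCode_; }
    uint8_t* unalignedAccessCode() const { return unalignedAccessCode_; }

  private:
    uint8_t* interruptCode_ = nullptr;
    uint8_t* trapCode_ = nullptr;
    uint8_t* unalignedAccessCode_ = nullptr;
};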

View File

@ -49,12 +49,12 @@ EnsureSignalHandlers(JSContext* cx);
bool
HaveSignalHandlers();
class CodeSegment;
class ModuleSegment;
// Returns true if wasm code is on top of the activation stack (and fills out
// the code segment outparam in this case), or false otherwise.
bool
InInterruptibleCode(JSContext* cx, uint8_t* pc, const CodeSegment** cs);
InInterruptibleCode(JSContext* cx, uint8_t* pc, const ModuleSegment** ms);
#if defined(XP_DARWIN)
// On OSX we are forced to use the lower-level Mach exception mechanism instead
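
For reference, the contract of the renamed declaration above: the ModuleSegment outparam is filled only when the function returns true. A hypothetical caller, assuming the declarations from this header; RedirectToInterruptStub is an assumed name, not code from the patch.

// Hypothetical helper; JSContext and InInterruptibleCode come from the
// surrounding SpiderMonkey headers.
static bool RedirectToInterruptStub(JSContext* cx, uint8_t* pc, uint8_t** ppc)
{
    const ModuleSegment* ms = nullptr;
    if (!InInterruptibleCode(cx, pc, &ms))
        return false;               // pc is not in interruptible wasm code
    *ppc = ms->interruptCode();     // ms is guaranteed non-null on success
    return true;
}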

View File

@ -268,15 +268,19 @@ fuzzy-if(cocoaWidget&&layersGPUAccelerated,1,2) == anim-gradient-attr-presence-0
# Test filtering of excessive times
== filtered-instance-time-1.svg anim-standard-ref.svg
# Animation tests disable reduceTimerPrecision because they use a screenshot
# mechanism that relies on performance.now(), which can be finicky at low
# timer precision.
# Test animation using defs element
== anim-defs-gradient-property.svg lime.svg
== anim-defs-gradient-attribute.svg lime.svg
== anim-defs-fill.svg lime.svg
== anim-defs-width.svg lime.svg
pref(privacy.reduceTimerPrecision,false) == anim-defs-gradient-property.svg lime.svg
pref(privacy.reduceTimerPrecision,false) == anim-defs-gradient-attribute.svg lime.svg
pref(privacy.reduceTimerPrecision,false) == anim-defs-fill.svg lime.svg
pref(privacy.reduceTimerPrecision,false) == anim-defs-width.svg lime.svg
# Test animation that changes 'display' attribute
== anim-display.svg lime.svg
== anim-display-in-g-element.svg lime.svg
pref(privacy.reduceTimerPrecision,false) == anim-display.svg lime.svg
pref(privacy.reduceTimerPrecision,false) == anim-display-in-g-element.svg lime.svg
# Test animation that change 'display' style value to 'none'
== anim-change-display-none-for-ancestor-elem.html lime.html

View File

@ -389,6 +389,7 @@ opaque-types = [
"mozilla::dom::Optional",
"mozilla::dom::OwningNodeOrString_Value",
"mozilla::dom::Nullable",
"mozilla::external::AtomicRefCounted",
"RefPtr_Proxy",
"RefPtr_Proxy_member_function",
"nsAutoPtr_Proxy",

View File

@ -7,25 +7,11 @@
- file, You can obtain one at http://mozilla.org/MPL/2.0/. -->
<!--
This file contains the nodes that will be overlayed on top of
<chrome://communicator/content/tasksOverlay.xul>.
Declare XML entities that this file refers to in layoutdebug-overlay.dtd.
-->
<!DOCTYPE window SYSTEM "chrome://layoutdebug/locale/layoutdebug-overlay.dtd" >
<overlay id="layoutdebugTaskMenuID"
xmlns="http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul">
<!-- SeaMonkey -->
<menupopup id="taskPopup">
<menuitem label="&ldbCmd.label;"
accesskey="&ldbCmd.accesskey;"
oncommand="toOpenWindowByType('mozapp:layoutdebug',
'chrome://layoutdebug/content/');"/>
</menupopup>
<!-- Firefox -->
<menupopup id="menu_ToolsPopup">
<menuitem label="&ldbCmd.label;"

Some files were not shown because too many files have changed in this diff.