merge mozilla-inbound to mozilla-central a=merge
commit f4a16a723a
@@ -273,13 +273,6 @@ function loadSnippets()
// Try to update from network.
let xhr = new XMLHttpRequest();
xhr.timeout = 5000;
try {
xhr.open("GET", updateURL, true);
} catch (ex) {
showSnippets();
loadCompleted();
return;
}
// Even if fetching should fail we don't want to spam the server, thus
// set the last update time regardless of its results. Will retry tomorrow.
gSnippetsMap.set("snippets-last-update", Date.now());
@@ -291,7 +284,14 @@ function loadSnippets()
showSnippets();
loadCompleted();
};
xhr.send(null);
try {
xhr.open("GET", updateURL, true);
xhr.send(null);
} catch (ex) {
showSnippets();
loadCompleted();
return;
}
} else {
showSnippets();
loadCompleted();
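
Several hunks in this merge apply the same defensive pattern: xhr.open() can throw synchronously (for example on a malformed or revoked URL), so send() is moved inside the same try/catch instead of being called outside the guarded block. A minimal standalone sketch of the pattern, with a placeholder URL and callback that are illustrative rather than taken from the patch:

    // Sketch: guard both open() and send(), since either may throw synchronously.
    function fetchSnippetsLike(updateURL, onFallback) {
      let xhr = new XMLHttpRequest();
      xhr.timeout = 5000;
      try {
        xhr.open("GET", updateURL, true);
        xhr.send(null); // kept in the same try: open() may already have thrown
      } catch (ex) {
        onFallback(ex); // degrade gracefully instead of propagating the throw
      }
    }
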
@@ -22,8 +22,8 @@ body {
}

input {
font: message-box !important;
font-size: 16px !important;
font: message-box;
font-size: 16px;
}

input[type=button] {
@@ -84,7 +84,9 @@
</div>
<div id="newtab-margin-bottom"/>
</div>
<input id="newtab-customize-button" type="button" dir="&locale.dir;" title="&newtab.customize.title;"/>
<input id="newtab-customize-button" type="button" dir="&locale.dir;"
value="⚙"
title="&newtab.customize.title;"/>
</body>
<script type="text/javascript;version=1.8" src="chrome://browser/content/contentSearchUI.js"/>
<script type="text/javascript;version=1.8" src="chrome://browser/content/newtab/newTab.js"/>
@@ -19,6 +19,9 @@ support-files =
test_firstParty_iframe_http_redirect.html
test_firstParty_postMessage.html
window.html
worker_blobify.js
worker_deblobify.js

[browser_firstPartyIsolation.js]
[browser_localStorageIsolation.js]
[browser_blobURLIsolation.js]
@@ -0,0 +1,97 @@
/**
 * Bug 1264573 - A test case for blob url isolation.
 */

const TEST_PAGE = "http://mochi.test:8888/browser/browser/components/" +
"originattributes/test/browser/file_firstPartyBasic.html";
const SCRIPT_WORKER_BLOBIFY = "worker_blobify.js";
const SCRIPT_WORKER_DEBLOBIFY = "worker_deblobify.js";

function page_blobify(browser, input) {
return ContentTask.spawn(browser, input, function(input) {
return { blobURL: content.URL.createObjectURL(new content.Blob([input])) };
});
}

function page_deblobify(browser, blobURL) {
return ContentTask.spawn(browser, blobURL, function* (blobURL) {
if ("error" in blobURL) {
return blobURL;
}
blobURL = blobURL.blobURL;

function blobURLtoBlob(blobURL) {
return new content.Promise(function (resolve) {
let xhr = new content.XMLHttpRequest();
xhr.open("GET", blobURL, true);
xhr.onload = function () {
resolve(xhr.response);
};
xhr.onerror = function () {
resolve("xhr error");
};
xhr.responseType = "blob";
xhr.send();
});
}

function blobToString(blob) {
return new content.Promise(function (resolve) {
let fileReader = new content.FileReader();
fileReader.onload = function () {
resolve(fileReader.result);
};
fileReader.readAsText(blob);
});
}

let blob = yield blobURLtoBlob(blobURL);
if (blob == "xhr error") {
return "xhr error";
}

return yield blobToString(blob);
});
}

function workerIO(browser, scriptFile, message) {
return ContentTask.spawn(browser, {scriptFile, message}, function* (args) {
let worker = new content.Worker(args.scriptFile);
let promise = new content.Promise(function(resolve) {
let listenFunction = function(event) {
worker.removeEventListener("message", listenFunction, false);
worker.terminate();
resolve(event.data);
};
worker.addEventListener("message", listenFunction, false);
});
worker.postMessage(args.message);
return yield promise;
});
}

let worker_blobify = (browser, input) => workerIO(browser, SCRIPT_WORKER_BLOBIFY, input);
let worker_deblobify = (browser, blobURL) => workerIO(browser, SCRIPT_WORKER_DEBLOBIFY, blobURL);

function doTest(blobify, deblobify) {
let blobURL = null;
return function* (browser) {
if (blobURL === null) {
let input = Math.random().toString();
blobURL = yield blobify(browser, input);
return input;
}
let result = yield deblobify(browser, blobURL);
blobURL = null;
return result;
}
}

let tests = [];
for (let blobify of [page_blobify, worker_blobify]) {
for (let deblobify of [page_deblobify, worker_deblobify]) {
tests.push(doTest(blobify, deblobify));
}
}

IsolationTestTools.runTests(TEST_PAGE, tests);
@@ -259,11 +259,11 @@ this.IsolationTestTools = {
* The URL of the page that will be tested or an object contains 'url',
* the tested page, 'firstFrameSetting' for the frame setting of the first
* tab, and 'secondFrameSetting' for the second tab.
* @param aGetResultFunc
* A function which is responsible for returning the isolation result back
* to the framework for further checking. This function will be provided
* the browser object of the tab, that allows modifying or fetching results
* from the page content.
* @param aGetResultFuncs
* An array of functions or a single function which are responsible for
* returning the isolation result back to the framework for further checking.
* Each of these functions will be provided the browser object of the tab,
* that allows modifying or fetching results from the page content.
* @param aCompareResultFunc
* An optional function which allows modifying the way the framework
* checks results. This function will be provided a boolean to indicate
@@ -271,7 +271,7 @@ this.IsolationTestTools = {
* a boolean to tell whether isolation is working. If this function
* is not given, the framework will take care of checking by itself.
*/
runTests(aURL, aGetResultFunc, aCompareResultFunc) {
runTests(aURL, aGetResultFuncs, aCompareResultFunc) {
let pageURL;
let firstFrameSetting;
let secondFrameSetting;
@@ -284,6 +284,10 @@ this.IsolationTestTools = {
secondFrameSetting = aURL.secondFrameSetting;
}

if (!Array.isArray(aGetResultFuncs)) {
aGetResultFuncs = [aGetResultFuncs];
}

let tabSettings = [
{ firstPartyDomain: "http://example.com", userContextId: 1},
{ firstPartyDomain: "http://example.org", userContextId: 2}
@@ -303,32 +307,34 @@ this.IsolationTestTools = {
tabSettings[tabSettingB],
secondFrameSetting);

// Fetch results from tabs.
let resultA = yield aGetResultFunc(tabInfoA.browser);
let resultB = yield aGetResultFunc(tabInfoB.browser);
for (let getResultFunc of aGetResultFuncs) {
// Fetch results from tabs.
let resultA = yield getResultFunc(tabInfoA.browser);
let resultB = yield getResultFunc(tabInfoB.browser);

// Compare results.
let result = false;
let shouldIsolate = (aMode !== TEST_MODE_NO_ISOLATION) &&
tabSettingA !== tabSettingB;
if (aCompareResultFunc) {
result = yield aCompareResultFunc(shouldIsolate, resultA, resultB);
} else {
result = shouldIsolate ? resultA !== resultB :
resultA === resultB;
}

let msg = `Testing ${TEST_MODE_NAMES[aMode]} for ` +
`isolation ${shouldIsolate ? "on" : "off"} with TabSettingA ` +
`${tabSettingA} and tabSettingB ${tabSettingB}` +
`, resultA = ${resultA}, resultB = ${resultB}`;

ok(result, msg);
}

// Close Tabs.
yield BrowserTestUtils.removeTab(tabInfoA.tab);
yield BrowserTestUtils.removeTab(tabInfoB.tab);

// Compare results.
let result = false;
let shouldIsolate = (aMode !== TEST_MODE_NO_ISOLATION) &&
tabSettingA !== tabSettingB;
if (aCompareResultFunc) {
result = yield aCompareResultFunc(shouldIsolate, resultA, resultB);
} else {
result = shouldIsolate ? resultA !== resultB :
resultA === resultB;
}

let msg = `Testing ${TEST_MODE_NAMES[aMode]} for ` +
`isolation ${shouldIsolate ? "on" : "off"} with TabSettingA ` +
`${tabSettingA} and tabSettingB ${tabSettingB}`;

ok(result, msg);
}

});
}
};
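
runTests() above now accepts either a single result function or an array of them, normalizing with Array.isArray() and looping the fetch-and-compare step once per function. A rough sketch of the per-function contract, assuming the framework semantics described in the doc comment (the helper name here is hypothetical):

    // Drive one result function against both tabs and judge the outcome:
    // results must differ when isolation should hold, match when it should not.
    function* checkOne(getResultFunc, tabInfoA, tabInfoB, shouldIsolate) {
      let resultA = yield getResultFunc(tabInfoA.browser);
      let resultB = yield getResultFunc(tabInfoB.browser);
      return shouldIsolate ? resultA !== resultB : resultA === resultB;
    }
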
@@ -0,0 +1,11 @@
// Wait for a string to be posted to this worker.
// Create a blob containing this string, and then
// post back a blob URL pointing to the blob.
self.addEventListener("message", function (e) {
try {
var blobURL = URL.createObjectURL(new Blob([e.data]));
postMessage({ blobURL });
} catch (e) {
postMessage({ error: e.message });
}
}, false);
@@ -0,0 +1,31 @@
// Wait for a blob URL to be posted to this worker.
// Obtain the blob, and read the string contained in it.
// Post back the string.

var postStringInBlob = function (blobObject) {
var fileReader = new FileReaderSync();
var result = fileReader.readAsText(blobObject);
postMessage(result);
};

self.addEventListener("message", function (e) {
if ("error" in e.data) {
postMessage(e.data);
return;
}
var blobURL = e.data.blobURL,
xhr = new XMLHttpRequest();
try {
xhr.open("GET", blobURL, true);
xhr.onload = function () {
postStringInBlob(xhr.response);
};
xhr.onerror = function () {
postMessage({ error: "xhr error" });
};
xhr.responseType = "blob";
xhr.send();
} catch (e) {
postMessage({ error: e.message });
}
}, false);
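
worker_deblobify.js can use FileReaderSync because it runs in a worker; that API does not exist on the main thread, which is why the page-side path in the test above wraps the asynchronous FileReader in a promise instead. A minimal worker-only sketch:

    // Inside a Web Worker: synchronous blob-to-string conversion.
    var reader = new FileReaderSync();
    var text = reader.readAsText(new Blob(["hello"]));
    // text is now "hello"; no onload callback is needed.
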
@@ -940,12 +940,6 @@ Experiments.Experiments.prototype = {
_httpGetRequest: function (url) {
this._log.trace("httpGetRequest(" + url + ")");
let xhr = Cc["@mozilla.org/xmlextras/xmlhttprequest;1"].createInstance(Ci.nsIXMLHttpRequest);
try {
xhr.open("GET", url);
} catch (e) {
this._log.error("httpGetRequest() - Error opening request to " + url + ": " + e);
return Promise.reject(new Error("Experiments - Error opening XHR for " + url));
}

this._networkRequest = xhr;
let deferred = Promise.defer();
@@ -972,12 +966,19 @@ Experiments.Experiments.prototype = {
this._networkRequest = null;
};

if (xhr.channel instanceof Ci.nsISupportsPriority) {
xhr.channel.priority = Ci.nsISupportsPriority.PRIORITY_LOWEST;
}
try {
xhr.open("GET", url);

xhr.timeout = MANIFEST_FETCH_TIMEOUT_MSEC;
xhr.send(null);
if (xhr.channel instanceof Ci.nsISupportsPriority) {
xhr.channel.priority = Ci.nsISupportsPriority.PRIORITY_LOWEST;
}

xhr.timeout = MANIFEST_FETCH_TIMEOUT_MSEC;
xhr.send(null);
} catch (e) {
this._log.error("httpGetRequest() - Error opening request to " + url + ": " + e);
return Promise.reject(new Error("Experiments - Error opening XHR for " + url));
}
return deferred.promise;
},
@@ -406,7 +406,7 @@
padding: 3px 2px 1px;
text-transform: none;
}
.pkt_ext_containersaved .pkt_ext_tag_input_wrapper input::-moz-placeholder {
.pkt_ext_containersaved .pkt_ext_tag_input_wrapper input::placeholder {
color: #a9a9a9;
letter-spacing: normal;
text-transform: none;
@@ -2330,7 +2330,7 @@ html|span.ac-emphasize-text-url {
}
}

#editBMPanel_tagsField > .autocomplete-textbox-container > .textbox-input-box > html|*.textbox-input::-moz-placeholder {
#editBMPanel_tagsField > .autocomplete-textbox-container > .textbox-input-box > html|*.textbox-input::placeholder {
opacity: 1.0;
color: #bbb;
}
@@ -85,6 +85,13 @@
width: 28px;
}

#newtab-customize-button {
font-size: 28px;
padding: 0;
/* only display the text label when CSS backgrounds are disabled (e.g. in high contrast mode) */
color: transparent;
}

#newtab-customize-button:-moz-any(:hover, :active, [active]) {
background-image: -moz-image-rect(url(chrome://browser/skin/newtab/controls.svg), 0, 64, 32, 32);
background-color: #FFFFFF;
@@ -1357,8 +1357,8 @@ toolbar[brighttext] #close-button {
-moz-box-direction: reverse;
}

html|*.urlbar-input:-moz-lwtheme::-moz-placeholder,
.searchbar-textbox:-moz-lwtheme > .autocomplete-textbox-container > .textbox-input-box > html|*.textbox-input::-moz-placeholder {
html|*.urlbar-input:-moz-lwtheme::placeholder,
.searchbar-textbox:-moz-lwtheme > .autocomplete-textbox-container > .textbox-input-box > html|*.textbox-input::placeholder {
opacity: 1.0;
color: #777;
}
@@ -1854,7 +1854,7 @@ output {
border-color: #66afe9;
outline: 0;
box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 8px rgba(102, 175, 233, 0.6); }
.form-control::-moz-placeholder {
.form-control::placeholder {
color: #999999;
opacity: 1; }
.form-control:-ms-input-placeholder {
@@ -4510,4 +4510,4 @@ body {
.jumbotron {
border-bottom: 300; } }

/*# sourceMappingURL=data:application/json;base64,eyJ2ZXJzaW9uIjozLCJmaWxlIjoibWFpbi5jc3MiLCJuYW1lcyI6W10sIm1hcHBpbmdzIjoiIiwic291cmNlcyI6WyJtYWluLnNjc3MiXSwic291cmNlc0NvbnRlbnQiOlsiJGljb24tZm9udC1wYXRoOiBcIi4uL2Jvd2VyX2NvbXBvbmVudHMvYm9vdHN0cmFwLXNhc3Mtb2ZmaWNpYWwvdmVuZG9yL2Fzc2V0cy9mb250cy9ib290c3RyYXAvXCI7XG5cbi8vIGJvd2VyOnNjc3NcbkBpbXBvcnQgXCIuLi9ib3dlcl9jb21wb25lbnRzL2Jvb3RzdHJhcC1zYXNzLW9mZmljaWFsL3ZlbmRvci9hc3NldHMvc3R5bGVzaGVldHMvYm9vdHN0cmFwLnNjc3NcIjtcbi8vIGVuZGJvd2VyXG5cbi5icm93c2VoYXBweSB7XG4gICAgbWFyZ2luOiAwLjJlbSAwO1xuICAgIGJhY2tncm91bmQ6ICNjY2M7XG4gICAgY29sb3I6ICMwMDA7XG4gICAgcGFkZGluZzogMC4yZW0gMDtcbn1cblxuLyogU3BhY2Ugb3V0IGNvbnRlbnQgYSBiaXQgKi9cbmJvZHkge1xuICAgIHBhZGRpbmctdG9wOiAyMHB4O1xuICAgIHBhZGRpbmctYm90dG9tOiAyMHB4O1xufVxuXG4vKiBFdmVyeXRoaW5nIGJ1dCB0aGUganVtYm90cm9uIGdldHMgc2lkZSBzcGFjaW5nIGZvciBtb2JpbGUgZmlyc3Qgdmlld3MgKi9cbi5oZWFkZXIsXG4ubWFya2V0aW5nLFxuLmZvb3RlciB7XG4gICAgcGFkZGluZy1sZWZ0OiAxNXB4O1xuICAgIHBhZGRpbmctcmlnaHQ6IDE1cHg7XG59XG5cbi8qIEN1c3RvbSBwYWdlIGhlYWRlciAqL1xuLmhlYWRlciB7XG4gICAgYm9yZGVyLWJvdHRvbTogMXB4IHNvbGlkICNlNWU1ZTU7XG5cbiAgICAvKiBNYWtlIHRoZSBtYXN0aGVhZCBoZWFkaW5nIHRoZSBzYW1lIGhlaWdodCBhcyB0aGUgbmF2aWdhdGlvbiAqL1xuICAgIGgzIHtcbiAgICAgICAgbWFyZ2luLXRvcDogMDtcbiAgICAgICAgbWFyZ2luLWJvdHRvbTogMDtcbiAgICAgICAgbGluZS1oZWlnaHQ6IDQwcHg7XG4gICAgICAgIHBhZGRpbmctYm90dG9tOiAxOXB4O1xuICAgIH1cbn1cblxuLyogQ3VzdG9tIHBhZ2UgZm9vdGVyICovXG4uZm9vdGVyIHtcbiAgICBwYWRkaW5nLXRvcDogMTlweDtcbiAgICBjb2xvcjogIzc3NztcbiAgICBib3JkZXItdG9wOiAxcHggc29saWQgI2U1ZTVlNTtcbn1cblxuLmNvbnRhaW5lci1uYXJyb3cgPiBociB7XG4gICAgbWFyZ2luOiAzMHB4IDA7XG59XG5cbi8qIE1haW4gbWFya2V0aW5nIG1lc3NhZ2UgYW5kIHNpZ24gdXAgYnV0dG9uICovXG4uanVtYm90cm9uIHtcbiAgICB0ZXh0LWFsaWduOiBjZW50ZXI7XG4gICAgYm9yZGVyLWJvdHRvbTogMXB4IHNvbGlkICNlNWU1ZTU7XG4gICAgLmJ0biB7XG4gICAgICAgIGZvbnQtc2l6ZTogMjFweDtcbiAgICAgICAgcGFkZGluZzogMTRweCAyNHB4O1xuICAgIH1cbn1cblxuLyogU3VwcG9ydGluZyBtYXJrZXRpbmcgY29udGVudCAqL1xuLm1hcmtldGluZyB7XG4gICAgbWFyZ2luOiA0MHB4IDA7XG4gICAgcCArIGg0IHtcbiAgICAgICAgbWFyZ2luLXRvcDogMjhweDtcbiAgICB9XG59XG5cbi8qIFJlc3BvbnNpdmU6IFBvcnRyYWl0IHRhYmxldHMgYW5kIHVwICovXG5AbWVkaWEgc2NyZWVuIGFuZCAobWluLXdpZHRoOiA3NjhweCkge1xuICAgIC5jb250YWluZXIge1xuICAgICAgICBtYXgtd2lkdGg6IDczMHB4O1xuICAgIH1cblxuICAgIC8qIFJlbW92ZSB0aGUgcGFkZGluZyB3ZSBzZXQgZWFybGllciAqL1xuICAgIC5oZWFkZXIsXG4gICAgLm1hcmtldGluZyxcbiAgICAuZm9vdGVyIHtcbiAgICAgICAgcGFkZGluZy1sZWZ0OiAwO1xuICAgICAgICBwYWRkaW5nLXJpZ2h0OiAwO1xuICAgIH1cblxuICAgIC8qIFNwYWNlIG91dCB0aGUgbWFzdGhlYWQgKi9cbiAgICAuaGVhZGVyIHtcbiAgICAgICAgbWFyZ2luLWJvdHRvbTogMzBweDtcbiAgICB9XG5cbiAgICAvKiBSZW1vdmUgdGhlIGJvdHRvbSBib3JkZXIgb24gdGhlIGp1bWJvdHJvbiBmb3IgdmlzdWFsIGVmZmVjdCAqL1xuICAgIC5qdW1ib3Ryb24ge1xuICAgICAgICBib3JkZXItYm90dG9tOiAzMDA7XG4gICAgfVxufVxuXG4vLyB0aGlzIGlzIGEgY29tbWVudC4uLlxuIl0sInNvdXJjZVJvb3QiOiIvc291cmNlLyJ9 */
/*# sourceMappingURL=data:application/json;base64,eyJ2ZXJzaW9uIjozLCJmaWxlIjoibWFpbi5jc3MiLCJuYW1lcyI6W10sIm1hcHBpbmdzIjoiIiwic291cmNlcyI6WyJtYWluLnNjc3MiXSwic291cmNlc0NvbnRlbnQiOlsiJGljb24tZm9udC1wYXRoOiBcIi4uL2Jvd2VyX2NvbXBvbmVudHMvYm9vdHN0cmFwLXNhc3Mtb2ZmaWNpYWwvdmVuZG9yL2Fzc2V0cy9mb250cy9ib290c3RyYXAvXCI7XG5cbi8vIGJvd2VyOnNjc3NcbkBpbXBvcnQgXCIuLi9ib3dlcl9jb21wb25lbnRzL2Jvb3RzdHJhcC1zYXNzLW9mZmljaWFsL3ZlbmRvci9hc3NldHMvc3R5bGVzaGVldHMvYm9vdHN0cmFwLnNjc3NcIjtcbi8vIGVuZGJvd2VyXG5cbi5icm93c2VoYXBweSB7XG4gICAgbWFyZ2luOiAwLjJlbSAwO1xuICAgIGJhY2tncm91bmQ6ICNjY2M7XG4gICAgY29sb3I6ICMwMDA7XG4gICAgcGFkZGluZzogMC4yZW0gMDtcbn1cblxuLyogU3BhY2Ugb3V0IGNvbnRlbnQgYSBiaXQgKi9cbmJvZHkge1xuICAgIHBhZGRpbmctdG9wOiAyMHB4O1xuICAgIHBhZGRpbmctYm90dG9tOiAyMHB4O1xufVxuXG4vKiBFdmVyeXRoaW5nIGJ1dCB0aGUganVtYm90cm9uIGdldHMgc2lkZSBzcGFjaW5nIGZvciBtb2JpbGUgZmlyc3Qgdmlld3MgKi9cbi5oZWFkZXIsXG4ubWFya2V0aW5nLFxuLmZvb3RlciB7XG4gICAgcGFkZGluZy1sZWZ0OiAxNXB4O1xuICAgIHBhZGRpbmctcmlnaHQ6IDE1cHg7XG59XG5cbi8qIEN1c3RvbSBwYWdlIGhlYWRlciAqL1xuLmhlYWRlciB7XG4gICAgYm9yZGVyLWJvdHRvbTogMXB4IHNvbGlkICNlNWU1ZTU7XG5cbiAgICAvKiBNYWtlIHRoZSBtYXN0aGVhZCBoZWFkaW5nIHRoZSBzYW1lIGhlaWdodCBhcyB0aGUgbmF2aWdhdGlvbiAqL1xuICAgIGgzIHtcbiAgICAgICAgbWFyZ2luLXRvcDogMDtcbiAgICAgICAgbWFyZ2luLWJvdHRvbTogMDtcbiAgICAgICAgbGluZS1oZWlnaHQ6IDQwcHg7XG4gICAgICAgIHBhZGRpbmctYm90dG9tOiAxOXB4O1xuICAgIH1cbn1cblxuLyogQ3VzdG9tIHBhZ2UgZm9vdGVyICovXG4uZm9vdGVyIHtcbiAgICBwYWRkaW5nLXRvcDogMTlweDtcbiAgICBjb2xvcjogIzc3NztcbiAgICBib3JkZXItdG9wOiAxcHggc29saWQgI2U1ZTVlNTtcbn1cblxuLmNvbnRhaW5lci1uYXJyb3cgPiBociB7XG4gICAgbWFyZ2luOiAzMHB4IDA7XG59XG5cbi8qIE1haW4gbWFya2V0aW5nIG1lc3NhZ2UgYW5kIHNpZ24gdXAgYnV0dG9uICovXG4uanVtYm90cm9uIHtcbiAgICB0ZXh0LWFsaWduOiBjZW50ZXI7XG4gICAgYm9yZGVyLWJvdHRvbTogMXB4IHNvbGlkICNlNWU1ZTU7XG4gICAgLmJ0biB7XG4gICAgICAgIGZvbnQtc2l6ZTogMjFweDtcbiAgICAgICAgcGFkZGluZzogMTRweCAyNHB4O1xuICAgIH1cbn1cblxuLyogU3VwcG9ydGluZyBtYXJrZXRpbmcgY29udGVudCAqL1xuLm1hcmtldGluZyB7XG4gICAgbWFyZ2luOiA0MHB4IDA7XG4gICAgcCArIGg0IHtcbiAgICAgICAgbWFyZ2luLXRvcDogMjhweDtcbiAgICB9XG59XG5cbi8qIFJlc3BvbnNpdmU6IFBvcnRyYWl0IHRhYmxldHMgYW5kIHVwICovXG5AbWVkaWEgc2NyZWVuIGFuZCAobWluLXdpZHRoOiA3NjhweCkge1xuICAgIC5jb250YWluZXIge1xuICAgICAgICBtYXgtd2lkdGg6IDczMHB4O1xuICAgIH1cblxuICAgIC8qIFJlbW92ZSB0aGUgcGFkZGluZyB3ZSBzZXQgZWFybGllciAqL1xuICAgIC5oZWFkZXIsXG4gICAgLm1hcmtldGluZyxcbiAgICAuZm9vdGVyIHtcbiAgICAgICAgcGFkZGluZy1sZWZ0OiAwO1xuICAgICAgICBwYWRkaW5nLXJpZ2h0OiAwO1xuICAgIH1cblxuICAgIC8qIFNwYWNlIG91dCB0aGUgbWFzdGhlYWQgKi9cbiAgICAuaGVhZGVyIHtcbiAgICAgICAgbWFyZ2luLWJvdHRvbTogMzBweDtcbiAgICB9XG5cbiAgICAvKiBSZW1vdmUgdGhlIGJvdHRvbSBib3JkZXIgb24gdGhlIGp1bWJvdHJvbiBmb3IgdmlzdWFsIGVmZmVjdCAqL1xuICAgIC5qdW1ib3Ryb24ge1xuICAgICAgICBib3JkZXItYm90dG9tOiAzMDA7XG4gICAgfVxufVxuXG4vLyB0aGlzIGlzIGEgY29tbWVudC4uLlxuIl0sInNvdXJjZVJvb3QiOiIvc291cmNlLyJ9 */
@@ -556,8 +556,8 @@ checkbox:-moz-focusring {
visibility: hidden;
}

.devtools-searchinput .textbox-input::-moz-placeholder,
.devtools-filterinput .textbox-input::-moz-placeholder {
.devtools-searchinput .textbox-input::placeholder,
.devtools-filterinput .textbox-input::placeholder {
font-style: normal;
}
@@ -72,12 +72,12 @@ AppValidator.checkManifest = function (manifestURL) {

try {
req.open("GET", manifestURL, true);
req.channel.loadFlags |= Ci.nsIRequest.LOAD_BYPASS_CACHE | Ci.nsIRequest.INHIBIT_CACHING;
} catch (e) {
error = strings.formatStringFromName("validator.invalidManifestURL", [manifestURL], 1);
deferred.reject(error);
return deferred.promise;
}
req.channel.loadFlags |= Ci.nsIRequest.LOAD_BYPASS_CACHE | Ci.nsIRequest.INHIBIT_CACHING;

req.onload = function () {
let manifest = null;
@@ -228,12 +228,12 @@ AppValidator.prototype.validateLaunchPath = function (manifest) {
req.overrideMimeType("text/plain");
try {
req.open("HEAD", indexURL, true);
req.channel.loadFlags |= Ci.nsIRequest.LOAD_BYPASS_CACHE | Ci.nsIRequest.INHIBIT_CACHING;
} catch (e) {
this.error(strings.formatStringFromName("validator.accessFailedLaunchPath", [indexURL], 1));
deferred.resolve();
return deferred.promise;
}
req.channel.loadFlags |= Ci.nsIRequest.LOAD_BYPASS_CACHE | Ci.nsIRequest.INHIBIT_CACHING;
req.onload = () => {
if (req.status >= 400)
this.error(strings.formatStringFromName("validator.accessFailedLaunchPathBadHttpCode", [indexURL, req.status], 2));
@@ -63,7 +63,7 @@ console.log("The PSEUDO_ELEMENTS have been copied to your clipboard.")
*/

/*eslint-disable */
exports.PSEUDO_ELEMENTS = [":after",":before",":backdrop",":first-letter",":first-line",":-moz-selection",":-moz-focus-inner",":-moz-focus-outer",":-moz-list-bullet",":-moz-list-number",":-moz-math-anonymous",":-moz-progress-bar",":-moz-range-track",":-moz-range-progress",":-moz-range-thumb",":-moz-meter-bar",":-moz-placeholder",":-moz-color-swatch"];
exports.PSEUDO_ELEMENTS = [":after",":before",":backdrop",":first-letter",":first-line",":-moz-selection",":-moz-focus-inner",":-moz-focus-outer",":-moz-list-bullet",":-moz-list-number",":-moz-math-anonymous",":-moz-progress-bar",":-moz-range-track",":-moz-range-progress",":-moz-range-thumb",":-moz-meter-bar",":-moz-placeholder",":placeholder",":-moz-color-swatch"];
/*eslint-enable */

/**
@@ -106,12 +106,6 @@ exports.items = [

let xhr = new XMLHttpRequest();

try {
xhr.open("GET", args.url, true);
} catch(e) {
return l10n.lookup("jsbInvalidURL");
}

let deferred = context.defer();

xhr.onreadystatechange = function() {
@@ -128,7 +122,12 @@ exports.items = [
}
};
}
xhr.send(null);
try {
xhr.open("GET", args.url, true);
xhr.send(null);
} catch(e) {
return l10n.lookup("jsbInvalidURL");
}
return deferred.promise;
}
}
@@ -201,7 +201,6 @@ Exception::Exception(const nsACString& aMessage,
nsIStackFrame *aLocation,
nsISupports *aData)
: mResult(NS_OK),
mLineNumber(0),
mInitialized(false),
mHoldingJSVal(false)
{
@@ -223,7 +222,6 @@ Exception::Exception(const nsACString& aMessage,

Exception::Exception()
: mResult(NS_OK),
mLineNumber(-1),
mInitialized(false),
mHoldingJSVal(false)
{
@@ -316,7 +314,7 @@ Exception::GetFilename(JSContext* aCx, nsAString& aFilename)
return mLocation->GetFilename(aCx, aFilename);
}

aFilename.Assign(mFilename);
aFilename.Truncate();
return NS_OK;
}

@@ -333,7 +331,7 @@ Exception::GetLineNumber(JSContext* aCx, uint32_t *aLineNumber)
return rv;
}

*aLineNumber = mLineNumber;
*aLineNumber = 0;
return NS_OK;
}

@@ -484,7 +482,7 @@ Exception::LineNumber(JSContext* aCx) const
return 0;
}

return mLineNumber;
return 0;
}

uint32_t
@@ -136,8 +136,6 @@ protected:
nsCString mName;
nsCOMPtr<nsIStackFrame> mLocation;
nsCOMPtr<nsISupports> mData;
nsString mFilename;
int mLineNumber;
bool mInitialized;

bool mHoldingJSVal;
@@ -15,7 +15,6 @@
#include "mozilla/dom/File.h"
#include "mozilla/dom/MessageEvent.h"
#include "mozilla/dom/MessageEventBinding.h"
#include "mozilla/dom/nsCSPService.h"
#include "mozilla/dom/nsCSPContext.h"
#include "mozilla/dom/nsCSPUtils.h"
#include "mozilla/dom/ScriptSettings.h"
@@ -1568,27 +1567,28 @@ WebSocketImpl::Init(JSContext* aCx,
}

// The 'real' nsHttpChannel of the websocket gets opened in the parent.
// Since we don't serialize the CSP within child and parent we have to
// perform the CSP check here instead of AsyncOpen2().
// Since we don't serialize the CSP within child and parent and also not
// the context, we have to perform content policy checks here instead of
// AsyncOpen2().
// Please note that websockets can't follow redirects, hence there is no
// need to perform a CSP check after redirects.
nsCOMPtr<nsIContentPolicy> cspService = do_GetService(CSPSERVICE_CONTRACTID);
int16_t shouldLoad = nsIContentPolicy::REJECT_REQUEST;
aRv = cspService->ShouldLoad(nsIContentPolicy::TYPE_WEBSOCKET,
uri,
nullptr, // aRequestOrigin not used within CSP
originDoc,
EmptyCString(), // aMimeTypeGuess
nullptr, // aExtra
aPrincipal,
&shouldLoad);
int16_t shouldLoad = nsIContentPolicy::ACCEPT;
aRv = NS_CheckContentLoadPolicy(nsIContentPolicy::TYPE_WEBSOCKET,
uri,
aPrincipal,
originDoc,
EmptyCString(),
nullptr,
&shouldLoad,
nsContentUtils::GetContentPolicy(),
nsContentUtils::GetSecurityManager());

if (NS_WARN_IF(aRv.Failed())) {
return;
}

if (NS_CP_REJECTED(shouldLoad)) {
// Disallowed by CSP
// Disallowed by content policy
aRv.Throw(NS_ERROR_CONTENT_BLOCKED);
return;
}
@@ -24,9 +24,9 @@ onmessage = function(e) {
document.body.removeChild(iframe);
}).then(function() {
var xhr = new XMLHttpRequest();

xhr.open("GET", blobURL);
try {
xhr.open("GET", blobURL);
xhr.send();
ok(false, "The URL should be done!");
} catch(e) {
ok(true, "The URL should be done!");
@@ -34,20 +34,20 @@ function runTest() {
URL.revokeObjectURL(url + test.part);

var xhr = new XMLHttpRequest();
try {
xhr.open('GET', url + test.part);
} catch(e) {
ok(test.revoke, "This should fail!");
runTest();
return;
}

xhr.onload = function() {
is(xhr.responseText, 'hello world', 'URL: ' + url + test.part);
runTest();
}

xhr.send();
try {
xhr.send();
} catch(e) {
ok(test.revoke, "This should fail!");
runTest();
return;
}
ok(!test.revoke, "This should succeed!");
}
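
The rewritten test accepts that a revoked blob URL may be rejected at either stage: some failures surface as a throw from open(), others only once send() runs, which is why both calls now sit in try/catch blocks. A condensed sketch of the behavior being exercised (blob contents illustrative):

    var url = URL.createObjectURL(new Blob(["hello world"]));
    URL.revokeObjectURL(url);
    var xhr = new XMLHttpRequest();
    try {
      xhr.open("GET", url); // may throw here...
      xhr.send();           // ...or the failure may only surface at send()
    } catch (e) {
      // Either way, a revoked blob URL must not be fetchable.
    }
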
@@ -157,6 +157,8 @@ parent:

async ReturnSitesWithData(nsCString[] aSites, uint64_t aCallbackId);

intr GetKeyState(int32_t aVirtKey)
returns (int16_t aState);
};

} // namespace plugins
@@ -90,6 +90,9 @@ static WindowsDllInterceptor sUser32Intercept;
typedef BOOL (WINAPI *GetWindowInfoPtr)(HWND hwnd, PWINDOWINFO pwi);
static GetWindowInfoPtr sGetWindowInfoPtrStub = nullptr;
static HWND sBrowserHwnd = nullptr;
// sandbox process doesn't get current key states. So we need to get it on chrome.
typedef SHORT (WINAPI *GetKeyStatePtr)(int);
static GetKeyStatePtr sGetKeyStatePtrStub = nullptr;
#endif

/* static */
@@ -2066,6 +2069,20 @@ PMCGetWindowInfoHook(HWND hWnd, PWINDOWINFO pwi)
pwi->rcWindow = pwi->rcClient;
return result;
}

// static
SHORT WINAPI
PMCGetKeyState(int aVirtKey)
{
PluginModuleChild* chromeInstance = PluginModuleChild::GetChrome();
if (chromeInstance) {
int16_t ret = 0;
if (chromeInstance->CallGetKeyState(aVirtKey, &ret)) {
return ret;
}
}
return sGetKeyStatePtrStub(aVirtKey);
}
#endif

PPluginInstanceChild*
@@ -2086,12 +2103,18 @@ PluginModuleChild::AllocPPluginInstanceChild(const nsCString& aMimeType,
mQuirks = GetChrome()->mQuirks;

#ifdef XP_WIN
sUser32Intercept.Init("user32.dll");
if ((mQuirks & QUIRK_FLASH_HOOK_GETWINDOWINFO) &&
!sGetWindowInfoPtrStub) {
sUser32Intercept.Init("user32.dll");
sUser32Intercept.AddHook("GetWindowInfo", reinterpret_cast<intptr_t>(PMCGetWindowInfoHook),
(void**) &sGetWindowInfoPtrStub);
}

if ((mQuirks & QUIRK_FLASH_HOOK_GETKEYSTATE) &&
!sGetKeyStatePtrStub) {
sUser32Intercept.AddHook("GetKeyState", reinterpret_cast<intptr_t>(PMCGetKeyState),
(void**) &sGetKeyStatePtrStub);
}
#endif

return new PluginInstanceChild(&mFunctions, aMimeType, aMode, aNames,
@@ -3365,4 +3365,20 @@ PluginModuleChromeParent::RecvProfile(const nsCString& aProfile)
return true;
}

bool
PluginModuleParent::AnswerGetKeyState(const int32_t& aVirtKey, int16_t* aRet)
{
return false;
}

bool
PluginModuleChromeParent::AnswerGetKeyState(const int32_t& aVirtKey,
int16_t* aRet)
{
#if defined(XP_WIN)
*aRet = ::GetKeyState(aVirtKey);
return true;
#else
return PluginModuleParent::AnswerGetKeyState(aVirtKey, aRet);
#endif
}
@@ -208,6 +208,8 @@ protected:

virtual bool RecvProfile(const nsCString& aProfile) override { return true; }

virtual bool AnswerGetKeyState(const int32_t& aVirtKey, int16_t* aRet) override;

virtual bool RecvReturnClearSiteData(const NPError& aRv,
const uint64_t& aCallbackId) override;

@@ -498,6 +500,9 @@ class PluginModuleChromeParent
virtual bool
RecvProfile(const nsCString& aProfile) override;

virtual bool
AnswerGetKeyState(const int32_t& aVirtKey, int16_t* aRet) override;

private:
virtual void
EnteredCxxStack() override;
@@ -35,6 +35,9 @@ int GetQuirksFromMimeTypeAndFilename(const nsCString& aMimeType,
quirks |= QUIRK_FLASH_HOOK_GETWINDOWINFO;
quirks |= QUIRK_FLASH_FIXUP_MOUSE_CAPTURE;
quirks |= QUIRK_WINLESS_HOOK_IME;
#if defined(_M_X64) || defined(__x86_64__)
quirks |= QUIRK_FLASH_HOOK_GETKEYSTATE;
#endif
#endif
}
@@ -55,6 +55,8 @@ enum PluginQuirks {
QUIRK_UNITY_FIXUP_MOUSE_CAPTURE = 1 << 11,
// Win: Hook IMM32 API to handle IME event on windowless plugin
QUIRK_WINLESS_HOOK_IME = 1 << 12,
// Win: Hook GetKeyState to get keyboard state on sandbox process
QUIRK_FLASH_HOOK_GETKEYSTATE = 1 << 13,
};

int GetQuirksFromMimeTypeAndFilename(const nsCString& aMimeType,
@@ -497,6 +497,7 @@ WifiGeoPositionProvider.prototype = {

try {
xhr.open("POST", url, true);
xhr.channel.loadFlags = Ci.nsIChannel.LOAD_ANONYMOUS;
} catch (e) {
this.notifyListener("notifyError",
[POSITION_UNAVAILABLE]);
@@ -505,7 +506,6 @@ WifiGeoPositionProvider.prototype = {
xhr.setRequestHeader("Content-Type", "application/json; charset=UTF-8");
xhr.responseType = "json";
xhr.mozBackgroundRequest = true;
xhr.channel.loadFlags = Ci.nsIChannel.LOAD_ANONYMOUS;
xhr.timeout = Services.prefs.getIntPref("geo.wifi.xhr.timeout");
xhr.ontimeout = (function() {
LOG("Location request XHR timed out.")
@@ -1526,11 +1526,11 @@ XMLHttpRequestMainThread::OpenInternal(const nsACString& aMethod,
mFlagAborted = false;
mFlagTimedOut = false;

// The channel should really be created on send(), but we have a chrome-only
// XHR.channel API which necessitates creating the channel now, while doing
// the rest of the channel-setup later at send-time.
rv = CreateChannel();
NS_ENSURE_SUCCESS(rv, rv);
// Per spec we should only create the channel on send(), but we have internal
// code that relies on the channel being created now, and that code is not
// always IsSystemXHR(). However, we're not supposed to throw channel-creation
// errors during open(), so we silently ignore those here.
CreateChannel();

// Step 12
if (mState != State::opened) {
@@ -2632,7 +2632,7 @@ XMLHttpRequestMainThread::InitiateFetch(nsIInputStream* aUploadStream,

// Per spec, we throw on sync errors, but not async.
if (mFlagSynchronous) {
return rv;
return NS_ERROR_DOM_NETWORK_ERR;
}
}

@@ -2724,19 +2724,25 @@ XMLHttpRequestMainThread::SendInternal(const RequestBodyBase* aBody)
{
NS_ENSURE_TRUE(mPrincipal, NS_ERROR_NOT_INITIALIZED);

PopulateNetworkInterfaceId();
// Steps 1 and 2
if (mState != State::opened || mFlagSend) {
return NS_ERROR_DOM_INVALID_STATE_ERR;
}

nsresult rv = CheckInnerWindowCorrectness();
if (NS_FAILED(rv)) {
return NS_ERROR_DOM_INVALID_STATE_ERR;
}

if (mState != State::opened || // Step 1
mFlagSend || // Step 2
!mChannel) { // Gecko-specific
return NS_ERROR_DOM_INVALID_STATE_ERR;
// If open() failed to create the channel, then throw a network error
// as per spec. We really should create the channel here in send(), but
// we have internal code relying on the channel being created in open().
if (!mChannel) {
return NS_ERROR_DOM_NETWORK_ERR;
}

PopulateNetworkInterfaceId();

// XXX We should probably send a warning to the JS console
// if there are no event listeners set and we are doing
// an asynchronous call.
@@ -2893,7 +2899,7 @@ XMLHttpRequestMainThread::SendInternal(const RequestBodyBase* aBody)
if (!mChannel) {
// Per spec, silently fail on async request failures; throw for sync.
if (mFlagSynchronous) {
return NS_ERROR_FAILURE;
return NS_ERROR_DOM_NETWORK_ERR;
} else {
// Defer the actual sending of async events just in case listeners
// are attached after the send() method is called.
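
Sketched from the JS side, the SendInternal() changes mean channel-creation problems are no longer reported by open(); a request whose channel never came into existence now fails with a network error out of send(), matching the spec's step ordering. An illustrative sketch (the URL is a placeholder, and exactly how the error surfaces depends on whether the request is synchronous):

    var xhr = new XMLHttpRequest();
    xhr.open("GET", "http://example.invalid/"); // per spec, open() succeeds
    try {
      xhr.send(); // a NetworkError here indicates the channel was never created
    } catch (e) {
      // Synchronous failures land in this catch; asynchronous requests
      // generally report problems through their error event instead.
    }
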
@@ -2,7 +2,7 @@
@namespace html url('http://www.w3.org/1999/xhtml');

/* We need to have a non-transparent placeholder so we can test it. */
html|input::-moz-placeholder {
html|input::placeholder {
opacity: 1.0;
color: graytext;
}
}
@@ -1111,8 +1111,9 @@ nsWindowWatcher::OpenWindowInternal(mozIDOMWindowProxy* aParent,
auto* docShell = static_cast<nsDocShell*>(newDocShell.get());

// If this is not a chrome docShell, we apply originAttributes from the
// subjectPrincipal.
// subjectPrincipal, unless it's an expanded principal.
if (subjectPrincipal &&
!subjectPrincipal->GetIsExpandedPrincipal() &&
docShell->ItemType() != nsIDocShellTreeItem::typeChrome) {
DocShellOriginAttributes attrs;
attrs.InheritFromDocToChildDocShell(BasePrincipal::Cast(subjectPrincipal)->OriginAttributesRef());
@@ -329,6 +329,9 @@ CalculateDistanceToEllipticArc(const Point& P, const Point& normal,
Float n1 = (- B + S) / A;
Float n2 = (- B - S) / A;

MOZ_ASSERT(n1 >= 0);
MOZ_ASSERT(n2 >= 0);

return n1 < n2 ? n1 : n2;
}
@@ -162,6 +162,11 @@ struct LayerPropertiesBase : public LayerProperties
MOZ_COUNT_DTOR(LayerPropertiesBase);
}

protected:
LayerPropertiesBase(const LayerPropertiesBase& a) = delete;
LayerPropertiesBase& operator=(const LayerPropertiesBase& a) = delete;

public:
virtual nsIntRegion ComputeDifferences(Layer* aRoot,
NotifySubDocInvalidationFunc aCallback,
bool* aGeometryChanged);
@@ -282,6 +287,11 @@ struct ContainerLayerProperties : public LayerPropertiesBase
}
}

protected:
ContainerLayerProperties(const ContainerLayerProperties& a) = delete;
ContainerLayerProperties& operator=(const ContainerLayerProperties& a) = delete;

public:
nsIntRegion ComputeChangeInternal(NotifySubDocInvalidationFunc aCallback,
bool& aGeometryChanged) override
{
@@ -439,6 +449,11 @@ struct ColorLayerProperties : public LayerPropertiesBase
, mBounds(aLayer->GetBounds())
{ }

protected:
ColorLayerProperties(const ColorLayerProperties& a) = delete;
ColorLayerProperties& operator=(const ColorLayerProperties& a) = delete;

public:
virtual nsIntRegion ComputeChangeInternal(NotifySubDocInvalidationFunc aCallback,
bool& aGeometryChanged)
{
@@ -32,6 +32,13 @@ typedef void (*NotifySubDocInvalidationFunc)(ContainerLayer* aLayer,
*/
struct LayerProperties
{
protected:
LayerProperties() {}

LayerProperties(const LayerProperties& a) = delete;
LayerProperties& operator=(const LayerProperties& a) = delete;

public:
virtual ~LayerProperties() {}

/**
@@ -190,7 +190,8 @@ nsPNGDecoder::CreateFrame(const FrameInfo& aFrameInfo)
MOZ_ASSERT(!IsMetadataDecode());

// Check if we have transparency, and send notifications if needed.
auto transparency = GetTransparencyType(aFrameInfo.mFormat, aFrameInfo.mFrameRect);
auto transparency = GetTransparencyType(aFrameInfo.mFormat,
aFrameInfo.mFrameRect);
PostHasTransparencyIfNeeded(transparency);
SurfaceFormat format = transparency == TransparencyType::eNone
? SurfaceFormat::B8G8R8X8
@@ -686,13 +687,15 @@ nsPNGDecoder::info_callback(png_structp png_ptr, png_infop info_ptr)
#endif

if (decoder->IsMetadataDecode()) {
// If we are animated then the first frame rect is either: 1) the whole image
// if the IDAT chunk is part of the animation 2) the frame rect of the first
// fDAT chunk otherwise. If we are not animated then we want to make sure to
// call PostHasTransparency in the metadata decode if we need to. So it's okay
// to pass IntRect(0, 0, width, height) here for animated images; they will
// call with the proper first frame rect in the full decode.
auto transparency = decoder->GetTransparencyType(decoder->format, frameRect);
// If we are animated then the first frame rect is either:
// 1) the whole image if the IDAT chunk is part of the animation
// 2) the frame rect of the first fDAT chunk otherwise.
// If we are not animated then we want to make sure to call
// PostHasTransparency in the metadata decode if we need to. So it's
// okay to pass IntRect(0, 0, width, height) here for animated images;
// they will call with the proper first frame rect in the full decode.
auto transparency = decoder->GetTransparencyType(decoder->format,
frameRect);
decoder->PostHasTransparencyIfNeeded(transparency);

// We have the metadata we're looking for, so stop here, before we allocate
@@ -757,7 +760,8 @@ static NextPixel<uint32_t>
PackRGBPixelAndAdvance(uint8_t*& aRawPixelInOut)
{
const uint32_t pixel =
gfxPackedPixel(0xFF, aRawPixelInOut[0], aRawPixelInOut[1], aRawPixelInOut[2]);
gfxPackedPixel(0xFF, aRawPixelInOut[0], aRawPixelInOut[1],
aRawPixelInOut[2]);
aRawPixelInOut += 3;
return AsVariant(pixel);
}
@@ -831,7 +835,8 @@ nsPNGDecoder::row_callback(png_structp png_ptr, png_bytep new_row,
decoder->mPass++;
}

const png_uint_32 height = static_cast<png_uint_32>(decoder->mFrameRect.height);
const png_uint_32 height =
static_cast<png_uint_32>(decoder->mFrameRect.height);

if (row_num >= height) {
// Bail if we receive extra rows. This is especially important because if we
@@ -936,7 +941,8 @@ nsPNGDecoder::DoYield(png_structp aPNGStruct)
// the data that was passed to png_process_data() have not been consumed yet.
// We use this information to tell StreamingLexer where to place us in the
// input stream when we come back from the yield.
png_size_t pendingBytes = png_process_data_pause(aPNGStruct, /* save = */ false);
png_size_t pendingBytes = png_process_data_pause(aPNGStruct,
/* save = */ false);

MOZ_ASSERT(pendingBytes < mLastChunkLength);
size_t consumedBytes = mLastChunkLength - min(pendingBytes, mLastChunkLength);
@@ -978,10 +984,12 @@ nsPNGDecoder::frame_info_callback(png_structp png_ptr, png_uint_32 frame_num)

#ifndef MOZ_EMBEDDED_LIBPNG
// if using system library, check frame_width and height against 0
if (frameRect.width == 0)
if (frameRect.width == 0) {
png_error(png_ptr, "Frame width must not be 0");
if (frameRect.height == 0)
}
if (frameRect.height == 0) {
png_error(png_ptr, "Frame height must not be 0");
}
#endif

const FrameInfo info { decoder->format, frameRect, isInterlaced };
@@ -1068,7 +1076,7 @@ nsPNGDecoder::IsValidICO() const

// If there are errors in the call to png_get_IHDR, the error_callback in
// nsPNGDecoder.cpp is called. In this error callback we do a longjmp, so
// we need to save the jump buffer here. Oterwise we'll end up without a
// we need to save the jump buffer here. Otherwise we'll end up without a
// proper callstack.
if (setjmp(png_jmpbuf(mPNG))) {
// We got here from a longjmp call indirectly from png_get_IHDR
@@ -65,6 +65,22 @@ using mozilla::PositiveInfinity;
using JS::AsmJSOption;
using JS::GenericNaN;

/*****************************************************************************/

// The asm.js valid heap lengths are precisely the WASM valid heap lengths for ARM
// greater or equal to MinHeapLength
static const size_t MinHeapLength = PageSize;

static uint32_t
RoundUpToNextValidAsmJSHeapLength(uint32_t length)
{
if (length <= MinHeapLength)
return MinHeapLength;

return wasm::RoundUpToNextValidARMImmediate(length);
}


/*****************************************************************************/
// asm.js module object

@@ -270,6 +286,7 @@ struct AsmJSMetadataCacheablePod
uint32_t numFFIs;
uint32_t srcLength;
uint32_t srcLengthWithRightBrace;
bool usesSimd;

AsmJSMetadataCacheablePod() { PodZero(this); }
};
@@ -1590,6 +1607,7 @@ class MOZ_STACK_CLASS ModuleValidator
ImportMap importMap_;
ArrayViewVector arrayViews_;
bool atomicsPresent_;
bool simdPresent_;

// State used to build the AsmJSModule in finish():
ModuleGenerator mg_;
@@ -1669,6 +1687,7 @@ class MOZ_STACK_CLASS ModuleValidator
importMap_(cx),
arrayViews_(cx),
atomicsPresent_(false),
simdPresent_(false),
mg_(ImportVector()),
errorString_(nullptr),
errorOffset_(UINT32_MAX),
@@ -1990,6 +2009,8 @@ class MOZ_STACK_CLASS ModuleValidator
return asmJSMetadata_->asmJSGlobals.append(Move(g));
}
bool addSimdCtor(PropertyName* var, SimdType type, PropertyName* field) {
simdPresent_ = true;

UniqueChars fieldChars = StringToNewUTF8CharsZ(cx_, *field);
if (!fieldChars)
return false;
@@ -2006,6 +2027,8 @@ class MOZ_STACK_CLASS ModuleValidator
return asmJSMetadata_->asmJSGlobals.append(Move(g));
}
bool addSimdOperation(PropertyName* var, SimdType type, SimdOperation op, PropertyName* field) {
simdPresent_ = true;

UniqueChars fieldChars = StringToNewUTF8CharsZ(cx_, *field);
if (!fieldChars)
return false;
@@ -2284,6 +2307,8 @@ class MOZ_STACK_CLASS ModuleValidator
if (!arrayViews_.empty())
mg_.initMemoryUsage(atomicsPresent_ ? MemoryUsage::Shared : MemoryUsage::Unshared);

asmJSMetadata_->usesSimd = simdPresent_;

MOZ_ASSERT(asmJSMetadata_->asmJSFuncNames.empty());
for (const Func* func : functions_) {
CacheableChars funcName = StringToNewUTF8CharsZ(cx_, *func->name());
@@ -5564,12 +5589,10 @@ CheckSimdOperationCall(FunctionValidator& f, ParseNode* call, const ModuleValida
case SimdOperation::Fn_load:
case SimdOperation::Fn_load1:
case SimdOperation::Fn_load2:
case SimdOperation::Fn_load3:
return CheckSimdLoad(f, call, opType, op, type);
case SimdOperation::Fn_store:
case SimdOperation::Fn_store1:
case SimdOperation::Fn_store2:
case SimdOperation::Fn_store3:
return CheckSimdStore(f, call, opType, op, type);

case SimdOperation::Fn_select:
@@ -5583,6 +5606,10 @@ CheckSimdOperationCall(FunctionValidator& f, ParseNode* call, const ModuleValida
case SimdOperation::Fn_anyTrue:
return CheckSimdAnyTrue(f, call, opType, type);

case SimdOperation::Fn_load3:
case SimdOperation::Fn_store3:
return f.fail(call, "asm.js does not support 3-element SIMD loads or stores");

case SimdOperation::Constructor:
MOZ_CRASH("constructors are handled in CheckSimdCtorCall");
case SimdOperation::Fn_fromFloat64x2Bits:
@@ -7810,9 +7837,21 @@ CheckBuffer(JSContext* cx, const AsmJSMetadata& metadata, HandleValue bufferVal,
}

if (buffer->is<ArrayBufferObject>()) {
Rooted<ArrayBufferObject*> abheap(cx, &buffer->as<ArrayBufferObject>());
if (!ArrayBufferObject::prepareForAsmJS(cx, abheap))
// On 64-bit, bounds checks are statically removed so the huge guard
// region is always necessary. On 32-bit, allocating a guard page
// requires reallocating the incoming ArrayBuffer which could trigger
// OOM. Thus, only ask for a guard page when SIMD is used since SIMD
// allows unaligned memory access (see MaxMemoryAccessSize comment);
#ifdef WASM_HUGE_MEMORY
bool needGuard = true;
#else
bool needGuard = metadata.usesSimd;
#endif
Rooted<ArrayBufferObject*> arrayBuffer(cx, &buffer->as<ArrayBufferObject>());
if (!ArrayBufferObject::prepareForAsmJS(cx, arrayBuffer, needGuard))
return LinkFail(cx, "Unable to prepare ArrayBuffer for asm.js use");

MOZ_ASSERT(arrayBuffer->isAsmJSMalloced() || arrayBuffer->isWasmMapped());
} else {
if (!buffer->as<SharedArrayBufferObject>().isPreparedForAsmJS())
return LinkFail(cx, "SharedArrayBuffer must be created with wasm test mode enabled");
@@ -8827,24 +8866,11 @@ js::AsmJSFunctionToString(JSContext* cx, HandleFunction fun)
return out.finishString();
}

// The asm.js valid heap lengths are precisely the WASM valid heap lengths for ARM
// greater or equal to MinHeapLength
static const size_t MinHeapLength = PageSize;

bool
js::IsValidAsmJSHeapLength(uint32_t length)
{
if (length < MinHeapLength)
return false;

return wasm::IsValidARMLengthImmediate(length);
}

uint32_t
js::RoundUpToNextValidAsmJSHeapLength(uint32_t length)
{
if (length <= MinHeapLength)
return MinHeapLength;

return wasm::RoundUpToNextValidARMLengthImmediate(length);
return wasm::IsValidARMImmediate(length);
}
@@ -84,9 +84,6 @@ AsmJSModuleToString(JSContext* cx, HandleFunction fun, bool addParenToLambda);
extern bool
IsValidAsmJSHeapLength(uint32_t length);

extern uint32_t
RoundUpToNextValidAsmJSHeapLength(uint32_t length);

} // namespace js

#endif // asmjs_asmjs_h
@@ -2904,124 +2904,27 @@ class BaseCompiler
//
// Heap access.

// TODO / CLEANUP - cloned from MIRGraph.cpp, should share.

bool needsBoundsCheckBranch(const MWasmMemoryAccess& access) const {
// A heap access needs a bounds-check branch if we're not relying on signal
// handlers to catch errors, and if it's not proven to be within bounds.
// We use signal-handlers on x64, but on x86 there isn't enough address
// space for a guard region. Also, on x64 the atomic loads and stores
// can't (yet) use the signal handlers.

#ifdef WASM_HUGE_MEMORY
return false;
#else
return access.needsBoundsCheck();
#endif
// Return true only for real asm.js (HEAP[i>>2]|0) accesses which have the
// peculiar property of not throwing on out-of-bounds. Everything else
// (wasm, SIMD.js, Atomics) throws on out-of-bounds.
bool isAsmJSAccess(const MWasmMemoryAccess& access) {
return isCompilingAsmJS() && !access.isSimdAccess() && !access.isAtomicAccess();
}

bool throwOnOutOfBounds(const MWasmMemoryAccess& access) {
return !isCompilingAsmJS();
}

// For asm.js code only: If we have a non-zero offset, it's possible that
// |ptr| itself is out of bounds, while adding the offset computes an
// in-bounds address. To catch this case, we need a second branch, which we
// emit out of line since it's unlikely to be needed in normal programs.
// For this, we'll generate an OffsetBoundsCheck OOL stub.

bool needsOffsetBoundsCheck(const MWasmMemoryAccess& access) const {
return isCompilingAsmJS() && access.offset() != 0;
}

#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
class OffsetBoundsCheck : public OutOfLineCode
{
Label* maybeOutOfBounds;
Register ptrReg;
int32_t offset;

public:
OffsetBoundsCheck(Label* maybeOutOfBounds, Register ptrReg, int32_t offset)
: maybeOutOfBounds(maybeOutOfBounds),
ptrReg(ptrReg),
offset(offset)
{}

void generate(MacroAssembler& masm) {
// asm.js code only:
//
// The access is heap[ptr + offset]. The inline code checks that
// ptr < heap.length - offset. We get here when that fails. We need to check
// for the case where ptr + offset >= 0, in which case the access is still
// in bounds.

MOZ_ASSERT(offset != 0,
"An access without a constant offset doesn't need a separate "
"OffsetBoundsCheck");
masm.cmp32(ptrReg, Imm32(-uint32_t(offset)));
if (maybeOutOfBounds)
masm.j(Assembler::Below, maybeOutOfBounds);
else
masm.j(Assembler::Below, wasm::JumpTarget::OutOfBounds);

# ifdef JS_CODEGEN_X64
// In order to get the offset to wrap properly, we must sign-extend the
// pointer to 32-bits. We'll zero out the sign extension immediately
// after the access to restore asm.js invariants.
masm.movslq(ptrReg, ptrReg);
# endif

masm.jmp(rejoin());
}
};

// CodeGeneratorX86Shared::emitAsmJSBoundsCheckBranch()

MOZ_MUST_USE
bool emitBoundsCheckBranch(const MWasmMemoryAccess& access, RegI32 ptr, Label* maybeFail) {
Label* pass = nullptr;

if (needsOffsetBoundsCheck(access)) {
auto* oolCheck = new(alloc_) OffsetBoundsCheck(maybeFail, ptr.reg, access.offset());
maybeFail = oolCheck->entry();
pass = oolCheck->rejoin();
if (!addOutOfLineCode(oolCheck))
return false;
}

// The bounds check is a comparison with an immediate value. The asm.js
// module linking process will add the length of the heap to the immediate
// field, so -access->endOffset() will turn into
// (heapLength - access->endOffset()), allowing us to test whether the end
// of the access is beyond the end of the heap.
MOZ_ASSERT(access.endOffset() >= 1,
"need to subtract 1 to use JAE, see also AssemblerX86Shared::UpdateBoundsCheck");

uint32_t cmpOffset = masm.cmp32WithPatch(ptr.reg, Imm32(1 - access.endOffset())).offset();
if (maybeFail)
masm.j(Assembler::AboveOrEqual, maybeFail);
else
masm.j(Assembler::AboveOrEqual, wasm::JumpTarget::OutOfBounds);

if (pass)
masm.bind(pass);

masm.append(wasm::BoundsCheck(cmpOffset));
return true;
}

class OutOfLineLoadTypedArrayOOB : public OutOfLineCode
#ifndef WASM_HUGE_MEMORY
class AsmJSLoadOOB : public OutOfLineCode
{
Scalar::Type viewType;
AnyRegister dest;

public:
OutOfLineLoadTypedArrayOOB(Scalar::Type viewType, AnyRegister dest)
AsmJSLoadOOB(Scalar::Type viewType, AnyRegister dest)
: viewType(viewType),
dest(dest)
{}

void generate(MacroAssembler& masm) {
#if defined(JS_CODEGEN_X86)
switch (viewType) {
case Scalar::Float32x4:
case Scalar::Int32x4:
@@ -3048,242 +2951,116 @@ class BaseCompiler
MOZ_CRASH("unexpected array type");
}
masm.jump(rejoin());
#else
Unused << viewType;
Unused << dest;
MOZ_CRASH("Compiler bug: Unexpected platform.");
#endif
}
};
#endif

MOZ_MUST_USE
bool maybeEmitLoadBoundsCheck(const MWasmMemoryAccess& access, RegI32 ptr, AnyRegister dest,
OutOfLineCode** ool)
{
*ool = nullptr;
if (!needsBoundsCheckBranch(access))
return true;

if (throwOnOutOfBounds(access))
return emitBoundsCheckBranch(access, ptr, nullptr);

// TODO / MEMORY: We'll allocate *a lot* of these OOL objects,
// thus risking OOM on a platform that is already
// memory-constrained. We could opt to allocate this path
// in-line instead.
*ool = new (alloc_) OutOfLineLoadTypedArrayOOB(access.accessType(), dest);
if (!addOutOfLineCode(*ool))
return false;

return emitBoundsCheckBranch(access, ptr, (*ool)->entry());
}

MOZ_MUST_USE
bool maybeEmitStoreBoundsCheck(const MWasmMemoryAccess& access, RegI32 ptr, Label** rejoin) {
*rejoin = nullptr;
if (!needsBoundsCheckBranch(access))
return true;

if (throwOnOutOfBounds(access))
return emitBoundsCheckBranch(access, ptr, nullptr);

*rejoin = newLabel();
if (!*rejoin)
return false;

return emitBoundsCheckBranch(access, ptr, *rejoin);
}

void cleanupAfterBoundsCheck(const MWasmMemoryAccess& access, RegI32 ptr) {
# ifdef JS_CODEGEN_X64
if (needsOffsetBoundsCheck(access)) {
// Zero out the high 32 bits, in case the OffsetBoundsCheck code had to
// sign-extend (movslq) the pointer value to get wraparound to work.
masm.movl(ptr.reg, ptr.reg);
private:
void checkOffset(MWasmMemoryAccess* access, RegI32 ptr) {
if (access->offset() >= OffsetGuardLimit) {
masm.branchAdd32(Assembler::CarrySet,
Imm32(access->offset()), ptr.reg,
JumpTarget::OutOfBounds);
access->clearOffset();
}
# endif
}

public:
MOZ_MUST_USE
bool loadHeap(const MWasmMemoryAccess& access, RegI32 ptr, AnyReg dest) {
if (access.offset() > INT32_MAX) {
masm.jump(wasm::JumpTarget::OutOfBounds);
return true;
}
bool load(MWasmMemoryAccess access, RegI32 ptr, AnyReg dest) {
checkOffset(&access, ptr);

OutOfLineCode* ool = nullptr;
if (!maybeEmitLoadBoundsCheck(access, ptr, dest.any(), &ool))
return false;
#ifndef WASM_HUGE_MEMORY
if (isAsmJSAccess(access)) {
ool = new (alloc_) AsmJSLoadOOB(access.accessType(), dest.any());
if (!addOutOfLineCode(ool))
return false;

masm.wasmBoundsCheck(Assembler::AboveOrEqual, ptr.reg, ool->entry());
} else {
masm.wasmBoundsCheck(Assembler::AboveOrEqual, ptr.reg, JumpTarget::OutOfBounds);
}
#endif

# if defined(JS_CODEGEN_X64)
Operand srcAddr(HeapReg, ptr.reg, TimesOne, access.offset());

uint32_t before = masm.size();
if (dest.tag == AnyReg::I64) {
Register out = dest.i64().reg.reg;
switch (access.accessType()) {
case Scalar::Int8: masm.movsbq(srcAddr, out); break;
case Scalar::Uint8: masm.movzbq(srcAddr, out); break;
case Scalar::Int16: masm.movswq(srcAddr, out); break;
case Scalar::Uint16: masm.movzwq(srcAddr, out); break;
case Scalar::Int32: masm.movslq(srcAddr, out); break;
// Int32 to int64 moves zero-extend by default.
case Scalar::Uint32: masm.movl(srcAddr, out); break;
case Scalar::Int64: masm.movq(srcAddr, out); break;
default:
MOZ_CRASH("Compiler bug: Unexpected array type in int64 load");
}
} else {
switch (access.accessType()) {
case Scalar::Int8: masm.movsbl(srcAddr, dest.i32().reg); break;
case Scalar::Uint8: masm.movzbl(srcAddr, dest.i32().reg); break;
case Scalar::Int16: masm.movswl(srcAddr, dest.i32().reg); break;
case Scalar::Uint16: masm.movzwl(srcAddr, dest.i32().reg); break;
case Scalar::Int32:
case Scalar::Uint32: masm.movl(srcAddr, dest.i32().reg); break;
case Scalar::Float32: masm.loadFloat32(srcAddr, dest.f32().reg); break;
case Scalar::Float64: masm.loadDouble(srcAddr, dest.f64().reg); break;
default:
MOZ_CRASH("Compiler bug: Unexpected array type");
}
}
if (dest.tag == AnyReg::I64)
masm.wasmLoadI64(access.accessType(), srcAddr, dest.i64().reg);
else
masm.wasmLoad(access.accessType(), 0, srcAddr, dest.any());

if (isCompilingAsmJS())
masm.append(MemoryAccess(before, MemoryAccess::CarryOn, MemoryAccess::WrapOffset));
// TODO: call verifyHeapAccessDisassembly somehow
if (isAsmJSAccess(access))
masm.append(MemoryAccess(before));
# elif defined(JS_CODEGEN_X86)
Operand srcAddr(ptr.reg, access.offset());

if (dest.tag == AnyReg::I64)
MOZ_CRASH("Not implemented: I64 support");
bool byteRegConflict = access.byteSize() == 1 && !singleByteRegs_.has(dest.i32().reg);
AnyRegister out = byteRegConflict ? AnyRegister(ScratchRegX86) : dest.any();

bool mustMove = access.byteSize() == 1 && !singleByteRegs_.has(dest.i32().reg);
switch (access.accessType()) {
case Scalar::Int8:
case Scalar::Uint8: {
Register rd = mustMove ? ScratchRegX86 : dest.i32().reg;
if (access.accessType() == Scalar::Int8)
masm.movsblWithPatch(srcAddr, rd);
else
masm.movzblWithPatch(srcAddr, rd);
break;
}
case Scalar::Int16: masm.movswlWithPatch(srcAddr, dest.i32().reg); break;
case Scalar::Uint16: masm.movzwlWithPatch(srcAddr, dest.i32().reg); break;
case Scalar::Int32:
case Scalar::Uint32: masm.movlWithPatch(srcAddr, dest.i32().reg); break;
|
||||
case Scalar::Float32: masm.vmovssWithPatch(srcAddr, dest.f32().reg); break;
|
||||
case Scalar::Float64: masm.vmovsdWithPatch(srcAddr, dest.f64().reg); break;
|
||||
default:
|
||||
MOZ_CRASH("Compiler bug: Unexpected array type");
|
||||
}
|
||||
uint32_t after = masm.size();
|
||||
if (mustMove)
|
||||
masm.wasmLoad(access.accessType(), 0, srcAddr, out);
|
||||
|
||||
if (byteRegConflict)
|
||||
masm.mov(ScratchRegX86, dest.i32().reg);
|
||||
|
||||
masm.append(wasm::MemoryAccess(after));
|
||||
// TODO: call verifyHeapAccessDisassembly somehow
|
||||
# else
|
||||
MOZ_CRASH("Compiler bug: Unexpected platform.");
|
||||
# endif
|
||||
|
||||
if (ool) {
|
||||
cleanupAfterBoundsCheck(access, ptr);
|
||||
if (ool)
|
||||
masm.bind(ool->rejoin());
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
MOZ_MUST_USE
|
||||
bool storeHeap(const MWasmMemoryAccess& access, RegI32 ptr, AnyReg src) {
|
||||
if (access.offset() > INT32_MAX) {
|
||||
masm.jump(wasm::JumpTarget::OutOfBounds);
|
||||
return true;
|
||||
}
|
||||
bool store(MWasmMemoryAccess access, RegI32 ptr, AnyReg src) {
|
||||
checkOffset(&access, ptr);
|
||||
|
||||
Label* rejoin = nullptr;
|
||||
if (!maybeEmitStoreBoundsCheck(access, ptr, &rejoin))
|
||||
return false;
|
||||
Label rejoin;
|
||||
#ifndef WASM_HUGE_MEMORY
|
||||
if (isAsmJSAccess(access))
|
||||
masm.wasmBoundsCheck(Assembler::AboveOrEqual, ptr.reg, &rejoin);
|
||||
else
|
||||
masm.wasmBoundsCheck(Assembler::AboveOrEqual, ptr.reg, JumpTarget::OutOfBounds);
|
||||
#endif
|
||||
|
||||
// Emit the store
|
||||
# if defined(JS_CODEGEN_X64)
|
||||
Operand dstAddr(HeapReg, ptr.reg, TimesOne, access.offset());
|
||||
|
||||
Register intReg = Register::Invalid();
|
||||
if (src.tag == AnyReg::I32)
|
||||
intReg = src.i32().reg;
|
||||
else if (src.tag == AnyReg::I64)
|
||||
intReg = src.i64().reg.reg;
|
||||
|
||||
uint32_t before = masm.size();
|
||||
switch (access.accessType()) {
|
||||
case Scalar::Int8:
|
||||
case Scalar::Uint8: masm.movb(intReg, dstAddr); break;
|
||||
case Scalar::Int16:
|
||||
case Scalar::Uint16: masm.movw(intReg, dstAddr); break;
|
||||
case Scalar::Int32:
|
||||
case Scalar::Uint32: masm.movl(intReg, dstAddr); break;
|
||||
case Scalar::Int64: masm.movq(intReg, dstAddr); break;
|
||||
case Scalar::Float32: masm.storeFloat32(src.f32().reg, dstAddr); break;
|
||||
case Scalar::Float64: masm.storeDouble(src.f64().reg, dstAddr); break;
|
||||
default:
|
||||
MOZ_CRASH("Compiler bug: Unexpected array type");
|
||||
}
|
||||
masm.wasmStore(access.accessType(), 0, src.any(), dstAddr);
|
||||
|
||||
if (isCompilingAsmJS())
|
||||
masm.append(MemoryAccess(before, MemoryAccess::CarryOn, MemoryAccess::WrapOffset));
|
||||
// TODO: call verifyHeapAccessDisassembly somehow
|
||||
masm.append(MemoryAccess(before));
|
||||
# elif defined(JS_CODEGEN_X86)
|
||||
Operand dstAddr(ptr.reg, access.offset());
|
||||
|
||||
if (src.tag == AnyReg::I64)
|
||||
MOZ_CRASH("Not implemented: I64 support");
|
||||
|
||||
bool didMove = false;
|
||||
AnyRegister value;
|
||||
if (access.byteSize() == 1 && !singleByteRegs_.has(src.i32().reg)) {
|
||||
didMove = true;
|
||||
masm.mov(src.i32().reg, ScratchRegX86);
|
||||
value = AnyRegister(ScratchRegX86);
|
||||
} else {
|
||||
value = src.any();
|
||||
}
|
||||
switch (access.accessType()) {
|
||||
case Scalar::Int8:
|
||||
case Scalar::Uint8: {
|
||||
Register rs = src.i32().reg;
|
||||
Register rt = didMove ? ScratchRegX86 : rs;
|
||||
masm.movbWithPatch(rt, dstAddr);
|
||||
break;
|
||||
}
|
||||
case Scalar::Int16:
|
||||
case Scalar::Uint16: masm.movwWithPatch(src.i32().reg, dstAddr); break;
|
||||
case Scalar::Int32:
|
||||
case Scalar::Uint32: masm.movlWithPatch(src.i32().reg, dstAddr); break;
|
||||
case Scalar::Float32: masm.vmovssWithPatch(src.f32().reg, dstAddr); break;
|
||||
case Scalar::Float64: masm.vmovsdWithPatch(src.f64().reg, dstAddr); break;
|
||||
default:
|
||||
MOZ_CRASH("Compiler bug: Unexpected array type");
|
||||
}
|
||||
uint32_t after = masm.size();
|
||||
|
||||
masm.append(wasm::MemoryAccess(after));
|
||||
// TODO: call verifyHeapAccessDisassembly somehow
|
||||
masm.wasmStore(access.accessType(), 0, value, dstAddr);
|
||||
# else
|
||||
MOZ_CRASH("Compiler bug: unexpected platform");
|
||||
# endif
|
||||
|
||||
if (rejoin) {
|
||||
cleanupAfterBoundsCheck(access, ptr);
|
||||
masm.bind(rejoin);
|
||||
}
|
||||
if (rejoin.used())
|
||||
masm.bind(&rejoin);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
MOZ_MUST_USE
|
||||
bool loadHeap(const MWasmMemoryAccess& access, RegI32 ptr, AnyReg dest) {
|
||||
MOZ_CRASH("BaseCompiler platform hook: loadHeap");
|
||||
}
|
||||
|
||||
MOZ_MUST_USE
|
||||
bool storeHeap(const MWasmMemoryAccess& access, RegI32 ptr, AnyReg src) {
|
||||
MOZ_CRASH("BaseCompiler platform hook: storeHeap");
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
////////////////////////////////////////////////////////////
|
||||
|
||||
// Generally speaking, ABOVE this point there should be no value
|
||||
@ -5732,7 +5509,7 @@ BaseCompiler::emitLoad(ValType type, Scalar::Type viewType)
|
||||
switch (type) {
|
||||
case ValType::I32: {
|
||||
RegI32 rp = popI32();
|
||||
if (!loadHeap(access, rp, AnyReg(rp)))
|
||||
if (!load(access, rp, AnyReg(rp)))
|
||||
return false;
|
||||
pushI32(rp);
|
||||
break;
|
||||
@ -5740,7 +5517,7 @@ BaseCompiler::emitLoad(ValType type, Scalar::Type viewType)
|
||||
case ValType::I64: {
|
||||
RegI32 rp = popI32();
|
||||
RegI64 rv = needI64();
|
||||
if (!loadHeap(access, rp, AnyReg(rv)))
|
||||
if (!load(access, rp, AnyReg(rv)))
|
||||
return false;
|
||||
pushI64(rv);
|
||||
freeI32(rp);
|
||||
@ -5749,7 +5526,7 @@ BaseCompiler::emitLoad(ValType type, Scalar::Type viewType)
|
||||
case ValType::F32: {
|
||||
RegI32 rp = popI32();
|
||||
RegF32 rv = needF32();
|
||||
if (!loadHeap(access, rp, AnyReg(rv)))
|
||||
if (!load(access, rp, AnyReg(rv)))
|
||||
return false;
|
||||
pushF32(rv);
|
||||
freeI32(rp);
|
||||
@ -5758,14 +5535,14 @@ BaseCompiler::emitLoad(ValType type, Scalar::Type viewType)
|
||||
case ValType::F64: {
|
||||
RegI32 rp = popI32();
|
||||
RegF64 rv = needF64();
|
||||
if (!loadHeap(access, rp, AnyReg(rv)))
|
||||
if (!load(access, rp, AnyReg(rv)))
|
||||
return false;
|
||||
pushF64(rv);
|
||||
freeI32(rp);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
MOZ_CRASH("loadHeap type");
|
||||
MOZ_CRASH("load type");
|
||||
break;
|
||||
}
|
||||
return true;
|
||||
@ -5791,7 +5568,7 @@ BaseCompiler::emitStore(ValType resultType, Scalar::Type viewType)
|
||||
case ValType::I32: {
|
||||
RegI32 rp, rv;
|
||||
pop2xI32(&rp, &rv);
|
||||
if (!storeHeap(access, rp, AnyReg(rv)))
|
||||
if (!store(access, rp, AnyReg(rv)))
|
||||
return false;
|
||||
freeI32(rp);
|
||||
pushI32(rv);
|
||||
@ -5800,7 +5577,7 @@ BaseCompiler::emitStore(ValType resultType, Scalar::Type viewType)
|
||||
case ValType::I64: {
|
||||
RegI64 rv = popI64();
|
||||
RegI32 rp = popI32();
|
||||
if (!storeHeap(access, rp, AnyReg(rv)))
|
||||
if (!store(access, rp, AnyReg(rv)))
|
||||
return false;
|
||||
freeI32(rp);
|
||||
pushI64(rv);
|
||||
@ -5809,7 +5586,7 @@ BaseCompiler::emitStore(ValType resultType, Scalar::Type viewType)
|
||||
case ValType::F32: {
|
||||
RegF32 rv = popF32();
|
||||
RegI32 rp = popI32();
|
||||
if (!storeHeap(access, rp, AnyReg(rv)))
|
||||
if (!store(access, rp, AnyReg(rv)))
|
||||
return false;
|
||||
freeI32(rp);
|
||||
pushF32(rv);
|
||||
@ -5818,14 +5595,14 @@ BaseCompiler::emitStore(ValType resultType, Scalar::Type viewType)
|
||||
case ValType::F64: {
|
||||
RegF64 rv = popF64();
|
||||
RegI32 rp = popI32();
|
||||
if (!storeHeap(access, rp, AnyReg(rv)))
|
||||
if (!store(access, rp, AnyReg(rv)))
|
||||
return false;
|
||||
freeI32(rp);
|
||||
pushF64(rv);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
MOZ_CRASH("storeHeap type");
|
||||
MOZ_CRASH("store type");
|
||||
break;
|
||||
}
|
||||
return true;
|
||||
@ -6080,7 +5857,7 @@ BaseCompiler::emitStoreWithCoercion(ValType resultType, Scalar::Type viewType)
|
||||
RegF64 rw = needF64();
|
||||
masm.convertFloat32ToDouble(rv.reg, rw.reg);
|
||||
RegI32 rp = popI32();
|
||||
if (!storeHeap(access, rp, AnyReg(rw)))
|
||||
if (!store(access, rp, AnyReg(rw)))
|
||||
return false;
|
||||
pushF32(rv);
|
||||
freeI32(rp);
|
||||
@ -6091,7 +5868,7 @@ BaseCompiler::emitStoreWithCoercion(ValType resultType, Scalar::Type viewType)
|
||||
RegF32 rw = needF32();
|
||||
masm.convertDoubleToFloat32(rv.reg, rw.reg);
|
||||
RegI32 rp = popI32();
|
||||
if (!storeHeap(access, rp, AnyReg(rw)))
|
||||
if (!store(access, rp, AnyReg(rw)))
|
||||
return false;
|
||||
pushF64(rv);
|
||||
freeI32(rp);
|
||||
|
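
A note on the offset handling above: checkOffset() folds an access offset into the pointer only when it is too large for the guard region; smaller offsets ride along for free, and any overflow lands in guard pages. A minimal standalone sketch of that rule, assuming simplified constants rather than the patch's actual helpers:

    #include <cstdint>

    constexpr uint32_t kPageSize = 64 * 1024;
    constexpr uint32_t kMaxAccessSize = 8;  // widest scalar access
    constexpr uint32_t kOffsetGuardLimit = kPageSize - kMaxAccessSize;

    // Returns false for "trap: out of bounds", mirroring the
    // branchAdd32(CarrySet, ...) the real code emits.
    bool foldOffset(uint32_t* base, uint32_t* offset) {
        if (*offset < kOffsetGuardLimit)
            return true;              // guard region absorbs the offset
        uint64_t sum = uint64_t(*base) + *offset;
        if (sum > UINT32_MAX)
            return false;             // carry set -> OutOfBounds
        *base = uint32_t(sum);        // offset now folded into the base
        *offset = 0;                  // clearOffset()
        return true;
    }
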
@ -106,25 +106,34 @@ StaticallyLink(CodeSegment& cs, const LinkData& linkData, ExclusiveContext* cx)
}

static void
SpecializeToMemory(CodeSegment& cs, const Metadata& metadata, HandleWasmMemoryObject memory)
SpecializeToMemory(uint8_t* prevMemoryBase, CodeSegment& cs, const Metadata& metadata,
ArrayBufferObjectMaybeShared& buffer)
{
if (!metadata.boundsChecks.empty()) {
uint32_t length = memory->buffer().wasmBoundsCheckLimit();
MOZ_RELEASE_ASSERT(length == LegalizeMapLength(length));
MOZ_RELEASE_ASSERT(length >= memory->buffer().wasmActualByteLength());
#ifdef WASM_HUGE_MEMORY
MOZ_RELEASE_ASSERT(metadata.boundsChecks.empty());
MOZ_RELEASE_ASSERT(metadata.isAsmJS() || metadata.memoryAccesses.empty());
#else
uint32_t limit = buffer.wasmBoundsCheckLimit();
MOZ_RELEASE_ASSERT(IsValidBoundsCheckImmediate(limit));

for (const BoundsCheck& check : metadata.boundsChecks)
Assembler::UpdateBoundsCheck(check.patchAt(cs.base()), length);
}
for (const BoundsCheck& check : metadata.boundsChecks)
MacroAssembler::wasmPatchBoundsCheck(check.patchAt(cs.base()), limit);
#endif

#if defined(JS_CODEGEN_X86)
uint8_t* base = memory->buffer().dataPointerEither().unwrap();
for (const MemoryAccess& access : metadata.memoryAccesses) {
// Patch memory pointer immediate.
void* addr = access.patchMemoryPtrImmAt(cs.base());
uint32_t disp = reinterpret_cast<uint32_t>(X86Encoding::GetPointer(addr));
MOZ_ASSERT(disp <= INT32_MAX);
X86Encoding::SetPointer(addr, (void*)(base + disp));
uint8_t* memoryBase = buffer.dataPointerEither().unwrap(/* code patching */);
if (prevMemoryBase != memoryBase) {
for (const MemoryAccess& access : metadata.memoryAccesses) {
void* patchAt = access.patchMemoryPtrImmAt(cs.base());

uint8_t* prevImm = (uint8_t*)X86Encoding::GetPointer(patchAt);
MOZ_ASSERT(prevImm >= prevMemoryBase);

uint32_t offset = prevImm - prevMemoryBase;
MOZ_ASSERT(offset <= INT32_MAX);

X86Encoding::SetPointer(patchAt, memoryBase + offset);
}
}
#endif
}
@ -232,7 +241,7 @@ CodeSegment::create(JSContext* cx,
memcpy(codeBase, bytecode.begin(), bytecode.length());
StaticallyLink(*cs, linkData, cx);
if (memory)
SpecializeToMemory(*cs, metadata, memory);
SpecializeToMemory(nullptr, *cs, metadata, memory->buffer());
}

if (!ExecutableAllocator::makeExecutable(codeBase, cs->codeLength())) {
@ -258,6 +267,16 @@ CodeSegment::~CodeSegment()
DeallocateExecutableMemory(bytes_, totalLength(), gc::SystemPageSize());
}

void
CodeSegment::onMovingGrow(uint8_t* prevMemoryBase, const Metadata& metadata, ArrayBufferObject& buffer)
{
AutoWritableJitCode awjc(base(), codeLength());
AutoFlushICache afc("CodeSegment::onMovingGrow");
AutoFlushICache::setRange(uintptr_t(base()), codeLength());

SpecializeToMemory(prevMemoryBase, *this, metadata, buffer);
}

size_t
FuncDefExport::serializedSize() const
{
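
The x86 branch of SpecializeToMemory re-derives each instruction's folded offset from the old base and re-encodes it against the new one. Here is the same arithmetic in isolation, modeling each patch point as a slot holding the absolute immediate (a sketch; the slot representation stands in for X86Encoding::GetPointer/SetPointer):

    #include <cstdint>
    #include <vector>

    // Each slot holds prevMemoryBase + offset, as the instruction encodes it.
    void rebaseMemoryImmediates(std::vector<uint8_t*>& immSlots,
                                uint8_t* prevMemoryBase, uint8_t* memoryBase) {
        if (prevMemoryBase == memoryBase)
            return;                                            // base did not move
        for (uint8_t*& imm : immSlots) {
            uint32_t offset = uint32_t(imm - prevMemoryBase);  // recover offset
            imm = memoryBase + offset;                         // re-encode
        }
    }
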
@ -100,6 +100,11 @@ class CodeSegment
bool containsCodePC(const void* pc) const {
return pc >= base() && pc < (base() + codeLength_);
}

// onMovingGrow must be called if the memory passed to 'create' performs a
// moving grow operation.

void onMovingGrow(uint8_t* prevMemoryBase, const Metadata& metadata, ArrayBufferObject& buffer);
};

// ShareableBytes is a ref-counted vector of bytes which are incrementally built
@ -517,6 +522,7 @@ class Code
const Metadata& metadata,
const ShareableBytes* maybeBytecode);

CodeSegment& segment() { return *segment_; }
const CodeSegment& segment() const { return *segment_; }
const Metadata& metadata() const { return *metadata_; }
@ -292,42 +292,6 @@ Instance::currentMemory_i32(Instance* instance)
return instance->currentMemory();
}

uint32_t
Instance::growMemory(uint32_t delta)
{
MOZ_RELEASE_ASSERT(memory_);

// Using uint64_t to avoid worrying about overflows in safety comp.
uint64_t curNumPages = currentMemory();
uint64_t newNumPages = curNumPages + (uint64_t) delta;

if (metadata().maxMemoryLength) {
ArrayBufferObject &buf = memory_->buffer().as<ArrayBufferObject>();
// Guaranteed by instantiateMemory
MOZ_RELEASE_ASSERT(buf.wasmMaxSize() && buf.wasmMaxSize() <= metadata().maxMemoryLength);

if (newNumPages * wasm::PageSize > buf.wasmMaxSize().value())
return (uint32_t) -1;

// Try to grow the memory
if (!buf.growForWasm(delta))
return (uint32_t) -1;
} else {
return -1; // TODO: implement grow_memory w/o max when we add realloc
}

return curNumPages;
}

uint32_t
Instance::currentMemory()
{
MOZ_RELEASE_ASSERT(memory_);
uint32_t curMemByteLen = memory_->buffer().wasmActualByteLength();
MOZ_ASSERT(curMemByteLen % wasm::PageSize == 0);
return curMemByteLen / wasm::PageSize;
}

Instance::Instance(JSContext* cx,
Handle<WasmInstanceObject*> object,
UniqueCode code,
@ -411,6 +375,9 @@ Instance::Instance(JSContext* cx,
bool
Instance::init(JSContext* cx)
{
if (memory_ && memory_->movingGrowable() && !memory_->addMovingGrowObserver(cx, object_))
return false;

if (!metadata().sigIds.empty()) {
ExclusiveData<SigIdSet>::Guard lockedSigIdSet = sigIdSet.lock();

@ -590,6 +557,9 @@ Instance::object() const
bool
Instance::callExport(JSContext* cx, uint32_t funcDefIndex, CallArgs args)
{
// If there has been a moving grow, this Instance should have been notified.
MOZ_RELEASE_ASSERT(!memory_ || tlsData_.memoryBase == memory_->buffer().dataPointerEither());

if (!cx->compartment()->wasm.ensureProfilingState(cx))
return false;

@ -806,6 +776,33 @@ Instance::callExport(JSContext* cx, uint32_t funcDefIndex, CallArgs args)
return true;
}

uint32_t
Instance::currentMemory()
{
MOZ_RELEASE_ASSERT(memory_);
uint32_t byteLength = memory_->buffer().wasmActualByteLength();
MOZ_ASSERT(byteLength % wasm::PageSize == 0);
return byteLength / wasm::PageSize;
}

uint32_t
Instance::growMemory(uint32_t delta)
{
MOZ_ASSERT(!isAsmJS());
uint32_t ret = memory_->grow(delta);
MOZ_RELEASE_ASSERT(tlsData_.memoryBase == memory_->buffer().dataPointerEither());
return ret;
}

void
Instance::onMovingGrow(uint8_t* prevMemoryBase)
{
MOZ_ASSERT(!isAsmJS());
ArrayBufferObject& buffer = memory_->buffer().as<ArrayBufferObject>();
tlsData_.memoryBase = buffer.dataPointer();
code_->segment().onMovingGrow(prevMemoryBase, metadata(), buffer);
}

void
Instance::deoptimizeImportExit(uint32_t funcImportIndex)
{
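
The currentMemory/growMemory pair above works entirely in 64KiB pages, with uint32_t(-1) as the failure sentinel. A simplified model of that contract (illustrative only; the real implementation grows an ArrayBuffer and notifies observers):

    #include <cstdint>

    constexpr uint64_t kPageSize = 64 * 1024;

    struct MemoryModel {
        uint64_t byteLength;     // invariant: multiple of kPageSize
        uint64_t maxByteLength;  // 0 means no declared maximum

        uint32_t currentMemory() const {
            return uint32_t(byteLength / kPageSize);
        }

        // Returns the old size in pages, or uint32_t(-1) on failure.
        uint32_t growMemory(uint32_t deltaPages) {
            uint64_t newLength = byteLength + uint64_t(deltaPages) * kPageSize;
            if (newLength > UINT32_MAX)
                return uint32_t(-1);
            if (maxByteLength && newLength > maxByteLength)
                return uint32_t(-1);
            uint32_t oldPages = currentMemory();
            byteLength = newLength;
            return oldPages;
        }
    };
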
@ -58,16 +58,17 @@ class Instance
static int32_t callImport_f64(Instance*, int32_t, int32_t, uint64_t*);
static uint32_t growMemory_i32(Instance* instance, uint32_t delta);
static uint32_t currentMemory_i32(Instance* instance);

bool callImport(JSContext* cx, uint32_t funcImportIndex, unsigned argc, const uint64_t* argv,
MutableHandleValue rval);
uint32_t growMemory(uint32_t delta);
uint32_t currentMemory();

// Only WasmInstanceObject can call the private trace function.
friend class js::WasmInstanceObject;
void tracePrivate(JSTracer* trc);

// Only WasmMemoryObject can call the private onMovingGrow notification.
friend class js::WasmMemoryObject;
void onMovingGrow(uint8_t* prevMemoryBase);

public:
Instance(JSContext* cx,
HandleWasmInstanceObject object,
@ -106,6 +107,12 @@ class Instance

MOZ_MUST_USE bool callExport(JSContext* cx, uint32_t funcDefIndex, CallArgs args);

// These methods implement their respective wasm operator but may also be
// called via the Memory JS API.

uint32_t currentMemory();
uint32_t growMemory(uint32_t delta);

// Initially, calls to imports in wasm code call out through the generic
// callImport method. If the imported callee gets JIT compiled and the types
// match up, callImport will patch the code to instead call through a thunk
@ -113,6 +120,10 @@ class Instance
// be notified so it can go back to the generic callImport.

void deoptimizeImportExit(uint32_t funcImportIndex);

// Called by simulators to check whether accessing 'numBytes' starting at
// 'addr' would trigger a fault and be safely handled by signal handlers.

bool memoryAccessWouldFault(uint8_t* addr, unsigned numBytes);

// See Code::ensureProfilingState comment.
@ -702,117 +702,98 @@ class FunctionCompiler
}

private:
// False means we're sure to be out-of-bounds after this bounds check.
bool maybeAddBoundsCheck(MDefinition* base, const MWasmMemoryAccess& access)
void checkOffsetAndBounds(MWasmMemoryAccess* access, MDefinition** base)
{
if (access.offset() > uint32_t(INT32_MAX)) {
curBlock_->end(MWasmTrap::New(alloc(), Trap::OutOfBounds));
curBlock_ = nullptr;
return false;
// If the offset is bigger than the guard region, a separate instruction
// is necessary to add the offset to the base and check for overflow.
if (access->offset() >= OffsetGuardLimit || !JitOptions.wasmFoldOffsets) {
auto* ins = MWasmAddOffset::New(alloc(), *base, access->offset());
curBlock_->add(ins);

*base = ins;
access->clearOffset();
}

#ifndef WASM_HUGE_MEMORY
curBlock_->add(MWasmBoundsCheck::New(alloc(), base, access));
curBlock_->add(MWasmBoundsCheck::New(alloc(), *base));
#endif
return true;
}

MDefinition* loadHeapPrivate(MDefinition* base, const MWasmMemoryAccess& access,
bool isInt64 = false)
// Return true only for real asm.js (HEAP[i>>2]|0) accesses which have the
// peculiar property of not throwing on out-of-bounds. Everything else
// (wasm, SIMD.js, Atomics) throws on out-of-bounds.
bool isAsmJSAccess(const MWasmMemoryAccess& access) {
return mg().isAsmJS() && !access.isSimdAccess() && !access.isAtomicAccess();
}

public:
MDefinition* load(MDefinition* base, MWasmMemoryAccess access, ValType result)
{
if (inDeadCode())
return nullptr;

MInstruction* load = nullptr;
if (mg().isAsmJS()) {
load = MAsmJSLoadHeap::New(alloc(), base, access);
if (isAsmJSAccess(access)) {
MOZ_ASSERT(access.offset() == 0);
load = MAsmJSLoadHeap::New(alloc(), base, access.accessType());
} else {
if (!maybeAddBoundsCheck(base, access))
return nullptr;
load = MWasmLoad::New(alloc(), base, access, isInt64);
checkOffsetAndBounds(&access, &base);
load = MWasmLoad::New(alloc(), base, access, ToMIRType(result));
}

curBlock_->add(load);
return load;
}

void storeHeapPrivate(MDefinition* base, const MWasmMemoryAccess& access, MDefinition* v)
void store(MDefinition* base, MWasmMemoryAccess access, MDefinition* v)
{
if (inDeadCode())
return;

MInstruction* store = nullptr;
if (mg().isAsmJS()) {
store = MAsmJSStoreHeap::New(alloc(), base, access, v);
if (isAsmJSAccess(access)) {
MOZ_ASSERT(access.offset() == 0);
store = MAsmJSStoreHeap::New(alloc(), base, access.accessType(), v);
} else {
if (!maybeAddBoundsCheck(base, access))
return;
checkOffsetAndBounds(&access, &base);
store = MWasmStore::New(alloc(), base, access, v);
}

curBlock_->add(store);
}

public:
MDefinition* loadHeap(MDefinition* base, const MWasmMemoryAccess& access, bool isInt64)
{
MOZ_ASSERT(!Scalar::isSimdType(access.accessType()), "SIMD loads should use loadSimdHeap");
return loadHeapPrivate(base, access, isInt64);
}
MDefinition* loadSimdHeap(MDefinition* base, const MWasmMemoryAccess& access)
{
MOZ_ASSERT(Scalar::isSimdType(access.accessType()), "non-SIMD loads should use loadHeap");
return loadHeapPrivate(base, access);
}
MDefinition* loadAtomicHeap(MDefinition* base, const MWasmMemoryAccess& access)
{
return loadHeapPrivate(base, access);
}

void storeHeap(MDefinition* base, const MWasmMemoryAccess& access, MDefinition* v)
{
MOZ_ASSERT(!Scalar::isSimdType(access.accessType()), "SIMD store should use storeSimdHeap");
storeHeapPrivate(base, access, v);
}
void storeSimdHeap(MDefinition* base, const MWasmMemoryAccess& access, MDefinition* v)
{
MOZ_ASSERT(Scalar::isSimdType(access.accessType()), "non-SIMD stores should use storeHeap");
storeHeapPrivate(base, access, v);
}
void storeAtomicHeap(MDefinition* base, const MWasmMemoryAccess& access, MDefinition* v)
{
storeHeapPrivate(base, access, v);
}

MDefinition* atomicCompareExchangeHeap(MDefinition* base, const MWasmMemoryAccess& access,
MDefinition* atomicCompareExchangeHeap(MDefinition* base, MWasmMemoryAccess access,
MDefinition* oldv, MDefinition* newv)
{
if (inDeadCode())
return nullptr;

checkOffsetAndBounds(&access, &base);
auto* cas = MAsmJSCompareExchangeHeap::New(alloc(), base, access, oldv, newv, tlsPointer_);
curBlock_->add(cas);
return cas;
}

MDefinition* atomicExchangeHeap(MDefinition* base, const MWasmMemoryAccess& access,
MDefinition* atomicExchangeHeap(MDefinition* base, MWasmMemoryAccess access,
MDefinition* value)
{
if (inDeadCode())
return nullptr;

checkOffsetAndBounds(&access, &base);
auto* cas = MAsmJSAtomicExchangeHeap::New(alloc(), base, access, value, tlsPointer_);
curBlock_->add(cas);
return cas;
}

MDefinition* atomicBinopHeap(js::jit::AtomicOp op,
MDefinition* base, const MWasmMemoryAccess& access,
MDefinition* base, MWasmMemoryAccess access,
MDefinition* v)
{
if (inDeadCode())
return nullptr;

checkOffsetAndBounds(&access, &base);
auto* binop = MAsmJSAtomicBinopHeap::New(alloc(), op, base, access, v, tlsPointer_);
curBlock_->add(binop);
return binop;
@ -1113,7 +1094,8 @@ class FunctionCompiler
CallSiteDesc desc(call.lineOrBytecode_, CallSiteDesc::Register);
auto* ins = MWasmCall::NewBuiltinInstanceMethodCall(alloc(), desc, builtin,
call.instanceArg_, call.regArgs_,
ToMIRType(ret), call.spIncrement_);
ToMIRType(ret), call.spIncrement_,
call.tlsStackOffset_);
if (!ins)
return false;

@ -2351,7 +2333,7 @@ EmitLoad(FunctionCompiler& f, ValType type, Scalar::Type viewType)
return false;

MWasmMemoryAccess access(viewType, addr.align, addr.offset);
f.iter().setResult(f.loadHeap(addr.base, access, type == ValType::I64));
f.iter().setResult(f.load(addr.base, access, type));
return true;
}

@ -2364,7 +2346,7 @@ EmitStore(FunctionCompiler& f, ValType resultType, Scalar::Type viewType)
return false;

MWasmMemoryAccess access(viewType, addr.align, addr.offset);
f.storeHeap(addr.base, access, value);
f.store(addr.base, access, value);
return true;
}

@ -2384,7 +2366,7 @@ EmitStoreWithCoercion(FunctionCompiler& f, ValType resultType, Scalar::Type view
MOZ_CRASH("unexpected coerced store");

MWasmMemoryAccess access(viewType, addr.align, addr.offset);
f.storeHeap(addr.base, access, value);
f.store(addr.base, access, value);
return true;
}

@ -2458,7 +2440,7 @@ EmitAtomicsLoad(FunctionCompiler& f)

MWasmMemoryAccess access(viewType, addr.align, addr.offset, 0,
MembarBeforeLoad, MembarAfterLoad);
f.iter().setResult(f.loadAtomicHeap(addr.base, access));
f.iter().setResult(f.load(addr.base, access, ValType::I32));
return true;
}

@ -2473,7 +2455,7 @@ EmitAtomicsStore(FunctionCompiler& f)

MWasmMemoryAccess access(viewType, addr.align, addr.offset, 0,
MembarBeforeStore, MembarAfterStore);
f.storeAtomicHeap(addr.base, access, value);
f.store(addr.base, access, value);
f.iter().setResult(value);
return true;
}
@ -2740,7 +2722,7 @@ EmitSimdLoad(FunctionCompiler& f, ValType resultType, unsigned numElems)
return false;

MWasmMemoryAccess access(viewType, addr.align, addr.offset, numElems);
f.iter().setResult(f.loadSimdHeap(addr.base, access));
f.iter().setResult(f.load(addr.base, access, resultType));
return true;
}

@ -2759,7 +2741,7 @@ EmitSimdStore(FunctionCompiler& f, ValType resultType, unsigned numElems)
return false;

MWasmMemoryAccess access(viewType, addr.align, addr.offset, numElems);
f.storeSimdHeap(addr.base, access, value);
f.store(addr.base, access, value);
return true;
}

@ -2939,16 +2921,12 @@ EmitSimdOp(FunctionCompiler& f, ValType type, SimdOperation op, SimdSign sign)
return EmitSimdLoad(f, type, 1);
case SimdOperation::Fn_load2:
return EmitSimdLoad(f, type, 2);
case SimdOperation::Fn_load3:
return EmitSimdLoad(f, type, 3);
case SimdOperation::Fn_store:
return EmitSimdStore(f, type, 0);
case SimdOperation::Fn_store1:
return EmitSimdStore(f, type, 1);
case SimdOperation::Fn_store2:
return EmitSimdStore(f, type, 2);
case SimdOperation::Fn_store3:
return EmitSimdStore(f, type, 3);
case SimdOperation::Fn_allTrue:
return EmitSimdAllTrue(f, type);
case SimdOperation::Fn_anyTrue:
@ -3002,6 +2980,8 @@ EmitSimdOp(FunctionCompiler& f, ValType type, SimdOperation op, SimdSign sign)
return EmitSimdBitcast(f, ValType::I32x4, type);
case SimdOperation::Fn_fromFloat32x4Bits:
return EmitSimdBitcast(f, ValType::F32x4, type);
case SimdOperation::Fn_load3:
case SimdOperation::Fn_store3:
case SimdOperation::Fn_fromFloat64x2Bits:
MOZ_CRASH("NYI");
}
@ -3027,7 +3007,10 @@ EmitGrowMemory(FunctionCompiler& f, uint32_t callOffset)
if (!f.passArg(delta, ValType::I32, &args))
return false;

f.finishCall(&args, PassTls::False, InterModule::False);
// As a short-cut, pretend this is an inter-module call so that any pinned
// heap pointer will be reloaded after the call. This hack will go away once
// we can stop pinning registers.
f.finishCall(&args, PassTls::True, InterModule::True);

MDefinition* ret;
if (!f.builtinInstanceMethodCall(SymbolicAddress::GrowMemory, args, ValType::I32, &ret))
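
Every access builder in FunctionCompiler now funnels through checkOffsetAndBounds: fold a small offset, materialize a large one through an explicit add-with-overflow, then emit a single bounds check on the final base. The same control flow on concrete integers instead of MIR nodes (a sketch, with the guard limit assumed as in WasmTypes.h):

    #include <cstdint>
    #include <stdexcept>

    constexpr uint32_t kOffsetGuardLimit = 64 * 1024 - 8;

    struct Access { uint32_t offset; };

    // Mirrors MWasmAddOffset followed by MWasmBoundsCheck.
    uint32_t checkOffsetAndBounds(Access* access, uint32_t base,
                                  uint32_t boundsCheckLimit) {
        if (access->offset >= kOffsetGuardLimit) {
            uint64_t sum = uint64_t(base) + access->offset;
            if (sum > UINT32_MAX)
                throw std::out_of_range("wasm trap: out of bounds");
            base = uint32_t(sum);   // offset folded into the base
            access->offset = 0;     // clearOffset()
        }
        if (base >= boundsCheckLimit)
            throw std::out_of_range("wasm trap: out of bounds");
        return base;
    }
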
@ -18,6 +18,7 @@

#include "asmjs/WasmJS.h"

#include "mozilla/CheckedInt.h"
#include "mozilla/Maybe.h"

#include "asmjs/WasmCompile.h"
@ -35,6 +36,7 @@
using namespace js;
using namespace js::jit;
using namespace js::wasm;
using mozilla::CheckedInt;
using mozilla::Nothing;

bool
@ -710,13 +712,35 @@ wasm::ExportedFunctionToDefinitionIndex(JSFunction* fun)
// ============================================================================
// WebAssembly.Memory class and methods

const ClassOps WasmMemoryObject::classOps_ =
{
nullptr, /* addProperty */
nullptr, /* delProperty */
nullptr, /* getProperty */
nullptr, /* setProperty */
nullptr, /* enumerate */
nullptr, /* resolve */
nullptr, /* mayResolve */
WasmMemoryObject::finalize
};

const Class WasmMemoryObject::class_ =
{
"WebAssembly.Memory",
JSCLASS_DELAY_METADATA_BUILDER |
JSCLASS_HAS_RESERVED_SLOTS(WasmMemoryObject::RESERVED_SLOTS)
JSCLASS_HAS_RESERVED_SLOTS(WasmMemoryObject::RESERVED_SLOTS) |
JSCLASS_FOREGROUND_FINALIZE,
&WasmMemoryObject::classOps_
};

/* static */ void
WasmMemoryObject::finalize(FreeOp* fop, JSObject* obj)
{
WasmMemoryObject& memory = obj->as<WasmMemoryObject>();
if (memory.hasObservers())
fop->delete_(&memory.observers());
}

/* static */ WasmMemoryObject*
WasmMemoryObject::create(ExclusiveContext* cx, HandleArrayBufferObjectMaybeShared buffer,
HandleObject proto)
@ -727,6 +751,7 @@ WasmMemoryObject::create(ExclusiveContext* cx, HandleArrayBufferObjectMaybeShare
return nullptr;

obj->initReservedSlot(BUFFER_SLOT, ObjectValue(*buffer));
MOZ_ASSERT(!obj->hasObservers());
return obj;
}

@ -840,6 +865,104 @@ WasmMemoryObject::buffer() const
return getReservedSlot(BUFFER_SLOT).toObject().as<ArrayBufferObjectMaybeShared>();
}

bool
WasmMemoryObject::hasObservers() const
{
return !getReservedSlot(OBSERVERS_SLOT).isUndefined();
}

WasmMemoryObject::WeakInstanceSet&
WasmMemoryObject::observers() const
{
MOZ_ASSERT(hasObservers());
return *reinterpret_cast<WeakInstanceSet*>(getReservedSlot(OBSERVERS_SLOT).toPrivate());
}

WasmMemoryObject::WeakInstanceSet*
WasmMemoryObject::getOrCreateObservers(JSContext* cx)
{
if (!hasObservers()) {
auto observers = MakeUnique<WeakInstanceSet>(cx->zone(), InstanceSet());
if (!observers || !observers->init()) {
ReportOutOfMemory(cx);
return nullptr;
}

setReservedSlot(OBSERVERS_SLOT, PrivateValue(observers.release()));
}

return &observers();
}

bool
WasmMemoryObject::movingGrowable() const
{
#ifdef WASM_HUGE_MEMORY
return false;
#else
return !buffer().wasmMaxSize();
#endif
}

bool
WasmMemoryObject::addMovingGrowObserver(JSContext* cx, WasmInstanceObject* instance)
{
MOZ_ASSERT(movingGrowable());

WeakInstanceSet* observers = getOrCreateObservers(cx);
if (!observers)
return false;

if (!observers->putNew(instance)) {
ReportOutOfMemory(cx);
return false;
}

return true;
}

uint32_t
WasmMemoryObject::grow(uint32_t delta)
{
ArrayBufferObject &buf = buffer().as<ArrayBufferObject>();

MOZ_ASSERT(buf.wasmActualByteLength() % PageSize == 0);
uint32_t oldNumPages = buf.wasmActualByteLength() / PageSize;

CheckedInt<uint32_t> newSize = oldNumPages;
newSize += delta;
newSize *= PageSize;
if (!newSize.isValid())
return -1;

if (Maybe<uint32_t> maxSize = buf.wasmMaxSize()) {
if (newSize.value() > maxSize.value())
return -1;

if (!buf.wasmGrowToSizeInPlace(newSize.value()))
return -1;
} else {
#ifdef WASM_HUGE_MEMORY
if (!buf.wasmGrowToSizeInPlace(newSize.value()))
return -1;
#else
MOZ_ASSERT(movingGrowable());

uint8_t* prevMemoryBase = buf.dataPointer();

if (!buf.wasmMovingGrowToSize(newSize.value()))
return -1;

if (hasObservers()) {
for (InstanceSet::Range r = observers().all(); !r.empty(); r.popFront())
r.front()->instance().onMovingGrow(prevMemoryBase);
}
#endif
}

return oldNumPages;
}

// ============================================================================
// WebAssembly.Table class and methods
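
WasmMemoryObject::grow leans on CheckedInt for the pages-to-bytes conversion so that neither the addition nor the multiplication can silently wrap. The same guard written out by hand (a sketch of just the size computation; the real method then grows in place or performs a moving grow):

    #include <cstdint>

    constexpr uint64_t kPageSize = 64 * 1024;

    // Returns the new byte size, or 0 to signal grow()'s -1 failure path.
    uint64_t checkedNewSize(uint32_t oldNumPages, uint32_t delta) {
        uint64_t newPages = uint64_t(oldNumPages) + delta;  // exact in 64 bits
        uint64_t newSize = newPages * kPageSize;
        if (newSize > UINT32_MAX)     // CheckedInt<uint32_t> would be invalid
            return 0;
        return newSize;
    }
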
@ -160,9 +160,20 @@ class WasmInstanceObject : public NativeObject
class WasmMemoryObject : public NativeObject
{
static const unsigned BUFFER_SLOT = 0;
static const unsigned OBSERVERS_SLOT = 1;
static const ClassOps classOps_;
static void finalize(FreeOp* fop, JSObject* obj);

using InstanceSet = GCHashSet<ReadBarrieredWasmInstanceObject,
MovableCellHasher<ReadBarrieredWasmInstanceObject>,
SystemAllocPolicy>;
using WeakInstanceSet = JS::WeakCache<InstanceSet>;
bool hasObservers() const;
WeakInstanceSet& observers() const;
WeakInstanceSet* getOrCreateObservers(JSContext* cx);

public:
static const unsigned RESERVED_SLOTS = 1;
static const unsigned RESERVED_SLOTS = 2;
static const Class class_;
static const JSPropertySpec properties[];
static const JSFunctionSpec methods[];
@ -172,6 +183,10 @@ class WasmMemoryObject : public NativeObject
Handle<ArrayBufferObjectMaybeShared*> buffer,
HandleObject proto);
ArrayBufferObjectMaybeShared& buffer() const;

bool movingGrowable() const;
bool addMovingGrowObserver(JSContext* cx, WasmInstanceObject* instance);
uint32_t grow(uint32_t delta);
};

// The class of WebAssembly.Table. A WasmTableObject holds a refcount on a
@ -576,10 +576,10 @@ Module::instantiateMemory(JSContext* cx, MutableHandleWasmMemoryObject memory) c
return false;
}

// For asm.js maxMemoryLength doesn't play a role since we can't grow memory.
// For wasm we require that either both memory and module don't specify a max size
// OR that the memory's max size is less than the modules.
if (!metadata_->isAsmJS()) {
if (metadata_->isAsmJS()) {
MOZ_ASSERT(IsValidAsmJSHeapLength(actualLength));
MOZ_ASSERT(actualLength == buffer->wasmMaxSize().value());
} else {
Maybe<uint32_t> actualMax = buffer->as<ArrayBufferObject>().wasmMaxSize();
if (declaredMax.isSome() != actualMax.isSome() || declaredMax < actualMax) {
JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_IMP_SIZE, "Memory");
@ -601,11 +601,24 @@ ComputeAccessAddress(EMULATOR_CONTEXT* context, const Disassembler::ComplexAddre
return reinterpret_cast<uint8_t*>(result);
}

MOZ_COLD static uint8_t*
EmulateHeapAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddress,
const MemoryAccess* memoryAccess, const Instance& instance)
MOZ_COLD static bool
HugeMemoryAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddress,
const Instance& instance, uint8_t** ppc)
{
MOZ_RELEASE_ASSERT(instance.codeSegment().containsFunctionPC(pc));

// On WASM_HUGE_MEMORY platforms, wasm::MemoryAccess is only created for
// asm.js loads and stores since they unfortunately do not simply throw on
// out-of-bounds. Everything else (WebAssembly and experimental
// SIMD/Atomics) throws.

const MemoryAccess* memoryAccess = instance.code().lookupMemoryAccess(pc);
if (!memoryAccess) {
*ppc = instance.codeSegment().outOfBoundsCode();
return true;
}

MOZ_RELEASE_ASSERT(instance.isAsmJS());
MOZ_RELEASE_ASSERT(memoryAccess->insnOffset() == (pc - instance.codeBase()));

// Disassemble the instruction which caused the trap so that we can extract
@ -651,13 +664,6 @@ EmulateHeapAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddre
instance.memoryLength(),
"Computed access address is not actually out of bounds");

// Wasm loads/stores don't wrap offsets at all, so hitting the guard page
// means we are out of bounds in any cases.
if (!memoryAccess->wrapOffset()) {
MOZ_ASSERT(memoryAccess->throwOnOOB());
return instance.codeSegment().outOfBoundsCode();
}

// The basic sandbox model is that all heap accesses are a heap base
// register plus an index, and the index is always computed with 32-bit
// operations, so we know it can only be 4 GiB off of the heap base.
@ -665,10 +671,10 @@ EmulateHeapAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddre
// However, we wish to support the optimization of folding immediates
// and scaled indices into addresses, and any address arithmetic we fold
// gets done at full pointer width, so it doesn't get properly wrapped.
// We support this by extending MappedSize to the greatest size that could
// be reached by such an unwrapped address, and then when we arrive here in
// the signal handler for such an access, we compute the fully wrapped
// address, and perform the load or store on it.
// We support this by extending HugeMappedSize to the greatest size that
// could be reached by such an unwrapped address, and then when we arrive
// here in the signal handler for such an access, we compute the fully
// wrapped address, and perform the load or store on it.
//
// Taking a signal is really slow, but in theory programs really shouldn't
// be hitting this anyway.
@ -678,13 +684,6 @@ EmulateHeapAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddre
MOZ_RELEASE_ASSERT(wrappedOffset + size > wrappedOffset);
bool inBounds = wrappedOffset + size < instance.memoryLength();

// If this is storing Z of an XYZ, check whether X is also in bounds, so
// that we don't store anything before throwing.
MOZ_RELEASE_ASSERT(unwrappedOffset > memoryAccess->offsetWithinWholeSimdVector());
uint32_t wrappedBaseOffset = uint32_t(unwrappedOffset - memoryAccess->offsetWithinWholeSimdVector());
if (wrappedBaseOffset >= instance.memoryLength())
inBounds = false;

if (inBounds) {
// We now know that this is an access that is actually in bounds when
// properly wrapped. Complete the load or store with the wrapped
@ -711,10 +710,6 @@ EmulateHeapAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddre
} else {
// We now know that this is an out-of-bounds access made by an asm.js
// load/store that we should handle.

if (memoryAccess->throwOnOOB())
return instance.codeSegment().outOfBoundsCode();

switch (access.kind()) {
case Disassembler::HeapAccess::Load:
case Disassembler::HeapAccess::LoadSext32:
@ -736,7 +731,8 @@ EmulateHeapAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddre
}
}

return end;
*ppc = end;
return true;
}
#endif // JS_CODEGEN_X64

@ -803,15 +799,11 @@ HandleFault(PEXCEPTION_POINTERS exception)
}

#ifdef WASM_HUGE_MEMORY
const MemoryAccess* memoryAccess = instance->code().lookupMemoryAccess(pc);
if (!memoryAccess)
*ppc = instance->codeSegment().outOfBoundsCode();
else
*ppc = EmulateHeapAccess(context, pc, faultingAddress, memoryAccess, *instance);
return HugeMemoryAccess(context, pc, faultingAddress, *instance, ppc);
#else
*ppc = instance->codeSegment().outOfBoundsCode();
#endif
return true;
#endif
}

static LONG WINAPI
@ -934,11 +926,8 @@ HandleMachException(JSRuntime* rt, const ExceptionRequest& request)
return false;

#ifdef WASM_HUGE_MEMORY
const MemoryAccess* memoryAccess = instance->code().lookupMemoryAccess(pc);
if (!memoryAccess)
*ppc = instance->codeSegment().outOfBoundsCode();
else
*ppc = EmulateHeapAccess(&context, pc, faultingAddress, memoryAccess, *instance);
if (!HugeMemoryAccess(&context, pc, faultingAddress, *instance, ppc))
return false;
#else
*ppc = instance->codeSegment().outOfBoundsCode();
#endif
@ -1153,24 +1142,18 @@ HandleFault(int signum, siginfo_t* info, void* ctx)
return false;

#ifdef WASM_HUGE_MEMORY
MOZ_RELEASE_ASSERT(signal == Signal::SegFault);
const MemoryAccess* memoryAccess = instance->code().lookupMemoryAccess(pc);
if (!memoryAccess)
*ppc = instance->codeSegment().outOfBoundsCode();
else
*ppc = EmulateHeapAccess(context, pc, faultingAddress, memoryAccess, *instance);
return HugeMemoryAccess(context, pc, faultingAddress, *instance, ppc);
#elif defined(JS_CODEGEN_ARM)
MOZ_RELEASE_ASSERT(signal == Signal::BusError || signal == Signal::SegFault);
if (signal == Signal::BusError)
*ppc = instance->codeSegment().unalignedAccessCode();
else
*ppc = instance->codeSegment().outOfBoundsCode();
#else
MOZ_RELEASE_ASSERT(signal == Signal::SegFault);
*ppc = instance->codeSegment().outOfBoundsCode();
#endif

return true;
#else
*ppc = instance->codeSegment().outOfBoundsCode();
return true;
#endif
}

static struct sigaction sPrevSEGVHandler;
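
The core of HugeMemoryAccess is the wraparound step: reduce the faulting address to a 32-bit offset from the heap base, then re-test bounds on the wrapped value. Condensed to its arithmetic (names are illustrative, not the handler's real API):

    #include <cstdint>

    // Decide whether an asm.js access that hit the huge guard region is
    // actually in bounds once its offset is wrapped to 32 bits.
    bool wrappedAccessInBounds(uint8_t* faultingAddress, uint8_t* memoryBase,
                               uint64_t memoryLength, uint32_t accessSize) {
        uint64_t unwrappedOffset = uint64_t(faultingAddress - memoryBase);
        uint32_t wrappedOffset = uint32_t(unwrappedOffset);  // 32-bit wrap
        return uint64_t(wrappedOffset) + accessSize < memoryLength;
    }
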
@ -624,50 +624,62 @@ Assumptions::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
// or
// 2^24 * n for n >= 1.
bool
wasm::IsValidARMLengthImmediate(uint32_t length)
wasm::IsValidARMImmediate(uint32_t i)
{
bool valid = (IsPowerOfTwo(length) ||
(length & 0x00ffffff) == 0);
bool valid = (IsPowerOfTwo(i) ||
(i & 0x00ffffff) == 0);

MOZ_ASSERT_IF(valid, length % PageSize == 0);
MOZ_ASSERT_IF(valid, i % PageSize == 0);

return valid;
}

uint32_t
wasm::RoundUpToNextValidARMLengthImmediate(uint32_t length)
wasm::RoundUpToNextValidARMImmediate(uint32_t i)
{
MOZ_ASSERT(length <= 0xff000000);
MOZ_ASSERT(i <= 0xff000000);

if (length <= 16 * 1024 * 1024)
length = length ? mozilla::RoundUpPow2(length) : 0;
if (i <= 16 * 1024 * 1024)
i = i ? mozilla::RoundUpPow2(i) : 0;
else
length = (length + 0x00ffffff) & ~0x00ffffff;
i = (i + 0x00ffffff) & ~0x00ffffff;

MOZ_ASSERT(IsValidARMLengthImmediate(length));
MOZ_ASSERT(IsValidARMImmediate(i));

return length;
return i;
}

#ifndef WASM_HUGE_MEMORY

bool
wasm::IsValidBoundsCheckImmediate(uint32_t i)
{
#ifdef JS_CODEGEN_ARM
return IsValidARMImmediate(i);
#else
return true;
#endif
}

size_t
wasm::LegalizeMapLength(size_t requestedSize)
wasm::ComputeMappedSize(uint32_t maxSize)
{
#ifdef WASM_HUGE_MEMORY
// On 64-bit platforms just give us a 4G guard region
return wasm::MappedSize;
#else
uint32_t res = requestedSize;
MOZ_ASSERT(maxSize % PageSize == 0);

// On 32-bit platforms clamp down to 1GB
uint32_t MaxMappedSize = (1 << 30);
res = Min(res, MaxMappedSize);
// It is the bounds-check limit, not the mapped size, that gets baked into
// code. Thus round up the maxSize to the next valid immediate value
// *before* adding in the guard page.

# ifdef JS_CODEGEN_ARM
// On Arm round so that it fits in a single instruction
res = RoundUpToNextValidARMLengthImmediate(res);
MOZ_RELEASE_ASSERT(res <= MaxMappedSize);
uint32_t boundsCheckLimit = RoundUpToNextValidARMImmediate(maxSize);
# else
uint32_t boundsCheckLimit = maxSize;
# endif
MOZ_ASSERT(IsValidBoundsCheckImmediate(boundsCheckLimit));

return res;
#endif
MOZ_ASSERT(boundsCheckLimit % gc::SystemPageSize() == 0);
MOZ_ASSERT(GuardSize % gc::SystemPageSize() == 0);
return boundsCheckLimit + GuardSize;
}

#endif // WASM_HUGE_MEMORY
|
||||
|
||||
typedef Vector<CallSiteAndTarget, 0, SystemAllocPolicy> CallSiteAndTargetVector;
|
||||
|
||||
// Metadata for a bounds check that may need patching later.
|
||||
|
||||
class BoundsCheck
|
||||
{
|
||||
public:
|
||||
BoundsCheck() = default;
|
||||
|
||||
explicit BoundsCheck(uint32_t cmpOffset)
|
||||
: cmpOffset_(cmpOffset)
|
||||
{ }
|
||||
|
||||
uint8_t* patchAt(uint8_t* code) const { return code + cmpOffset_; }
|
||||
void offsetBy(uint32_t offset) { cmpOffset_ += offset; }
|
||||
|
||||
private:
|
||||
uint32_t cmpOffset_; // absolute offset of the comparison
|
||||
};
|
||||
|
||||
// Summarizes a heap access made by wasm code that needs to be patched later
|
||||
// and/or looked up by the wasm signal handlers. Different architectures need
|
||||
// to know different things (x64: intruction offset, wrapping and failure
|
||||
// behavior, ARM: nothing, x86: offset of end of instruction (heap length to
|
||||
// patch is last 4 bytes of instruction)).
|
||||
#if defined(JS_CODEGEN_X86)
|
||||
class MemoryAccess
|
||||
{
|
||||
uint32_t nextInsOffset_;
|
||||
|
||||
public:
|
||||
MemoryAccess() = default;
|
||||
|
||||
explicit MemoryAccess(uint32_t nextInsOffset)
|
||||
: nextInsOffset_(nextInsOffset)
|
||||
{ }
|
||||
|
||||
void* patchMemoryPtrImmAt(uint8_t* code) const { return code + nextInsOffset_; }
|
||||
void offsetBy(uint32_t offset) { nextInsOffset_ += offset; }
|
||||
};
|
||||
#elif defined(JS_CODEGEN_X64)
|
||||
class MemoryAccess
|
||||
{
|
||||
uint32_t insnOffset_;
|
||||
uint8_t offsetWithinWholeSimdVector_; // if is this e.g. the Z of an XYZ
|
||||
bool throwOnOOB_; // should we throw on OOB?
|
||||
bool wrapOffset_; // should we wrap the offset on OOB?
|
||||
|
||||
public:
|
||||
enum OutOfBoundsBehavior {
|
||||
Throw,
|
||||
CarryOn,
|
||||
};
|
||||
enum WrappingBehavior {
|
||||
WrapOffset,
|
||||
DontWrapOffset,
|
||||
};
|
||||
|
||||
MemoryAccess() = default;
|
||||
|
||||
MemoryAccess(uint32_t insnOffset, OutOfBoundsBehavior onOOB, WrappingBehavior onWrap,
|
||||
uint32_t offsetWithinWholeSimdVector = 0)
|
||||
: insnOffset_(insnOffset),
|
||||
offsetWithinWholeSimdVector_(offsetWithinWholeSimdVector),
|
||||
throwOnOOB_(onOOB == OutOfBoundsBehavior::Throw),
|
||||
wrapOffset_(onWrap == WrappingBehavior::WrapOffset)
|
||||
{
|
||||
MOZ_ASSERT(offsetWithinWholeSimdVector_ == offsetWithinWholeSimdVector, "fits in uint8");
|
||||
}
|
||||
|
||||
uint32_t insnOffset() const { return insnOffset_; }
|
||||
uint32_t offsetWithinWholeSimdVector() const { return offsetWithinWholeSimdVector_; }
|
||||
bool throwOnOOB() const { return throwOnOOB_; }
|
||||
bool wrapOffset() const { return wrapOffset_; }
|
||||
|
||||
void offsetBy(uint32_t offset) { insnOffset_ += offset; }
|
||||
};
|
||||
#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
|
||||
defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
|
||||
defined(JS_CODEGEN_NONE)
|
||||
// Nothing! We don't patch or emulate memory accesses on these platforms.
|
||||
class MemoryAccess {
|
||||
public:
|
||||
void offsetBy(uint32_t) { MOZ_CRASH(); }
|
||||
uint32_t insnOffset() const { MOZ_CRASH(); }
|
||||
};
|
||||
#endif
|
||||
|
||||
WASM_DECLARE_POD_VECTOR(MemoryAccess, MemoryAccessVector)
|
||||
WASM_DECLARE_POD_VECTOR(BoundsCheck, BoundsCheckVector)
|
||||
|
||||
// A wasm::SymbolicAddress represents a pointer to a well-known function or
|
||||
// object that is embedded in wasm code. Since wasm code is serialized and
|
||||
// later deserialized into a different address space, symbolic addresses must be
|
||||
@ -1259,21 +1170,153 @@ struct ExternalTableElem
|
||||
TlsData* tls;
|
||||
};
|
||||
|
||||
// Constants:
|
||||
// Because ARM has a fixed-width instruction encoding, ARM can only express a
|
||||
// limited subset of immediates (in a single instruction).
|
||||
|
||||
extern bool
|
||||
IsValidARMImmediate(uint32_t i);
|
||||
|
||||
extern uint32_t
|
||||
RoundUpToNextValidARMImmediate(uint32_t i);
|
||||
|
||||
// The WebAssembly spec hard-codes the virtual page size to be 64KiB and
|
||||
// requires linear memory to always be a multiple of 64KiB.
|
||||
// requires the size of linear memory to always be a multiple of 64KiB.
|
||||
|
||||
static const unsigned PageSize = 64 * 1024;
|
||||
|
||||
// Bounds checks always compare the base of the memory access with the bounds
|
||||
// check limit. If the memory access is unaligned, this means that, even if the
|
||||
// bounds check succeeds, a few bytes of the access can extend past the end of
|
||||
// memory. To guard against this, extra space is included in the guard region to
|
||||
// catch the overflow. MaxMemoryAccessSize is a conservative approximation of
|
||||
// the maximum guard space needed to catch all unaligned overflows.
|
||||
|
||||
static const unsigned MaxMemoryAccessSize = sizeof(Val);
|
||||
|
||||
#ifdef JS_CODEGEN_X64
|
||||
#define WASM_HUGE_MEMORY
|
||||
static const uint64_t Uint32Range = uint64_t(UINT32_MAX) + 1;
|
||||
static const uint64_t MappedSize = 2 * Uint32Range + PageSize;
|
||||
|
||||
// All other code should use WASM_HUGE_MEMORY instead of JS_CODEGEN_X64 so that
|
||||
// it is easy to use the huge-mapping optimization for other 64-bit platforms in
|
||||
// the future.
|
||||
# define WASM_HUGE_MEMORY
|
||||
|
||||
// On WASM_HUGE_MEMORY platforms, every asm.js or WebAssembly memory
|
||||
// unconditionally allocates a huge region of virtual memory of size
|
||||
// wasm::HugeMappedSize. This allows all memory resizing to work without
|
||||
// reallocation and provides enough guard space for all offsets to be folded
|
||||
// into memory accesses.
|
||||
|
||||
static const uint64_t IndexRange = uint64_t(UINT32_MAX) + 1;
|
||||
static const uint64_t OffsetGuardLimit = uint64_t(INT32_MAX) + 1;
|
||||
static const uint64_t UnalignedGuardPage = PageSize;
|
||||
static const uint64_t HugeMappedSize = IndexRange + OffsetGuardLimit + UnalignedGuardPage;
|
||||
|
||||
static_assert(MaxMemoryAccessSize <= UnalignedGuardPage, "rounded up to static page size");
|
||||
|
||||
#else // !WASM_HUGE_MEMORY
|
||||
|
||||
// On !WASM_HUGE_MEMORY platforms:
|
||||
// - To avoid OOM in ArrayBuffer::prepareForAsmJS, asm.js continues to use the
|
||||
// original ArrayBuffer allocation which has no guard region at all.
|
||||
// - For WebAssembly memories, an additional GuardSize is mapped after the
|
||||
// accessible region of the memory to catch folded (base+offset) accesses
|
||||
// where `offset < OffsetGuardLimit` as well as the overflow from unaligned
|
||||
// accesses, as described above for MaxMemoryAccessSize.
|
||||
|
||||
static const size_t OffsetGuardLimit = PageSize - MaxMemoryAccessSize;
|
||||
static const size_t GuardSize = PageSize;
// Return whether the given immediate satisfies the constraints of the platform
// (viz. that, on ARM, IsValidARMImmediate).

extern bool
IsValidBoundsCheckImmediate(uint32_t i);

// For a given WebAssembly/asm.js max size, return the number of bytes to
// map which will necessarily be a multiple of the system page size and greater
// than maxSize. For a returned mappedSize:
//   boundsCheckLimit = mappedSize - GuardSize
//   IsValidBoundsCheckImmediate(boundsCheckLimit)

extern size_t
ComputeMappedSize(uint32_t maxSize);
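// Editor's sketch of the contract above (hypothetical helper, not the real
// implementation; 'sysPageSize' stands in for the queried system page size):
//
//   static size_t ExampleComputeMappedSize(uint32_t maxSize, size_t sysPageSize)
//   {
//       // Page-align upward so the mapping is strictly greater than maxSize.
//       size_t mapped = (size_t(maxSize) / sysPageSize + 1) * sysPageSize + GuardSize;
//       // Grow until the implied bounds check limit is encodable (e.g. on ARM).
//       while (!IsValidBoundsCheckImmediate(uint32_t(mapped - GuardSize)))
//           mapped += sysPageSize;
//       return mapped;
//   }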
#endif // WASM_HUGE_MEMORY

// Metadata for bounds check instructions that are patched at runtime with the
// appropriate bounds check limit. On WASM_HUGE_MEMORY platforms for wasm (and
// SIMD/Atomic) bounds checks, no BoundsCheck is created: the signal handler
// catches everything. On !WASM_HUGE_MEMORY, a BoundsCheck is created for each
// memory access (except when statically eliminated by optimizations) so that
// the length can be patched in as an immediate. This requires that the bounds
// check limit IsValidBoundsCheckImmediate.

class BoundsCheck
{
  public:
    BoundsCheck() = default;

    explicit BoundsCheck(uint32_t cmpOffset)
      : cmpOffset_(cmpOffset)
    { }

    uint8_t* patchAt(uint8_t* code) const { return code + cmpOffset_; }
    void offsetBy(uint32_t offset) { cmpOffset_ += offset; }

  private:
    uint32_t cmpOffset_;
};
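// Editor's usage sketch (hypothetical loop; wasmPatchBoundsCheck is the
// MacroAssembler hook declared later in this patch): once the memory's
// current length is known, each recorded check gets the limit patched into
// its immediate:
//
//   for (const BoundsCheck& bc : boundsChecks)
//       MacroAssembler::wasmPatchBoundsCheck(bc.patchAt(codeBase), limit);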
// Metadata for memory accesses. On WASM_HUGE_MEMORY platforms, only
// (non-SIMD/Atomic) asm.js loads and stores create a MemoryAccess so that the
// signal handler can implement the semantically-correct wraparound logic; the
// rest simply redirect to the out-of-bounds stub in the signal handler. On x86,
// the base address of memory is baked into each memory access instruction so
// the MemoryAccess records the location of each for patching. On all other
// platforms, no MemoryAccess is created.

#ifdef WASM_HUGE_MEMORY
class MemoryAccess
{
    uint32_t insnOffset_;

  public:
    MemoryAccess() = default;
    explicit MemoryAccess(uint32_t insnOffset)
      : insnOffset_(insnOffset)
    {}

    uint32_t insnOffset() const { return insnOffset_; }

    void offsetBy(uint32_t offset) { insnOffset_ += offset; }
};
#elif defined(JS_CODEGEN_X86)
class MemoryAccess
{
    uint32_t nextInsOffset_;

  public:
    MemoryAccess() = default;
    explicit MemoryAccess(uint32_t nextInsOffset)
      : nextInsOffset_(nextInsOffset)
    { }

    void* patchMemoryPtrImmAt(uint8_t* code) const { return code + nextInsOffset_; }
    void offsetBy(uint32_t offset) { nextInsOffset_ += offset; }
};
#else
class MemoryAccess {
  public:
    MemoryAccess() { MOZ_CRASH(); }
    void offsetBy(uint32_t) { MOZ_CRASH(); }
    uint32_t insnOffset() const { MOZ_CRASH(); }
};
#endif

bool IsValidARMLengthImmediate(uint32_t length);
uint32_t RoundUpToNextValidARMLengthImmediate(uint32_t length);
size_t LegalizeMapLength(size_t requestedSize);
WASM_DECLARE_POD_VECTOR(MemoryAccess, MemoryAccessVector)
WASM_DECLARE_POD_VECTOR(BoundsCheck, BoundsCheckVector)

// Constants:

static const unsigned NaN64GlobalDataOffset = 0;
static const unsigned NaN32GlobalDataOffset = NaN64GlobalDataOffset + sizeof(double);
@ -245,14 +245,10 @@ GetBuildConfiguration(JSContext* cx, unsigned argc, Value* vp)
    if (!JS_SetProperty(cx, info, "intl-api", value))
        return false;

#if defined(XP_WIN)
#if defined(SOLARIS)
    value = BooleanValue(false);
#elif defined(SOLARIS)
    value = BooleanValue(false);
#elif defined(XP_UNIX)
    value = BooleanValue(true);
#else
    value = BooleanValue(false);
    value = BooleanValue(true);
#endif
    if (!JS_SetProperty(cx, info, "mapped-array-buffer", value))
        return false;
@ -1038,7 +1038,44 @@ Parser<ParseHandler>::tryDeclareVar(HandlePropertyName name, DeclarationKind kin
{
    if (AddDeclaredNamePtr p = scope->lookupDeclaredNameForAdd(name)) {
        DeclarationKind declaredKind = p->value()->kind();
        if (!DeclarationKindIsVar(declaredKind) && !DeclarationKindIsParameter(declaredKind)) {
        if (DeclarationKindIsVar(declaredKind)) {
            // Any vars that are redeclared as body-level functions must
            // be recorded as body-level functions.
            //
            // In the case of global and eval scripts, GlobalDeclaration-
            // Instantiation [1] and EvalDeclarationInstantiation [2]
            // check for the declarability of global var and function
            // bindings via CanDeclareVar [3] and CanDeclareGlobal-
            // Function [4]. CanDeclareGlobalFunction is strictly more
            // restrictive than CanDeclareGlobalVar, so record the more
            // restrictive kind. These semantics are implemented in
            // CheckCanDeclareGlobalBinding.
            //
            // For a var previously declared as ForOfVar, this previous
            // DeclarationKind is used only to check whether the
            // 'arguments' binding should be declared. Since body-level
            // functions shadow 'arguments' [5], it is correct to alter
            // the kind to BodyLevelFunction. See
            // declareFunctionArgumentsObject.
            //
            // For a var previously declared as
            // VarForAnnexBLexicalFunction, this previous DeclarationKind
            // is used so that vars synthesized solely for Annex B.3.3 may
            // be removed if an early error would occur. If a synthesized
            // Annex B.3.3 var has the same name as a body-level function,
            // this is not a redeclaration, and indeed, because the
            // body-level function binds the name, this name should not be
            // removed should a redeclaration occur in the future. Thus it
            // is also correct to alter the kind to BodyLevelFunction.
            //
            // [1] ES 15.1.11
            // [2] ES 18.2.1.3
            // [3] ES 8.1.1.4.15
            // [4] ES 8.1.1.4.16
            // [5] ES 9.2.12
            if (kind == DeclarationKind::BodyLevelFunction)
                p->value()->alterKind(kind);
        } else if (!DeclarationKindIsParameter(declaredKind)) {
            // Annex B.3.5 allows redeclaring simple (non-destructured)
            // catch parameters with var declarations, except when it
            // appears in a for-of.
@ -280,45 +280,49 @@ GetPageFaultCount()
    return pmc.PageFaultCount;
}

// On Windows the minimum size for a mapping is the allocation granularity
// (64KiB in practice), so mapping very small buffers is potentially wasteful.
void*
AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment)
{
    // The allocation granularity must be a whole multiple of the alignment and
    // the caller must request an aligned offset to satisfy Windows' and the
    // caller's alignment requirements.
    MOZ_ASSERT(length && alignment);

    // The allocation granularity and the requested offset
    // must both be divisible by the requested alignment.
    if (allocGranularity % alignment != 0 || offset % alignment != 0)
        return nullptr;

    // Make sure file exists and do sanity check for offset and size.
    HANDLE hFile = reinterpret_cast<HANDLE>(intptr_t(fd));
    MOZ_ASSERT(hFile != INVALID_HANDLE_VALUE);

    uint32_t fSizeHgh;
    uint32_t fSizeLow = GetFileSize(hFile, LPDWORD(&fSizeHgh));
    if (fSizeLow == INVALID_FILE_SIZE && GetLastError() != NO_ERROR)
        return nullptr;

    uint64_t fSize = (uint64_t(fSizeHgh) << 32) + fSizeLow;
    if (offset >= size_t(fSize) || length == 0 || length > fSize - offset)
        return nullptr;

    uint64_t mapSize = length + offset;
    HANDLE hMap = CreateFileMapping(hFile, nullptr, PAGE_READONLY, mapSize >> 32, mapSize, nullptr);
    // This call will fail if the file does not exist, which is what we want.
    HANDLE hMap = CreateFileMapping(hFile, nullptr, PAGE_READONLY, 0, 0, nullptr);
    if (!hMap)
        return nullptr;

    // MapViewOfFile requires the offset to be a whole multiple of the
    // allocation granularity.
    size_t alignOffset = offset - (offset % allocGranularity);
    size_t alignLength = length + (offset % allocGranularity);
    void* map = MapViewOfFile(hMap, FILE_MAP_COPY, 0, alignOffset, alignLength);
    size_t alignedOffset = offset - (offset % allocGranularity);
    size_t alignedLength = length + (offset % allocGranularity);

    DWORD offsetH = uint32_t(uint64_t(alignedOffset) >> 32);
    DWORD offsetL = uint32_t(alignedOffset);

    // If the offset or length are out of bounds, this call will fail.
    uint8_t* map = static_cast<uint8_t*>(MapViewOfFile(hMap, FILE_MAP_COPY, offsetH,
                                                       offsetL, alignedLength));

    // This just decreases the file mapping object's internal reference count;
    // it won't actually be destroyed until we unmap the associated view.
    CloseHandle(hMap);

    if (!map)
        return nullptr;

    return reinterpret_cast<void*>(uintptr_t(map) + (offset - alignOffset));
#ifdef DEBUG
    // Zero out data before and after the desired mapping to catch errors early.
    if (offset != alignedOffset)
        memset(map, 0, offset - alignedOffset);
    if (alignedLength % pageSize)
        memset(map + alignedLength, 0, pageSize - (alignedLength % pageSize));
#endif

    return map + (offset - alignedOffset);
}
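// Worked example (editor's note): with allocGranularity = 65536 and a request
// for offset = 100000, offset % allocGranularity = 34464, so the view is
// mapped at alignedOffset = 65536 with alignedLength = length + 34464, and
// the caller receives map + 34464, which points exactly at file offset 100000.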
void
@ -328,9 +332,9 @@ DeallocateMappedContent(void* p, size_t /*length*/)
        return;

    // Calculate the address originally returned by MapViewOfFile.
    // This is required because AllocateMappedContent returns a pointer that
    // might be offset into the view, necessitated by the requirement that the
    // beginning of a view must be aligned with the allocation granularity.
    // This is needed because AllocateMappedContent returns a pointer
    // that might be offset from the view, as the beginning of a
    // view must be aligned with the allocation granularity.
    uintptr_t map = uintptr_t(p) - (uintptr_t(p) % allocGranularity);
    MOZ_ALWAYS_TRUE(UnmapViewOfFile(reinterpret_cast<void*>(map)));
}
@ -776,61 +780,50 @@ GetPageFaultCount()
void*
AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment)
{
#define NEED_PAGE_ALIGNED 0
    size_t pa_start; // Page aligned starting
    size_t pa_end; // Page aligned ending
    size_t pa_size; // Total page aligned size
    MOZ_ASSERT(length && alignment);

    // The allocation granularity and the requested offset
    // must both be divisible by the requested alignment.
    if (allocGranularity % alignment != 0 || offset % alignment != 0)
        return nullptr;

    // Sanity check the offset and size, as mmap does not do this for us.
    struct stat st;
    uint8_t* buf;

    // Make sure file exists and do sanity check for offset and size.
    if (fstat(fd, &st) < 0 || offset >= (size_t) st.st_size ||
        length == 0 || length > (size_t) st.st_size - offset)
    if (fstat(fd, &st) || offset >= uint64_t(st.st_size) || length > uint64_t(st.st_size) - offset)
        return nullptr;

    // Check for minimal alignment requirement.
#if NEED_PAGE_ALIGNED
    alignment = std::max(alignment, pageSize);
    size_t alignedOffset = offset - (offset % allocGranularity);
    size_t alignedLength = length + (offset % allocGranularity);

    uint8_t* map = static_cast<uint8_t*>(MapMemory(alignedLength, PROT_READ | PROT_WRITE,
                                                   MAP_PRIVATE, fd, alignedOffset));
    if (!map)
        return nullptr;

#ifdef DEBUG
    // Zero out data before and after the desired mapping to catch errors early.
    if (offset != alignedOffset)
        memset(map, 0, offset - alignedOffset);
    if (alignedLength % pageSize)
        memset(map + alignedLength, 0, pageSize - (alignedLength % pageSize));
#endif
    if (offset & (alignment - 1))
        return nullptr;

    // Page aligned starting of the offset.
    pa_start = offset & ~(pageSize - 1);
    // Calculate page aligned ending by adding one page to the page aligned
    // starting of data end position(offset + length - 1).
    pa_end = ((offset + length - 1) & ~(pageSize - 1)) + pageSize;
    pa_size = pa_end - pa_start;

    // Ask for a continuous memory location.
    buf = (uint8_t*) MapMemory(pa_size);
    if (!buf)
        return nullptr;

    buf = (uint8_t*) MapMemoryAt(buf, pa_size, PROT_READ | PROT_WRITE,
                                 MAP_PRIVATE | MAP_FIXED, fd, pa_start);
    if (!buf)
        return nullptr;

    // Reset the data before target file, which we don't need to see.
    memset(buf, 0, offset - pa_start);

    // Reset the data after target file, which we don't need to see.
    memset(buf + (offset - pa_start) + length, 0, pa_end - (offset + length));

    return buf + (offset - pa_start);
    return map + (offset - alignedOffset);
}

void
DeallocateMappedContent(void* p, size_t length)
{
    void* pa_start; // Page aligned starting
    size_t total_size; // Total allocated size
    if (!p)
        return;

    pa_start = (void*)(uintptr_t(p) & ~(pageSize - 1));
    total_size = ((uintptr_t(p) + length) & ~(pageSize - 1)) + pageSize - uintptr_t(pa_start);
    if (munmap(pa_start, total_size))
        MOZ_ASSERT(errno == ENOMEM);
    // Calculate the address originally returned by mmap.
    // This is needed because AllocateMappedContent returns a pointer
    // that might be offset from the mapping, as the beginning of a
    // mapping must be aligned with the allocation granularity.
    uintptr_t map = uintptr_t(p) - (uintptr_t(p) % allocGranularity);
    size_t alignedLength = length + (uintptr_t(p) % allocGranularity);
    UnmapPages(reinterpret_cast<void*>(map), alignedLength);
}

#else
@ -1,3 +1,4 @@
// |jit-test| slow
"use strict";

if (!('oomTest' in this))
@ -1,3 +1,5 @@
// |jit-test| slow

const USE_ASM = '"use asm";';
if (!('oomTest' in this))
    quit();
@ -1,8 +1,11 @@
// |jit-test| test-also-noasmjs
// |jit-test|
load(libdir + "asm.js");
load(libdir + "simd.js");
load(libdir + "asserts.js");

// Avoid pathological --ion-eager compile times due to bails in loops
setJitCompilerOption('ion.warmup.trigger', 1000000);

// Set to true to see more JS debugging spew
const DEBUG = false;
@ -273,43 +276,33 @@ function MakeCodeFor(typeName) {

    var l1 = type.load1;
    var l2 = type.load2;
    var l3 = type.load3;

    var s1 = type.store1;
    var s2 = type.store2;
    var s3 = type.store3;

    var u8 = new glob.Uint8Array(heap);

    function load1(i) { i=i|0; return l1(u8, i); }
    function load2(i) { i=i|0; return l2(u8, i); }
    function load3(i) { i=i|0; return l3(u8, i); }

    function loadCst1() { return l1(u8, 41 << 2); }
    function loadCst2() { return l2(u8, 41 << 2); }
    function loadCst3() { return l3(u8, 41 << 2); }

    function store1(i, x) { i=i|0; x=c(x); return s1(u8, i, x); }
    function store2(i, x) { i=i|0; x=c(x); return s2(u8, i, x); }
    function store3(i, x) { i=i|0; x=c(x); return s3(u8, i, x); }

    function storeCst1(x) { x=c(x); return s1(u8, 41 << 2, x); }
    function storeCst2(x) { x=c(x); return s2(u8, 41 << 2, x); }
    function storeCst3(x) { x=c(x); return s3(u8, 41 << 2, x); }

    return {
        load1: load1,
        load2: load2,
        load3: load3,
        loadCst1: loadCst1,
        loadCst2: loadCst2,
        loadCst3: loadCst3,
        store1: store1,
        store2: store2,
        store3: store3,
        storeCst1: storeCst1,
        storeCst2: storeCst2,
        storeCst3: storeCst3,
    }
    `;
}
@ -329,33 +322,27 @@ function TestPartialLoads(m, typedArray, x, y, z, w) {
    var i = 0, j = 0; // i in elems, j in bytes
    assertEqX4(m.load1(j), [x(i), 0, 0, 0]);
    assertEqX4(m.load2(j), [x(i), y(i), 0, 0]);
    assertEqX4(m.load3(j), [x(i), y(i), z(i), 0]);

    j += 4;
    assertEqX4(m.load1(j), [y(i), 0, 0, 0]);
    assertEqX4(m.load2(j), [y(i), z(i), 0, 0]);
    assertEqX4(m.load3(j), [y(i), z(i), w(i), 0]);

    j += 4;
    assertEqX4(m.load1(j), [z(i), 0, 0, 0]);
    assertEqX4(m.load2(j), [z(i), w(i), 0, 0]);
    assertEqX4(m.load3(j), [z(i), w(i), x(i+4), 0]);

    j += 4;
    assertEqX4(m.load1(j), [w(i), 0, 0, 0]);
    assertEqX4(m.load2(j), [w(i), x(i+4), 0, 0]);
    assertEqX4(m.load3(j), [w(i), x(i+4), y(i+4), 0]);

    j += 4;
    i += 4;
    assertEqX4(m.load1(j), [x(i), 0, 0, 0]);
    assertEqX4(m.load2(j), [x(i), y(i), 0, 0]);
    assertEqX4(m.load3(j), [x(i), y(i), z(i), 0]);

    // Test loads with constant indexes (41)
    assertEqX4(m.loadCst1(), [y(40), 0, 0, 0]);
    assertEqX4(m.loadCst2(), [y(40), z(40), 0, 0]);
    assertEqX4(m.loadCst3(), [y(40), z(40), w(40), 0]);

    // Test limit and OOB accesses
    assertEqX4(m.load1((SIZE - 1) << 2), [w(SIZE - 4), 0, 0, 0]);
@ -363,9 +350,6 @@ function TestPartialLoads(m, typedArray, x, y, z, w) {

    assertEqX4(m.load2((SIZE - 2) << 2), [z(SIZE - 4), w(SIZE - 4), 0, 0]);
    assertThrowsInstanceOf(() => m.load2(((SIZE - 2) << 2) + 1), RangeError);

    assertEqX4(m.load3((SIZE - 3) << 2), [y(SIZE - 4), z(SIZE - 4), w(SIZE - 4), 0]);
    assertThrowsInstanceOf(() => m.load3(((SIZE - 3) << 2) + 1), RangeError);
}

// Partial stores
@ -399,18 +383,6 @@ function TestPartialStores(m, typedArray, typeName, x, y, z, w) {
        typedArray[(i >> 2) + 1] = (i >> 2) + 2;
    }

    function TestStore3(i) {
        m.store3(i, val);
        CheckNotModified(0, i >> 2);
        assertEq(typedArray[i >> 2], x);
        assertEq(typedArray[(i >> 2) + 1], y);
        assertEq(typedArray[(i >> 2) + 2], z);
        CheckNotModified((i >> 2) + 3, SIZE);
        typedArray[i >> 2] = (i >> 2) + 1;
        typedArray[(i >> 2) + 1] = (i >> 2) + 2;
        typedArray[(i >> 2) + 2] = (i >> 2) + 3;
    }

    function TestOOBStore(f) {
        assertThrowsInstanceOf(f, RangeError);
        CheckNotModified(0, SIZE);
@ -440,18 +412,6 @@ function TestPartialStores(m, typedArray, typeName, x, y, z, w) {
    TestOOBStore(() => m.store2(i + 1, val));
    TestOOBStore(() => m.store2(-1, val));

    TestStore3(0);
    TestStore3(1 << 2);
    TestStore3(2 << 2);
    TestStore3(3 << 2);
    TestStore3(1337 << 2);

    var i = (SIZE - 3) << 2;
    TestStore3(i);
    TestOOBStore(() => m.store3(i + 1, val));
    TestOOBStore(() => m.store3(-1, val));
    TestOOBStore(() => m.store3(-9, val));

    // Constant indexes (41)
    m.storeCst1(val);
    CheckNotModified(0, 41);
@ -466,16 +426,6 @@ function TestPartialStores(m, typedArray, typeName, x, y, z, w) {
    CheckNotModified(43, SIZE);
    typedArray[41] = 42;
    typedArray[42] = 43;

    m.storeCst3(val);
    CheckNotModified(0, 41);
    assertEq(typedArray[41], x);
    assertEq(typedArray[42], y);
    assertEq(typedArray[43], z);
    CheckNotModified(44, SIZE);
    typedArray[41] = 42;
    typedArray[42] = 43;
    typedArray[43] = 44;
}

var f32 = new Float32Array(SIZE);
@ -3,6 +3,9 @@ load(libdir + "simd.js");
load(libdir + "asserts.js");
var heap = new ArrayBuffer(0x10000);

// Avoid pathological --ion-eager compile times due to bails in loops
setJitCompilerOption('ion.warmup.trigger', 1000000);

// Set to true to see more JS debugging spew
const DEBUG = false;

@ -1,10 +1,7 @@
// |jit-test| test-also-noasmjs
// |jit-test|
load(libdir + "asm.js");
load(libdir + "asserts.js");

setIonCheckGraphCoherency(false);
setCachingEnabled(false);

var ab = new ArrayBuffer(BUF_MIN);

// Compute a set of interesting indices.
@ -104,39 +101,30 @@ function testSimdX4(ctor, shift, scale, disp, simdName, simdCtor) {
        'var SIMD_' + simdName + ' = glob.SIMD.' + simdName + '; ' +
        'var SIMD_' + simdName + '_check = SIMD_' + simdName + '.check; ' +
        'var SIMD_' + simdName + '_load = SIMD_' + simdName + '.load; ' +
        'var SIMD_' + simdName + '_load3 = SIMD_' + simdName + '.load3; ' +
        'var SIMD_' + simdName + '_load2 = SIMD_' + simdName + '.load2; ' +
        'var SIMD_' + simdName + '_load1 = SIMD_' + simdName + '.load1; ' +
        'var SIMD_' + simdName + '_store = SIMD_' + simdName + '.store; ' +
        'var SIMD_' + simdName + '_store3 = SIMD_' + simdName + '.store3; ' +
        'var SIMD_' + simdName + '_store2 = SIMD_' + simdName + '.store2; ' +
        'var SIMD_' + simdName + '_store1 = SIMD_' + simdName + '.store1; ' +
        'function load(i) {i=i|0; return SIMD_' + simdName + '_check(SIMD_' + simdName + '_load(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ')) } ' +
        'function load3(i) {i=i|0; return SIMD_' + simdName + '_check(SIMD_' + simdName + '_load3(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ')) } ' +
        'function load2(i) {i=i|0; return SIMD_' + simdName + '_check(SIMD_' + simdName + '_load2(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ')) } ' +
        'function load1(i) {i=i|0; return SIMD_' + simdName + '_check(SIMD_' + simdName + '_load1(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ')) } ' +
        'function store(i,j) {i=i|0;j=SIMD_' + simdName + '_check(j); SIMD_' + simdName + '_store(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ', j) } ' +
        'function store3(i,j) {i=i|0;j=SIMD_' + simdName + '_check(j); SIMD_' + simdName + '_store3(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ', j) } ' +
        'function store2(i,j) {i=i|0;j=SIMD_' + simdName + '_check(j); SIMD_' + simdName + '_store2(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ', j) } ' +
        'function store1(i,j) {i=i|0;j=SIMD_' + simdName + '_check(j); SIMD_' + simdName + '_store1(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ', j) } ' +
        'return { load: load, load3: load3, load2: load2, load1: load1, store: store, store3: store3, store2 : store2, store1 : store1 }');
        'return { load: load, load2: load2, load1: load1, store: store, store2 : store2, store1 : store1 }');
    var f = asmLink(c, this, null, ab);

    for (var i of indices) {
        var index = ((i<<scale)+disp)>>shift;

        var v, v3, v2, v1;
        var t = false, t3 = false, t2 = false, t1 = false;
        var v, v2, v1;
        var t = false, t2 = false, t1 = false;
        try { v = simdCtor.load(arr, index); }
        catch (e) {
            assertEq(e instanceof RangeError, true);
            t = true;
        }
        try { v3 = simdCtor.load3(arr, index); }
        catch (e) {
            assertEq(e instanceof RangeError, true);
            t3 = true;
        }
        try { v2 = simdCtor.load2(arr, index); }
        catch (e) {
            assertEq(e instanceof RangeError, true);
@ -149,18 +137,13 @@ function testSimdX4(ctor, shift, scale, disp, simdName, simdCtor) {
        }

        // Loads
        var l, l3, l2, l1;
        var r = false, r3 = false, r2 = false, r1 = false;
        var l, l2, l1;
        var r = false, r2 = false, r1 = false;
        try { l = f.load(i); }
        catch (e) {
            assertEq(e instanceof RangeError, true);
            r = true;
        }
        try { l3 = f.load3(i); }
        catch (e) {
            assertEq(e instanceof RangeError, true);
            r3 = true;
        }
        try { l2 = f.load2(i); }
        catch (e) {
            assertEq(e instanceof RangeError, true);
@ -172,11 +155,9 @@ function testSimdX4(ctor, shift, scale, disp, simdName, simdCtor) {
            r1 = true;
        }
        assertEq(t, r);
        assertEq(t3, r3);
        assertEq(t2, r2);
        assertEq(t1, r1);
        if (!t) assertEqX4(v, l);
        if (!t3) assertEqX4(v3, l3);
        if (!t2) assertEqX4(v2, l2);
        if (!t1) assertEqX4(v1, l1);

@ -187,12 +168,6 @@ function testSimdX4(ctor, shift, scale, disp, simdName, simdCtor) {
            assertEqX4(simdCtor.load(arr, index), v);
        } else
            assertThrowsInstanceOf(() => f.store(i, simdCtor()), RangeError);
        if (!t3) {
            simdCtor.store3(arr, index, simdCtor.neg(v3));
            f.store3(i, v3);
            assertEqX4(simdCtor.load3(arr, index), v3);
        } else
            assertThrowsInstanceOf(() => f.store3(i, simdCtor()), RangeError);
        if (!t2) {
            simdCtor.store2(arr, index, simdCtor.neg(v2));
            f.store2(i, v2);
@ -239,6 +214,14 @@ test(testInt, Uint32Array, 2);
test(testFloat32, Float32Array, 2);
test(testFloat64, Float64Array, 3);
if (typeof SIMD !== 'undefined' && isSimdAvailable()) {
    // Avoid pathological --ion-eager compile times due to bails in loops
    setJitCompilerOption('ion.warmup.trigger', 1000000);

    // Use a fresh ArrayBuffer so prepareForAsmJS can allocate a guard page
    // which SIMD.js needs. Since the original ArrayBuffer was prepared for
    // asm.js that didn't use SIMD.js, it has no guard page (on 32-bit).
    ab = new ArrayBuffer(BUF_MIN);

    test(testInt32x4, Uint8Array, 0);
    test(testFloat32x4, Uint8Array, 0);
}
12
js/src/jit-test/tests/debug/bug1300517.js
Normal file
@ -0,0 +1,12 @@
// |jit-test| error: ReferenceError
g = newGlobal();
g.log *= "";
Debugger(g).onDebuggerStatement = frame => frame.eval("log += this.Math.toString();");
let forceException = g.eval(`
    (class extends class {} {
        constructor() {
            debugger;
        }
    })
`);
new forceException;
34
js/src/jit-test/tests/debug/bug1300528.js
Normal file
@ -0,0 +1,34 @@
load(libdir + "asserts.js");

if (helperThreadCount() === 0)
    quit(0);

function BigInteger(a, b, c) {}
function montConvert(x) {
    var r = new BigInteger(null);
    return r;
}
var ba = new Array();
a = new BigInteger(ba);
g = montConvert(a);
var lfGlobal = newGlobal();
for (lfLocal in this) {
    if (!(lfLocal in lfGlobal)) {
        lfGlobal[lfLocal] = this[lfLocal];
    }
}
lfGlobal.offThreadCompileScript(`
    var dbg = new Debugger(g);
    dbg.onEnterFrame = function (frame) {
        var frameThis = frame.this;
    }
`);
lfGlobal.runOffThreadScript();
assertThrowsInstanceOf(test, ReferenceError);
function test() {
    function check(fun, msg, todo) {
        success = fun();
    }
    check(() => Object.getPrototypeOf(view) == Object.getPrototypeOf(simple));
    typeof this;
}
10
js/src/jit-test/tests/wasm/big-resize.js
Normal file
@ -0,0 +1,10 @@
load(libdir + "wasm.js");
|
||||
|
||||
assertEq(new WebAssembly.Instance(new WebAssembly.Module(textToBinary(`(module
|
||||
(memory 1 32768)
|
||||
(func $test (result i32)
|
||||
(if (i32.eq (grow_memory (i32.const 16384)) (i32.const -1)) (return (i32.const 42)))
|
||||
(i32.store (i32.const 1073807356) (i32.const 42))
|
||||
(i32.load (i32.const 1073807356)))
|
||||
(export "test" $test)
|
||||
)`))).exports.test(), 42);
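// Editor's arithmetic check: after grow_memory(16384) the memory is
// 1 + 16384 = 16385 pages = 16385 * 65536 = 1073807360 bytes, so 1073807356
// is the last aligned i32 slot just below the new boundary and must
// round-trip 42 (if the grow itself fails with -1, the early return also
// yields 42, which the assertion accepts).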
@ -148,14 +148,10 @@ for (let [type, ext] of [
    assertErrorMessage(() => badStoreModule(type, ext), TypeError, /can't touch memory/);
}

for (var ind = 0; ind < 1; ind++) {
    /*
     * TODO: wasm.explicit-bounds-check option is being deprecated. We will be adding a
     * new option that treats all offsets as "non-foldable". When that is added, trigger
     * it here when ind == 1.
    if (ind == 1)
        setJitCompilerOption('wasm.explicit-bounds-checks', 1);
     */
assertEq(getJitCompilerOptions()['wasm.fold-offsets'], 1);

for (var foldOffsets = 0; foldOffsets <= 1; foldOffsets++) {
    setJitCompilerOption('wasm.fold-offsets', foldOffsets);

    testLoad('i32', '', 0, 0, 0, 0x03020100);
    testLoad('i32', '', 1, 0, 1, 0x04030201);
@ -399,3 +395,5 @@ for (var ind = 0; ind < 1; ind++) {
    setJitCompilerOption('wasm.test-mode', 0);
}
}

setJitCompilerOption('wasm.fold-offsets', 1);

@ -1,4 +1,4 @@
// |jit-test| allow-oom
// |jit-test| slow; allow-oom

if (typeof oomTest !== 'function' || !wasmIsSupported()) {
    print('Missing oomTest or wasm support in wasm/regress/oom-eval');
@ -1,4 +1,4 @@
// |jit-test| allow-oom
// |jit-test| slow; allow-oom

if (typeof oomTest !== 'function' || !wasmIsSupported())
    quit();
77
js/src/jit-test/tests/wasm/resizing.js
Normal file
@ -0,0 +1,77 @@
// |jit-test| test-also-wasm-baseline
load(libdir + "wasm.js");

const Module = WebAssembly.Module;
const Instance = WebAssembly.Instance;
const Table = WebAssembly.Table;
const Memory = WebAssembly.Memory;

// Test for stale heap pointers after resize

// Grow directly from builtin call:
assertEq(evalText(`(module
    (memory 1)
    (func $test (result i32)
        (i32.store (i32.const 0) (i32.const 1))
        (i32.store (i32.const 65532) (i32.const 10))
        (grow_memory (i32.const 99))
        (i32.store (i32.const 6553596) (i32.const 100))
        (i32.add
            (i32.load (i32.const 0))
            (i32.add
                (i32.load (i32.const 65532))
                (i32.load (i32.const 6553596)))))
    (export "test" $test)
)`).exports.test(), 111);
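// Editor's arithmetic check: the module starts with 1 page (65536 bytes), so
// 65532 is its last i32 slot; grow_memory(99) brings it to 100 pages =
// 6553600 bytes, making 6553596 the new last slot. 1 + 10 + 100 = 111
// confirms that all three stores survived the reallocation.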
// Grow during call_import:
var exports = evalText(`(module
    (import $imp "a" "imp")
    (memory 1)
    (func $grow (grow_memory (i32.const 99)))
    (export "grow" $grow)
    (func $test (result i32)
        (i32.store (i32.const 0) (i32.const 1))
        (i32.store (i32.const 65532) (i32.const 10))
        (call $imp)
        (i32.store (i32.const 6553596) (i32.const 100))
        (i32.add
            (i32.load (i32.const 0))
            (i32.add
                (i32.load (i32.const 65532))
                (i32.load (i32.const 6553596)))))
    (export "test" $test)
)`, {a:{imp() { exports.grow() }}}).exports;

setJitCompilerOption("baseline.warmup.trigger", 2);
setJitCompilerOption("ion.warmup.trigger", 4);
for (var i = 0; i < 10; i++)
    assertEq(exports.test(), 111);

// Grow during call_indirect:
var mem = new Memory({initial:1});
var tbl = new Table({initial:1, element:"anyfunc"});
var exports1 = evalText(`(module
    (import "a" "mem" (memory 1))
    (func $grow
        (i32.store (i32.const 65532) (i32.const 10))
        (grow_memory (i32.const 99))
        (i32.store (i32.const 6553596) (i32.const 100)))
    (export "grow" $grow)
)`, {a:{mem}}).exports;
var exports2 = evalText(`(module
    (import "a" "tbl" (table 1))
    (import "a" "mem" (memory 1))
    (type $v2v (func))
    (func $test (result i32)
        (i32.store (i32.const 0) (i32.const 1))
        (call_indirect $v2v (i32.const 0))
        (i32.add
            (i32.load (i32.const 0))
            (i32.add
                (i32.load (i32.const 65532))
                (i32.load (i32.const 6553596)))))
    (export "test" $test)
)`, {a:{tbl, mem}}).exports;
tbl.set(0, exports1.grow);
assertEq(exports2.test(), 111);
@ -16,7 +16,7 @@
)

(export "grow_memory" $grow_memory)
(func $grow_memory (param i32)
(func $grow_memory (param i32) (result i32)
    (grow_memory (get_local 0))
)
)
@ -33,4 +33,4 @@
(assert_trap (invoke "load" (i32.const 0)) "out of bounds memory access")
(assert_trap (invoke "store" (i32.const 0x80000000) (i32.const 13)) "out of bounds memory access")
(assert_trap (invoke "load" (i32.const 0x80000000)) "out of bounds memory access")
(assert_trap (invoke "grow_memory" (i32.const 0x80000000)) "memory size exceeds implementation limit")
(assert_return (invoke "grow_memory" (i32.const 0x80000000)) (i32.const -1))

@ -1,4 +1,2 @@
// |jit-test| test-also-wasm-baseline
// TODO current_memory opcode + traps on OOB
quit();
var importedArgs = ['memory_trap.wast']; load(scriptdir + '../spec.js');

@ -1,4 +1,2 @@
// |jit-test| test-also-wasm-baseline
// TODO memory resizing (you don't say)
quit();
var importedArgs = ['resizing.wast']; load(scriptdir + '../spec.js');
@ -11615,6 +11615,27 @@ CodeGenerator::visitWasmTrap(LWasmTrap* lir)
    masm.jump(wasm::JumpTarget(lir->mir()->trap()));
}

void
CodeGenerator::visitWasmBoundsCheck(LWasmBoundsCheck* ins)
{
    const MWasmBoundsCheck* mir = ins->mir();
    Register ptr = ToRegister(ins->ptr());

    if (mir->isRedundant()) {
        // For better test coverage, inject debug assertions that redundant
        // bounds checks really are redundant.
#ifdef DEBUG
        Label ok;
        masm.wasmBoundsCheck(Assembler::Below, ptr, &ok);
        masm.assumeUnreachable("Redundant bounds check failed!");
        masm.bind(&ok);
#endif
        return;
    }

    masm.wasmBoundsCheck(Assembler::AboveOrEqual, ptr, wasm::JumpTarget::OutOfBounds);
}

typedef bool (*RecompileFn)(JSContext*);
static const VMFunction RecompileFnInfo = FunctionInfo<RecompileFn>(Recompile, "Recompile");

@ -419,6 +419,7 @@ class CodeGenerator final : public CodeGeneratorSpecific
    void visitInterruptCheck(LInterruptCheck* lir);
    void visitOutOfLineInterruptCheckImplicit(OutOfLineInterruptCheckImplicit* ins);
    void visitWasmTrap(LWasmTrap* lir);
    void visitWasmBoundsCheck(LWasmBoundsCheck* ins);
    void visitRecompileCheck(LRecompileCheck* ins);
    void visitRotate(LRotate* ins);
@ -100,36 +100,33 @@ AnalyzeLsh(TempAllocator& alloc, MLsh* lsh)
    last->block()->insertAfter(last, eaddr);
}

template<typename MWasmMemoryAccessType>
template<typename AsmJSMemoryAccess>
bool
EffectiveAddressAnalysis::tryAddDisplacement(MWasmMemoryAccessType* ins, int32_t o)
EffectiveAddressAnalysis::tryAddDisplacement(AsmJSMemoryAccess* ins, int32_t o)
{
#ifdef WASM_HUGE_MEMORY
    // Compute the new offset. Check for overflow.
    uint32_t oldOffset = ins->offset();
    uint32_t newOffset = oldOffset + o;
    if (o < 0 ? (newOffset >= oldOffset) : (newOffset < oldOffset))
        return false;

    // Compute the new offset to the end of the access. Check for overflow
    // here also.
    uint32_t newEnd = newOffset + ins->byteSize();
    if (newEnd < newOffset)
        return false;

    // Determine the range of valid offsets which can be folded into this
    // instruction and check whether our computed offset is within that range.
    size_t range = mir_->foldableOffsetRange(ins);
    if (size_t(newEnd) > range)
    // The offset must ultimately be written into the offset immediate of a load
    // or store instruction, so don't allow folding if the offset is bigger.
    if (newOffset >= wasm::OffsetGuardLimit)
        return false;

    // Everything checks out. This is the new offset.
    ins->setOffset(newOffset);
    return true;
#else
    return false;
#endif
}
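// Worked example (editor's note): oldOffset = 0x7FFFF000 with o = 0x2000
// gives newOffset = 0x80001000 >= OffsetGuardLimit (0x80000000), so folding
// is rejected and the add stays explicit; with o = 0x800 the new offset
// 0x7FFFF800 is below the limit and folds into the access, where the huge
// guard region absorbs any out-of-bounds result.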
template<typename MWasmMemoryAccessType>
template<typename AsmJSMemoryAccess>
void
EffectiveAddressAnalysis::analyzeAsmHeapAccess(MWasmMemoryAccessType* ins)
EffectiveAddressAnalysis::analyzeAsmJSHeapAccess(AsmJSMemoryAccess* ins)
{
    MDefinition* base = ins->base();

@ -198,9 +195,9 @@ EffectiveAddressAnalysis::analyze()
            if (i->isLsh())
                AnalyzeLsh(graph_.alloc(), i->toLsh());
            else if (i->isAsmJSLoadHeap())
                analyzeAsmHeapAccess(i->toAsmJSLoadHeap());
                analyzeAsmJSHeapAccess(i->toAsmJSLoadHeap());
            else if (i->isAsmJSStoreHeap())
                analyzeAsmHeapAccess(i->toAsmJSStoreHeap());
                analyzeAsmJSHeapAccess(i->toAsmJSStoreHeap());
        }
    }
    return true;

@ -19,11 +19,11 @@ class EffectiveAddressAnalysis
    MIRGenerator* mir_;
    MIRGraph& graph_;

    template<typename MWasmMemoryAccessType>
    MOZ_MUST_USE bool tryAddDisplacement(MWasmMemoryAccessType* ins, int32_t o);
    template <typename AsmJSMemoryAccess>
    MOZ_MUST_USE bool tryAddDisplacement(AsmJSMemoryAccess* ins, int32_t o);

    template<typename MWasmMemoryAccessType>
    void analyzeAsmHeapAccess(MWasmMemoryAccessType* ins);
    template <typename AsmJSMemoryAccess>
    void analyzeAsmJSHeapAccess(AsmJSMemoryAccess* ins);

  public:
    EffectiveAddressAnalysis(MIRGenerator* mir, MIRGraph& graph)
@ -224,6 +224,10 @@ DefaultJitOptions::DefaultJitOptions()
    // Test whether wasm int64 / double NaN bits testing is enabled.
    SET_DEFAULT(wasmTestMode, false);

    // Toggles the optimization whereby offsets are folded into loads and not
    // included in the bounds check.
    SET_DEFAULT(wasmFoldOffsets, true);

    // Determines whether we suppress using signal handlers
    // for interrupting jit-ed code. This is used only for testing.
    SET_DEFAULT(ionInterruptWithoutSignals, false);

@ -70,6 +70,7 @@ struct DefaultJitOptions
    bool limitScriptSize;
    bool osr;
    bool wasmTestMode;
    bool wasmFoldOffsets;
    bool ionInterruptWithoutSignals;
    uint32_t baselineWarmUpThreshold;
    uint32_t exceptionBailoutThreshold;
@ -4124,6 +4124,29 @@ LIRGenerator::visitHasClass(MHasClass* ins)
    define(new(alloc()) LHasClass(useRegister(ins->object())), ins);
}

void
LIRGenerator::visitWasmAddOffset(MWasmAddOffset* ins)
{
    MOZ_ASSERT(ins->base()->type() == MIRType::Int32);
    MOZ_ASSERT(ins->type() == MIRType::Int32);
    define(new(alloc()) LWasmAddOffset(useRegisterAtStart(ins->base())), ins);
}

void
LIRGenerator::visitWasmBoundsCheck(MWasmBoundsCheck* ins)
{
#ifndef DEBUG
    if (ins->isRedundant())
        return;
#endif

    MDefinition* input = ins->input();
    MOZ_ASSERT(input->type() == MIRType::Int32);

    auto* lir = new(alloc()) LWasmBoundsCheck(useRegisterAtStart(input));
    add(lir, ins);
}

void
LIRGenerator::visitWasmLoadGlobalVar(MWasmLoadGlobalVar* ins)
{

@ -282,6 +282,8 @@ class LIRGenerator : public LIRGeneratorSpecific
    void visitIsConstructor(MIsConstructor* ins);
    void visitIsObject(MIsObject* ins);
    void visitHasClass(MHasClass* ins);
    void visitWasmAddOffset(MWasmAddOffset* ins);
    void visitWasmBoundsCheck(MWasmBoundsCheck* ins);
    void visitWasmLoadGlobalVar(MWasmLoadGlobalVar* ins);
    void visitWasmStoreGlobalVar(MWasmStoreGlobalVar* ins);
    void visitAsmJSParameter(MAsmJSParameter* ins);
@ -6,6 +6,7 @@

#include "jit/MIR.h"

#include "mozilla/CheckedInt.h"
#include "mozilla/FloatingPoint.h"
#include "mozilla/IntegerPrintfMacros.h"
#include "mozilla/MathAlgorithms.h"
@ -34,6 +35,7 @@ using namespace js::jit;

using JS::ToInt32;

using mozilla::CheckedInt;
using mozilla::NumbersAreIdentical;
using mozilla::IsFloat32Representable;
using mozilla::IsNaN;
@ -4944,6 +4946,24 @@ MLoadFixedSlotAndUnbox::foldsTo(TempAllocator& alloc)
    return this;
}

MDefinition*
MWasmAddOffset::foldsTo(TempAllocator& alloc)
{
    MDefinition* baseArg = base();
    if (!baseArg->isConstant())
        return this;

    MOZ_ASSERT(baseArg->type() == MIRType::Int32);
    CheckedInt<uint32_t> ptr = baseArg->toConstant()->toInt32();

    ptr += offset();

    if (!ptr.isValid())
        return this;

    return MConstant::New(alloc, Int32Value(ptr.value()));
}
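// Folding example (editor's sketch): base = MConstant(100) with offset() = 8
// folds to MConstant(108); base = MConstant(0xFFFFFFF0) with offset() = 0x20
// overflows CheckedInt<uint32_t>, so the node is kept and its guard traps at
// run time instead of wrapping silently.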
MDefinition::AliasType
MAsmJSLoadHeap::mightAlias(const MDefinition* def) const
{
@ -5425,15 +5445,16 @@ MWasmCall::NewBuiltinInstanceMethodCall(TempAllocator& alloc,
                                        const ABIArg& instanceArg,
                                        const Args& args,
                                        MIRType resultType,
                                        uint32_t spIncrement)
                                        uint32_t spIncrement,
                                        uint32_t tlsStackOffset)
{
    auto callee = wasm::CalleeDesc::builtinInstanceMethod(builtin);
    MWasmCall* call = MWasmCall::New(alloc, desc, callee, args, resultType, spIncrement,
                                     MWasmCall::DontSaveTls, nullptr);
                                     tlsStackOffset, nullptr);
    if (!call)
        return nullptr;
    MOZ_ASSERT(instanceArg != ABIArg()); // instanceArg must be initialized.

    MOZ_ASSERT(instanceArg != ABIArg());
    call->instanceArg_ = instanceArg;
    return call;
}
210
js/src/jit/MIR.h
@ -13224,66 +13224,16 @@ class MAsmJSNeg
    }
};

class MWasmMemoryAccess
{
    uint32_t offset_;
    uint32_t align_;
    Scalar::Type accessType_ : 8;
    bool needsBoundsCheck_;
    unsigned numSimdElems_;
    MemoryBarrierBits barrierBefore_;
    MemoryBarrierBits barrierAfter_;

  public:
    explicit MWasmMemoryAccess(Scalar::Type accessType, uint32_t align, uint32_t offset,
                               unsigned numSimdElems = 0,
                               MemoryBarrierBits barrierBefore = MembarNobits,
                               MemoryBarrierBits barrierAfter = MembarNobits)
      : offset_(offset),
        align_(align),
        accessType_(accessType),
        needsBoundsCheck_(true),
        numSimdElems_(numSimdElems),
        barrierBefore_(barrierBefore),
        barrierAfter_(barrierAfter)
    {
        MOZ_ASSERT(numSimdElems <= ScalarTypeToLength(accessType));
        MOZ_ASSERT(mozilla::IsPowerOfTwo(align));
    }

    uint32_t offset() const { return offset_; }
    uint32_t endOffset() const { return offset() + byteSize(); }
    uint32_t align() const { return align_; }
    Scalar::Type accessType() const { return accessType_; }
    unsigned byteSize() const {
        return Scalar::isSimdType(accessType())
               ? Scalar::scalarByteSize(accessType()) * numSimdElems()
               : TypedArrayElemSize(accessType());
    }
    bool needsBoundsCheck() const { return needsBoundsCheck_; }
    unsigned numSimdElems() const { MOZ_ASSERT(Scalar::isSimdType(accessType_)); return numSimdElems_; }
    MemoryBarrierBits barrierBefore() const { return barrierBefore_; }
    MemoryBarrierBits barrierAfter() const { return barrierAfter_; }
    bool isAtomicAccess() const { return (barrierBefore_ | barrierAfter_) != MembarNobits; }
    bool isUnaligned() const { return align() && align() < byteSize(); }

    void removeBoundsCheck() { needsBoundsCheck_ = false; }
    void setOffset(uint32_t o) { offset_ = o; }
};

class MWasmBoundsCheck
  : public MUnaryInstruction,
    public MWasmMemoryAccess,
    public NoTypePolicy::Data
{
    bool redundant_;

    explicit MWasmBoundsCheck(MDefinition* index, const MWasmMemoryAccess& access)
    explicit MWasmBoundsCheck(MDefinition* index)
      : MUnaryInstruction(index),
        MWasmMemoryAccess(access),
        redundant_(false)
    {
        setMovable();
        setGuard(); // Effectful: throws for OOB.
    }

@ -13291,15 +13241,6 @@ class MWasmBoundsCheck
    INSTRUCTION_HEADER(WasmBoundsCheck)
    TRIVIAL_NEW_WRAPPERS

    bool congruentTo(const MDefinition* ins) const override {
        if (!congruentIfOperandsEqual(ins))
            return false;
        const MWasmBoundsCheck* other = ins->toWasmBoundsCheck();
        return accessType() == other->accessType() &&
               offset() == other->offset() &&
               align() == other->align();
    }

    AliasSet getAliasSet() const override {
        return AliasSet::None();
    }
@ -13313,21 +13254,90 @@ class MWasmBoundsCheck
    }
};

class MWasmAddOffset
  : public MUnaryInstruction,
    public NoTypePolicy::Data
{
    uint32_t offset_;

    MWasmAddOffset(MDefinition* base, uint32_t offset)
      : MUnaryInstruction(base),
        offset_(offset)
    {
        setGuard();
        setResultType(MIRType::Int32);
    }

  public:
    INSTRUCTION_HEADER(WasmAddOffset)
    TRIVIAL_NEW_WRAPPERS
    NAMED_OPERANDS((0, base))

    MDefinition* foldsTo(TempAllocator& alloc) override;

    AliasSet getAliasSet() const override {
        return AliasSet::None();
    }

    uint32_t offset() const {
        return offset_;
    }
};

class MWasmMemoryAccess
{
    uint32_t offset_;
    uint32_t align_;
    Scalar::Type accessType_ : 8;
    unsigned numSimdElems_;
    MemoryBarrierBits barrierBefore_;
    MemoryBarrierBits barrierAfter_;

  public:
    explicit MWasmMemoryAccess(Scalar::Type accessType, uint32_t align, uint32_t offset,
                               unsigned numSimdElems = 0,
                               MemoryBarrierBits barrierBefore = MembarNobits,
                               MemoryBarrierBits barrierAfter = MembarNobits)
      : offset_(offset),
        align_(align),
        accessType_(accessType),
        numSimdElems_(numSimdElems),
        barrierBefore_(barrierBefore),
        barrierAfter_(barrierAfter)
    {
        MOZ_ASSERT(numSimdElems <= ScalarTypeToLength(accessType));
        MOZ_ASSERT(mozilla::IsPowerOfTwo(align));
    }

    uint32_t offset() const { return offset_; }
    uint32_t align() const { return align_; }
    Scalar::Type accessType() const { return accessType_; }
    unsigned byteSize() const {
        return Scalar::isSimdType(accessType())
               ? Scalar::scalarByteSize(accessType()) * numSimdElems()
               : TypedArrayElemSize(accessType());
    }
    unsigned numSimdElems() const { return numSimdElems_; }
    MemoryBarrierBits barrierBefore() const { return barrierBefore_; }
    MemoryBarrierBits barrierAfter() const { return barrierAfter_; }
    bool isAtomicAccess() const { return (barrierBefore_ | barrierAfter_) != MembarNobits; }
    bool isSimdAccess() const { return Scalar::isSimdType(accessType_); }
    bool isUnaligned() const { return align() && align() < byteSize(); }

    void clearOffset() { offset_ = 0; }
};
class MWasmLoad
  : public MUnaryInstruction,
    public MWasmMemoryAccess,
    public NoTypePolicy::Data
{
    MWasmLoad(MDefinition* base, const MWasmMemoryAccess& access, bool isInt64)
    MWasmLoad(MDefinition* base, const MWasmMemoryAccess& access, MIRType resultType)
      : MUnaryInstruction(base),
        MWasmMemoryAccess(access)
    {
        setGuard();
        MOZ_ASSERT(access.accessType() != Scalar::Uint8Clamped, "unexpected load heap in wasm");
        if (isInt64)
            setResultType(MIRType::Int64);
        else
            setResultType(ScalarTypeToMIRType(access.accessType()));
        setResultType(resultType);
    }

  public:
@ -13366,22 +13376,42 @@ class MWasmStore
    }
};

class MAsmJSMemoryAccess
{
    uint32_t offset_;
    Scalar::Type accessType_;
    bool needsBoundsCheck_;

  public:
    explicit MAsmJSMemoryAccess(Scalar::Type accessType)
      : offset_(0),
        accessType_(accessType),
        needsBoundsCheck_(true)
    {
        MOZ_ASSERT(accessType != Scalar::Uint8Clamped);
        MOZ_ASSERT(!Scalar::isSimdType(accessType));
    }

    uint32_t offset() const { return offset_; }
    uint32_t endOffset() const { return offset() + byteSize(); }
    Scalar::Type accessType() const { return accessType_; }
    unsigned byteSize() const { return TypedArrayElemSize(accessType()); }
    bool needsBoundsCheck() const { return needsBoundsCheck_; }

    void removeBoundsCheck() { needsBoundsCheck_ = false; }
    void setOffset(uint32_t o) { offset_ = o; }
};

class MAsmJSLoadHeap
  : public MUnaryInstruction,
    public MWasmMemoryAccess,
    public MAsmJSMemoryAccess,
    public NoTypePolicy::Data
{
    MAsmJSLoadHeap(MDefinition* base, const MWasmMemoryAccess& access)
    MAsmJSLoadHeap(MDefinition* base, Scalar::Type accessType)
      : MUnaryInstruction(base),
        MWasmMemoryAccess(access)
        MAsmJSMemoryAccess(accessType)
    {
        if (access.barrierBefore() | access.barrierAfter())
            setGuard(); // Not removable
        else
            setMovable();

        MOZ_ASSERT(access.accessType() != Scalar::Uint8Clamped, "unexpected load heap in asm.js");
        setResultType(ScalarTypeToMIRType(access.accessType()));
        setResultType(ScalarTypeToMIRType(accessType));
    }

  public:
@ -13393,10 +13423,6 @@ class MAsmJSLoadHeap

    bool congruentTo(const MDefinition* ins) const override;
    AliasSet getAliasSet() const override {
        // When a barrier is needed make the instruction effectful by
        // giving it a "store" effect.
        if (isAtomicAccess())
            return AliasSet::Store(AliasSet::AsmJSHeap);
        return AliasSet::Load(AliasSet::AsmJSHeap);
    }
    AliasType mightAlias(const MDefinition* def) const override;
@ -13404,16 +13430,13 @@ class MAsmJSLoadHeap

class MAsmJSStoreHeap
  : public MBinaryInstruction,
    public MWasmMemoryAccess,
    public MAsmJSMemoryAccess,
    public NoTypePolicy::Data
{
    MAsmJSStoreHeap(MDefinition* base, const MWasmMemoryAccess& access, MDefinition* v)
    MAsmJSStoreHeap(MDefinition* base, Scalar::Type accessType, MDefinition* v)
      : MBinaryInstruction(base, v),
        MWasmMemoryAccess(access)
    {
        if (access.barrierBefore() | access.barrierAfter())
            setGuard(); // Not removable
    }
        MAsmJSMemoryAccess(accessType)
    {}

  public:
    INSTRUCTION_HEADER(AsmJSStoreHeap)
@ -13665,9 +13688,13 @@ class MWasmCall final

    static const uint32_t DontSaveTls = UINT32_MAX;

    static MWasmCall* New(TempAllocator& alloc, const wasm::CallSiteDesc& desc,
                          const wasm::CalleeDesc& callee, const Args& args, MIRType resultType,
                          uint32_t spIncrement, uint32_t tlsStackOffset,
    static MWasmCall* New(TempAllocator& alloc,
                          const wasm::CallSiteDesc& desc,
                          const wasm::CalleeDesc& callee,
                          const Args& args,
                          MIRType resultType,
                          uint32_t spIncrement,
                          uint32_t tlsStackOffset,
                          MDefinition* tableIndex = nullptr);

    static MWasmCall* NewBuiltinInstanceMethodCall(TempAllocator& alloc,
@ -13676,7 +13703,8 @@ class MWasmCall final
                                                   const ABIArg& instanceArg,
                                                   const Args& args,
                                                   MIRType resultType,
                                                   uint32_t spIncrement);
                                                   uint32_t spIncrement,
                                                   uint32_t tlsStackOffset);

    size_t numArgs() const {
        return argRegs_.length();
@ -214,9 +214,6 @@ class MIRGenerator
  public:
    const JitCompileOptions options;

    bool needsBoundsCheckBranch(const MWasmMemoryAccess* access) const;
    size_t foldableOffsetRange(const MWasmMemoryAccess* access) const;

  private:
    GraphSpewer gs_;

@ -105,52 +105,6 @@ MIRGenerator::addAbortedPreliminaryGroup(ObjectGroup* group)
        oomUnsafe.crash("addAbortedPreliminaryGroup");
}

bool
MIRGenerator::needsBoundsCheckBranch(const MWasmMemoryAccess* access) const
{
    // A heap access needs a bounds-check branch if we're not relying on signal
    // handlers to catch errors, and if it's not proven to be within bounds.
    // We use signal-handlers on x64, but on x86 there isn't enough address
    // space for a guard region. Also, on x64 the atomic loads and stores
    // can't (yet) use the signal handlers.
#ifdef WASM_HUGE_MEMORY
    return false;
#else
    return access->needsBoundsCheck();
#endif
}

size_t
MIRGenerator::foldableOffsetRange(const MWasmMemoryAccess* access) const
{
    // This determines whether it's ok to fold up to WasmImmediateRange
    // offsets, instead of just WasmCheckedImmediateRange.

    static_assert(WasmCheckedImmediateRange <= WasmImmediateRange,
                  "WasmImmediateRange should be the size of an unconstrained "
                  "address immediate");

#ifdef WASM_HUGE_MEMORY
    static_assert(wasm::Uint32Range + WasmImmediateRange + sizeof(wasm::Val) < wasm::MappedSize,
                  "When using signal handlers for bounds checking, a uint32 is added to the base "
                  "address followed by an immediate in the range [0, WasmImmediateRange). An "
                  "unaligned access (whose size is conservatively approximated by wasm::Val) may "
                  "spill over, so ensure a space at the end.");
    return WasmImmediateRange;
#else
    // On 32-bit platforms, if we've proven the access is in bounds after
    // 32-bit wrapping, we can fold full offsets because they're added with
    // 32-bit arithmetic.
    if (sizeof(intptr_t) == sizeof(int32_t) && !access->needsBoundsCheck())
        return WasmImmediateRange;

    // Otherwise, only allow the checked size. This is always less than the
    // minimum heap length, and allows explicit bounds checks to fold in the
    // offset without overflow.
    return WasmCheckedImmediateRange;
#endif
}

void
MIRGraph::addBlock(MBasicBlock* block)
{

@ -271,6 +271,7 @@ namespace jit {
    _(HasClass) \
    _(CopySign) \
    _(WasmBoundsCheck) \
    _(WasmAddOffset) \
    _(WasmLoad) \
    _(WasmStore) \
    _(WasmTrap) \
|
@ -1040,8 +1040,8 @@ class MacroAssembler : public MacroAssemblerSpecific
|
||||
inline void branchFloat32NotInInt64Range(Address src, Register temp, Label* fail);
|
||||
inline void branchFloat32NotInUInt64Range(Address src, Register temp, Label* fail);
|
||||
|
||||
template <typename T>
|
||||
inline void branchAdd32(Condition cond, T src, Register dest, Label* label) PER_SHARED_ARCH;
|
||||
template <typename T, typename L>
|
||||
inline void branchAdd32(Condition cond, T src, Register dest, L label) PER_SHARED_ARCH;
|
||||
template <typename T>
|
||||
inline void branchSub32(Condition cond, T src, Register dest, Label* label) PER_SHARED_ARCH;
|
||||
|
||||
@ -1314,6 +1314,27 @@ class MacroAssembler : public MacroAssemblerSpecific
|
||||
DEFINED_ON(x86, x64);
|
||||
|
||||
public:
|
||||
// ========================================================================
|
||||
// wasm support
|
||||
|
||||
// Emit a bounds check against the (dynamically-patched) wasm bounds check
|
||||
// limit, jumping to 'label' if 'cond' holds.
|
||||
template <class L>
|
||||
inline void wasmBoundsCheck(Condition cond, Register index, L label) PER_ARCH;
|
||||
|
||||
// Called after compilation completes to patch the given limit into the
|
||||
// given instruction's immediate.
|
||||
static inline void wasmPatchBoundsCheck(uint8_t* patchAt, uint32_t limit) PER_ARCH;
|
||||
|
||||
// On x86, each instruction adds its own wasm::MemoryAccess's to the
|
||||
// wasm::MemoryAccessVector (there can be multiple when i64 is involved).
|
||||
// On x64, only some asm.js accesses need a wasm::MemoryAccess so the caller
|
||||
// is responsible for doing this instead.
|
||||
void wasmLoad(Scalar::Type type, unsigned numSimdElems, Operand srcAddr, AnyRegister out) DEFINED_ON(x86, x64);
|
||||
void wasmLoadI64(Scalar::Type type, Operand srcAddr, Register64 out) DEFINED_ON(x86, x64);
|
||||
void wasmStore(Scalar::Type type, unsigned numSimdElems, AnyRegister value, Operand dstAddr) DEFINED_ON(x86, x64);
|
||||
void wasmStoreI64(Register64 value, Operand dstAddr) DEFINED_ON(x86);
|
||||
|
||||
// wasm specific methods, used in both the wasm baseline compiler and ion.
|
||||
void wasmTruncateDoubleToUInt32(FloatRegister input, Register output, Label* oolEntry) DEFINED_ON(x86, x64);
|
||||
void wasmTruncateDoubleToInt32(FloatRegister input, Register output, Label* oolEntry) DEFINED_ON(x86_shared);
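
The wasmBoundsCheck / wasmPatchBoundsCheck pair declared above splits bounds checking into two phases: each check is emitted with a placeholder limit and its patch site recorded, then the real limit is written in once the heap length is known (and re-patched if memory grows). Below is a standalone model of that lifecycle, with the instruction stream reduced to an array of immediates; every name in the sketch is invented for illustration:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Toy "code buffer": each bounds check is one slot holding its limit.
    struct CodeBuffer {
        std::vector<uint32_t> imms;
        std::vector<size_t> boundsChecks;      // recorded patch sites

        size_t emitBoundsCheck() {
            imms.push_back(0);                 // placeholder, like Imm8(0)
            boundsChecks.push_back(imms.size() - 1);
            return imms.size() - 1;
        }
        void patchBoundsChecks(uint32_t limit) {
            for (size_t site : boundsChecks)   // wasmPatchBoundsCheck analogue
                imms[site] = limit;
        }
        bool inBounds(size_t site, uint32_t index) const {
            return index < imms[site];         // the emitted cmp + branch
        }
    };

    int main() {
        CodeBuffer cb;
        size_t site = cb.emitBoundsCheck();    // compile time: limit unknown
        cb.patchBoundsChecks(1u << 16);        // instantiation: 64 KiB heap
        std::printf("%d %d\n", cb.inBounds(site, 100), cb.inBounds(site, 70000));
    }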

@ -11,12 +11,7 @@ using namespace js;
using namespace js::jit;
using namespace mozilla;

struct DefAndOffset {
    MDefinition* loc;
    uint32_t endOffset;
};

typedef js::HashMap<uint32_t, DefAndOffset, DefaultHasher<uint32_t>, SystemAllocPolicy>
typedef js::HashMap<uint32_t, MDefinition*, DefaultHasher<uint32_t>, SystemAllocPolicy>
    LastSeenMap;

// The Wasm Bounds Check Elimination (BCE) pass looks for bounds checks

@ -31,7 +26,9 @@ typedef js::HashMap<uint32_t, DefAndOffset, DefaultHasher<uint32_t>, SystemAlloc
// check, but a set of checks that together dominate a redundant check?
//
// TODO (dbounov): Generalize to constant additions relative to one base
bool jit::EliminateBoundsChecks(MIRGenerator* mir, MIRGraph& graph) {
bool
jit::EliminateBoundsChecks(MIRGenerator* mir, MIRGraph& graph)
{
    // Map for dominating block where a given definition was checked
    LastSeenMap lastSeen;
    if (!lastSeen.init())

@ -46,17 +43,12 @@ bool jit::EliminateBoundsChecks(MIRGenerator* mir, MIRGraph& graph) {
          case MDefinition::Op_WasmBoundsCheck: {
            MWasmBoundsCheck* bc = def->toWasmBoundsCheck();
            MDefinition* addr = def->getOperand(0);
            LastSeenMap::Ptr checkPtr = lastSeen.lookup(addr->id());

            if (checkPtr &&
                checkPtr->value().endOffset >= bc->endOffset() &&
                checkPtr->value().loc->block()->dominates(block)) {
                // Address already checked. Discard current check
            LastSeenMap::AddPtr checkPtr = lastSeen.lookupForAdd(addr->id());
            if (checkPtr && checkPtr->value()->block()->dominates(block)) {
                bc->setRedundant(true);
            } else {
                DefAndOffset defOff = { def, bc->endOffset() };
                // Address not previously checked - remember current check
                if (!lastSeen.put(addr->id(), defOff))
                if (!lastSeen.add(checkPtr, addr->id(), def))
                    return false;
            }
            break;

@ -64,7 +56,6 @@ bool jit::EliminateBoundsChecks(MIRGenerator* mir, MIRGraph& graph) {
          case MDefinition::Op_Phi: {
            MPhi* phi = def->toPhi();
            bool phiChecked = true;
            uint32_t off = UINT32_MAX;

            MOZ_ASSERT(phi->numOperands() > 0);

@ -77,19 +68,16 @@ bool jit::EliminateBoundsChecks(MIRGenerator* mir, MIRGraph& graph) {
            // cannot be in lastSeen because its block hasn't been traversed yet.
            for (int i = 0, nOps = phi->numOperands(); i < nOps; i++) {
                MDefinition* src = phi->getOperand(i);
                LastSeenMap::Ptr checkPtr = lastSeen.lookup(src->id());

                if (!checkPtr || !checkPtr->value().loc->block()->dominates(block)) {
                LastSeenMap::Ptr checkPtr = lastSeen.lookup(src->id());
                if (!checkPtr || !checkPtr->value()->block()->dominates(block)) {
                    phiChecked = false;
                    break;
                } else {
                    off = Min(off, checkPtr->value().endOffset);
                }
            }

            if (phiChecked) {
                DefAndOffset defOff = { def, off };
                if (!lastSeen.put(def->id(), defOff))
                if (!lastSeen.put(def->id(), def))
                    return false;
            }
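
The simplified pass keeps one map entry per checked definition and drops a later check only when an earlier check on the same definition id dominates it. Here is a standalone sketch of that core idea on a toy dominator tree (the structs are invented; the real pass works on MIR blocks and js::HashMap):

    #include <cstdint>
    #include <cstdio>
    #include <unordered_map>

    struct Block {
        Block* idom;                            // immediate dominator
        bool dominates(const Block* other) const {
            for (const Block* b = other; b; b = b->idom)
                if (b == this)
                    return true;
            return false;
        }
    };

    // True if a check on 'defId' in 'where' is redundant because an earlier
    // recorded check dominates it; otherwise record this one (like lastSeen).
    static bool checkOrRecord(std::unordered_map<uint32_t, Block*>& lastSeen,
                              uint32_t defId, Block* where) {
        auto it = lastSeen.find(defId);
        if (it != lastSeen.end() && it->second->dominates(where))
            return true;                        // bc->setRedundant(true)
        lastSeen[defId] = where;
        return false;
    }

    int main() {
        Block entry{nullptr};
        Block body{&entry};
        std::unordered_map<uint32_t, Block*> lastSeen;
        std::printf("%d\n", checkOrRecord(lastSeen, 7, &entry)); // 0: recorded
        std::printf("%d\n", checkOrRecord(lastSeen, 7, &body));  // 1: redundant
    }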

@ -650,12 +650,6 @@ static inline bool UseHardFpABI()
// have ABIArg which are represented by pair of general purpose registers.
#define JS_CODEGEN_REGISTER_PAIR 1

// See MIRGenerator::foldableOffsetRange for more info.
// TODO: Implement this for ARM. Note that it requires Codegen to respect the
// offset field of AsmJSHeapAccess.
static const size_t WasmCheckedImmediateRange = 0;
static const size_t WasmImmediateRange = 0;

} // namespace jit
} // namespace js

@ -3381,27 +3381,6 @@ Assembler::BailoutTableStart(uint8_t* code)
    return (uint8_t*) inst;
}

void
Assembler::UpdateBoundsCheck(uint8_t* patchAt, uint32_t heapLength)
{
    Instruction* inst = (Instruction*) patchAt;
    MOZ_ASSERT(inst->is<InstCMP>());
    InstCMP* cmp = inst->as<InstCMP>();

    Register index;
    cmp->extractOp1(&index);

    MOZ_ASSERT(cmp->extractOp2().isImm8());

    Imm8 imm8 = Imm8(heapLength);
    MOZ_ASSERT(!imm8.invalid);

    *inst = InstALU(InvalidReg, index, imm8, OpCmp, SetCC, Always);
    // NOTE: we don't update the Auto Flush Cache! this function is currently
    // only called from within ModuleGenerator::finish, which does that
    // for us. Don't call this!
}

InstructionIterator::InstructionIterator(Instruction* i_)
  : i(i_)
{

@ -1968,7 +1968,6 @@ class Assembler : public AssemblerShared
    static size_t ToggledCallSize(uint8_t* code);
    static void ToggleCall(CodeLocationLabel inst_, bool enabled);

    static void UpdateBoundsCheck(uint8_t* patchAt, uint32_t heapLength);
    void processCodeLabels(uint8_t* rawCode);

    bool bailed() {

@ -2240,6 +2240,10 @@ void
CodeGeneratorARM::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
{
    const MAsmJSLoadHeap* mir = ins->mir();
    MOZ_ASSERT(mir->offset() == 0);

    const LAllocation* ptr = ins->ptr();

    bool isSigned;
    int size;
    bool isFloat = false;

@ -2255,10 +2259,6 @@ CodeGeneratorARM::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
      default: MOZ_CRASH("unexpected array type");
    }

    memoryBarrier(mir->barrierBefore());

    const LAllocation* ptr = ins->ptr();

    if (ptr->isConstant()) {
        MOZ_ASSERT(!mir->needsBoundsCheck());
        int32_t ptrImm = ptr->toConstant()->toInt32();

@ -2275,63 +2275,37 @@ CodeGeneratorARM::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
        }
    } else {
        Register ptrReg = ToRegister(ptr);
        if (isFloat)
            masm.ma_loadHeapAsmJS(ptrReg, size, mir->needsBoundsCheck(),
                                  /*faultOnOOB=*/false, ToFloatRegister(ins->output()));
        else
            masm.ma_loadHeapAsmJS(ptrReg, size, isSigned, mir->needsBoundsCheck(),
                                  mir->isAtomicAccess(), ToRegister(ins->output()));
    }
        if (isFloat) {
            FloatRegister output = ToFloatRegister(ins->output());
            if (size == 32)
                output = output.singleOverlay();

    memoryBarrier(mir->barrierAfter());
}
            Assembler::Condition cond = Assembler::Always;
            if (mir->needsBoundsCheck()) {
                BufferOffset cmp = masm.as_cmp(ptrReg, Imm8(0));
                masm.append(wasm::BoundsCheck(cmp.getOffset()));

void
CodeGeneratorARM::visitWasmBoundsCheck(LWasmBoundsCheck* ins)
{
    MWasmBoundsCheck* mir = ins->mir();
                size_t nanOffset = size == 32 ? wasm::NaN32GlobalDataOffset : wasm::NaN64GlobalDataOffset;
                masm.ma_vldr(Address(GlobalReg, nanOffset - AsmJSGlobalRegBias), output,
                             Assembler::AboveOrEqual);
                cond = Assembler::Below;
            }

    MOZ_ASSERT(mir->offset() <= INT32_MAX);
            masm.ma_vldr(output, HeapReg, ptrReg, 0, cond);
        } else {
            Register output = ToRegister(ins->output());

    if (!mir->isRedundant()) {
        // No guarantee that heapBase + endOffset can be properly encoded in
        // the cmp immediate in ma_BoundsCheck, so use an explicit add instead.
        uint32_t endOffset = mir->endOffset();
            Assembler::Condition cond = Assembler::Always;
            if (mir->needsBoundsCheck()) {
                uint32_t cmpOffset = masm.as_cmp(ptrReg, Imm8(0)).getOffset();
                masm.append(wasm::BoundsCheck(cmpOffset));

        Register ptr = ToRegister(ins->ptr());
                masm.ma_mov(Imm32(0), output, Assembler::AboveOrEqual);
                cond = Assembler::Below;
            }

        ScratchRegisterScope ptrPlusOffset(masm);
        masm.move32(Imm32(endOffset), ptrPlusOffset);
        masm.ma_add(ptr, ptrPlusOffset, SetCC);

        // Detect unsigned overflow by checking the carry bit.
        masm.as_b(wasm::JumpTarget::OutOfBounds, Assembler::CarrySet);

        uint32_t cmpOffset = masm.ma_BoundsCheck(ptrPlusOffset).getOffset();
        masm.append(wasm::BoundsCheck(cmpOffset));
        masm.as_b(wasm::JumpTarget::OutOfBounds, Assembler::Above);
    } else {
#ifdef DEBUG
        Label ok1, ok2;
        uint32_t endOffset = mir->endOffset();

        Register ptr = ToRegister(ins->ptr());

        ScratchRegisterScope ptrPlusOffset(masm);
        masm.move32(Imm32(endOffset), ptrPlusOffset);
        masm.ma_add(ptr, ptrPlusOffset, SetCC);

        // Detect unsigned overflow by checking the carry bit.
        masm.as_b(&ok1, Assembler::CarryClear);
        masm.assumeUnreachable("Redundant bounds check failed!");
        masm.bind(&ok1);

        uint32_t cmpOffset = masm.ma_BoundsCheck(ptrPlusOffset).getOffset();
        masm.append(wasm::BoundsCheck(cmpOffset));
        masm.as_b(&ok2, Assembler::BelowOrEqual);
        masm.assumeUnreachable("Redundant bounds check failed!");
        masm.bind(&ok2);
#endif
            masm.ma_dataTransferN(IsLoad, size, isSigned, HeapReg, ptrReg, output, Offset, cond);
        }
    }
}

@ -2341,10 +2315,8 @@ CodeGeneratorARM::emitWasmLoad(T* lir)
{
    const MWasmLoad* mir = lir->mir();

    MOZ_ASSERT(!mir->barrierBefore() && !mir->barrierAfter(), "atomics NYI");

    uint32_t offset = mir->offset();
    MOZ_ASSERT(offset <= INT32_MAX);
    MOZ_ASSERT(offset < wasm::OffsetGuardLimit);

    Register ptr = ToRegister(lir->ptr());
    Scalar::Type type = mir->accessType();

@ -2352,7 +2324,8 @@ CodeGeneratorARM::emitWasmLoad(T* lir)
    // Maybe add the offset.
    if (offset || type == Scalar::Int64) {
        Register ptrPlusOffset = ToRegister(lir->ptrCopy());
        masm.ma_add(Imm32(offset), ptrPlusOffset);
        if (offset)
            masm.ma_add(Imm32(offset), ptrPlusOffset);
        ptr = ptrPlusOffset;
    } else {
        MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());

@ -2362,6 +2335,8 @@ CodeGeneratorARM::emitWasmLoad(T* lir)
                    type == Scalar::Int64;
    unsigned byteSize = mir->byteSize();

    memoryBarrier(mir->barrierBefore());

    if (mir->type() == MIRType::Int64) {
        Register64 output = ToOutRegister64(lir);
        if (type == Scalar::Int64) {

@ -2388,6 +2363,8 @@ CodeGeneratorARM::emitWasmLoad(T* lir)
            masm.ma_dataTransferN(IsLoad, byteSize * 8, isSigned, HeapReg, ptr, output.gpr());
        }
    }

    memoryBarrier(mir->barrierAfter());
}
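
The barrierBefore / barrierAfter calls now bracket the access itself: fence, load or store, fence. A rough standalone analogue using C++ fences follows; it models only the ordering (the real code emits ARM DMB barriers, and 'needsBarrier' is an invented stand-in for the MIR barrier flags):

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    // Ordering-only model of emitWasmLoad's barrier placement.
    static uint32_t loadWithBarriers(const uint32_t* addr, bool needsBarrier) {
        if (needsBarrier)
            std::atomic_thread_fence(std::memory_order_seq_cst); // barrierBefore
        uint32_t value = *addr;                                   // the access
        if (needsBarrier)
            std::atomic_thread_fence(std::memory_order_seq_cst); // barrierAfter
        return value;
    }

    int main() {
        uint32_t cell = 42;
        std::printf("%u\n", (unsigned) loadWithBarriers(&cell, true));
    }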

void

@ -2408,10 +2385,8 @@ CodeGeneratorARM::emitWasmUnalignedLoad(T* lir)
{
    const MWasmLoad* mir = lir->mir();

    MOZ_ASSERT(!mir->barrierBefore() && !mir->barrierAfter(), "atomics NYI");

    uint32_t offset = mir->offset();
    MOZ_ASSERT(offset <= INT32_MAX);
    MOZ_ASSERT(offset < wasm::OffsetGuardLimit);

    Register ptr = ToRegister(lir->ptrCopy());
    if (offset)

@ -2440,6 +2415,8 @@ CodeGeneratorARM::emitWasmUnalignedLoad(T* lir)
    MOZ_ASSERT(low != tmp);
    MOZ_ASSERT(low != ptr);

    memoryBarrier(mir->barrierBefore());

    masm.emitUnalignedLoad(isSigned, Min(byteSize, 4u), ptr, tmp, low);

    if (IsFloatingPointType(mirType)) {

@ -2468,6 +2445,8 @@ CodeGeneratorARM::emitWasmUnalignedLoad(T* lir)
            masm.ma_mov(Imm32(0), output.high);
        }
    }

    memoryBarrier(mir->barrierAfter());
}

void

@ -2482,16 +2461,25 @@ CodeGeneratorARM::visitWasmUnalignedLoadI64(LWasmUnalignedLoadI64* lir)
    emitWasmUnalignedLoad(lir);
}

void
CodeGeneratorARM::visitWasmAddOffset(LWasmAddOffset* lir)
{
    MWasmAddOffset* mir = lir->mir();
    Register base = ToRegister(lir->base());
    Register out = ToRegister(lir->output());

    masm.ma_add(base, Imm32(mir->offset()), out, SetCC);
    masm.ma_b(wasm::JumpTarget::OutOfBounds, Assembler::CarrySet);
}
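
visitWasmAddOffset folds a constant offset into the pointer with flags set and branches straight to the out-of-bounds trap when the 32-bit add carries: a wrapped effective address could otherwise alias low memory and slip past a later limit check. Here is the same wraparound test in portable C++ (a sketch with invented names; the real code branches on the ARM carry flag):

    #include <cstdint>
    #include <cstdio>

    // False (trap) if base + offset wraps around 32 bits, mirroring the
    // ma_add(..., SetCC) + branch-on-CarrySet pair above.
    static bool addOffsetOrTrap(uint32_t base, uint32_t offset, uint32_t* out) {
        uint32_t sum = base + offset;   // well-defined unsigned wraparound
        if (sum < base)                 // the carry: effective address wrapped
            return false;               // wasm::JumpTarget::OutOfBounds analogue
        *out = sum;
        return true;
    }

    int main() {
        uint32_t ea;
        std::printf("%d\n", addOffsetOrTrap(0xFFFFFFF0u, 0x20u, &ea)); // 0: trap
        std::printf("%d\n", addOffsetOrTrap(0x1000u, 0x20u, &ea));     // 1: ok
    }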

template <typename T>
void
CodeGeneratorARM::emitWasmStore(T* lir)
{
    const MWasmStore* mir = lir->mir();

    MOZ_ASSERT(!mir->barrierBefore() && !mir->barrierAfter(), "atomics NYI");

    uint32_t offset = mir->offset();
    MOZ_ASSERT(offset <= INT32_MAX);
    MOZ_ASSERT(offset < wasm::OffsetGuardLimit);

    Register ptr = ToRegister(lir->ptr());
    unsigned byteSize = mir->byteSize();

@ -2500,12 +2488,15 @@ CodeGeneratorARM::emitWasmStore(T* lir)
    // Maybe add the offset.
    if (offset || type == Scalar::Int64) {
        Register ptrPlusOffset = ToRegister(lir->ptrCopy());
        masm.ma_add(Imm32(offset), ptrPlusOffset);
        if (offset)
            masm.ma_add(Imm32(offset), ptrPlusOffset);
        ptr = ptrPlusOffset;
    } else {
        MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
    }

    memoryBarrier(mir->barrierBefore());

    if (type == Scalar::Int64) {
        MOZ_ASSERT(INT64LOW_OFFSET == 0);

@ -2527,6 +2518,8 @@ CodeGeneratorARM::emitWasmStore(T* lir)
        masm.ma_dataTransferN(IsStore, 8 * byteSize /* bits */, isSigned, HeapReg, ptr, val);
    }

    memoryBarrier(mir->barrierAfter());
}

void

@ -2547,10 +2540,8 @@ CodeGeneratorARM::emitWasmUnalignedStore(T* lir)
{
    const MWasmStore* mir = lir->mir();

    MOZ_ASSERT(!mir->barrierBefore() && !mir->barrierAfter(), "atomics NYI");

    uint32_t offset = mir->offset();
    MOZ_ASSERT(offset <= INT32_MAX);
    MOZ_ASSERT(offset < wasm::OffsetGuardLimit);

    Register ptr = ToRegister(lir->ptrCopy());
    if (offset)

@ -2561,6 +2552,8 @@ CodeGeneratorARM::emitWasmUnalignedStore(T* lir)

    MIRType mirType = mir->value()->type();

    memoryBarrier(mir->barrierAfter());

    Register val = ToRegister(lir->valueHelper());
    if (IsFloatingPointType(mirType)) {
        masm.ma_vxfer(ToFloatRegister(lir->getOperand(LWasmUnalignedStore::ValueIndex)), val);

@ -2586,7 +2579,9 @@ CodeGeneratorARM::emitWasmUnalignedStore(T* lir)
            masm.ma_mov(ToRegister64(lir->getInt64Operand(LWasmUnalignedStoreI64::ValueIndex)).high, val);
        }
        masm.emitUnalignedStore(4, ptr, val, /* offset */ 4);
    }
    }

    memoryBarrier(mir->barrierBefore());
}

void

@ -2605,6 +2600,10 @@ void
CodeGeneratorARM::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
{
    const MAsmJSStoreHeap* mir = ins->mir();
    MOZ_ASSERT(mir->offset() == 0);

    const LAllocation* ptr = ins->ptr();

    bool isSigned;
    int size;
    bool isFloat = false;

@ -2620,10 +2619,6 @@ CodeGeneratorARM::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
      default: MOZ_CRASH("unexpected array type");
    }

    memoryBarrier(mir->barrierBefore());

    const LAllocation* ptr = ins->ptr();

    if (ptr->isConstant()) {
        MOZ_ASSERT(!mir->needsBoundsCheck());
        int32_t ptrImm = ptr->toConstant()->toInt32();

@ -2641,21 +2636,34 @@ CodeGeneratorARM::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
        }
    } else {
        Register ptrReg = ToRegister(ptr);
        if (isFloat)
            masm.ma_storeHeapAsmJS(ptrReg, size, mir->needsBoundsCheck(), /*faultOnOOB=*/false,
                                   ToFloatRegister(ins->value()));
        else
            masm.ma_storeHeapAsmJS(ptrReg, size, isSigned, mir->needsBoundsCheck(),
                                   mir->isAtomicAccess(), ToRegister(ins->value()));
    }

    memoryBarrier(mir->barrierAfter());
        Assembler::Condition cond = Assembler::Always;
        if (mir->needsBoundsCheck()) {
            BufferOffset cmp = masm.as_cmp(ptrReg, Imm8(0));
            masm.append(wasm::BoundsCheck(cmp.getOffset()));

            cond = Assembler::Below;
        }

        if (isFloat) {
            FloatRegister value = ToFloatRegister(ins->value());
            if (size == 32)
                value = value.singleOverlay();

            masm.ma_vstr(value, HeapReg, ptrReg, 0, 0, Assembler::Below);
        } else {
            Register value = ToRegister(ins->value());
            masm.ma_dataTransferN(IsStore, size, isSigned, HeapReg, ptrReg, value, Offset, cond);
        }
    }
}

void
CodeGeneratorARM::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
{
    MAsmJSCompareExchangeHeap* mir = ins->mir();
    MOZ_ASSERT(mir->offset() == 0);

    Scalar::Type vt = mir->accessType();
    const LAllocation* ptr = ins->ptr();
    Register ptrReg = ToRegister(ptr);

@ -2665,11 +2673,6 @@ CodeGeneratorARM::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
    Register oldval = ToRegister(ins->oldValue());
    Register newval = ToRegister(ins->newValue());

    if (mir->needsBoundsCheck()) {
        uint32_t cmpOffset = masm.ma_BoundsCheck(ptrReg).getOffset();
        masm.ma_b(wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual);
        masm.append(wasm::BoundsCheck(cmpOffset));
    }
    masm.compareExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                        srcAddr, oldval, newval, InvalidReg,
                                        ToAnyRegister(ins->output()));

@ -2679,6 +2682,8 @@ void
CodeGeneratorARM::visitAsmJSCompareExchangeCallout(LAsmJSCompareExchangeCallout* ins)
{
    const MAsmJSCompareExchangeHeap* mir = ins->mir();
    MOZ_ASSERT(mir->offset() == 0);

    Register ptr = ToRegister(ins->ptr());
    Register oldval = ToRegister(ins->oldval());
    Register newval = ToRegister(ins->newval());

@ -2704,18 +2709,14 @@ void
CodeGeneratorARM::visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins)
{
    MAsmJSAtomicExchangeHeap* mir = ins->mir();
    MOZ_ASSERT(mir->offset() == 0);

    Scalar::Type vt = mir->accessType();
    Register ptrReg = ToRegister(ins->ptr());
    Register value = ToRegister(ins->value());
    BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
    MOZ_ASSERT(ins->addrTemp()->isBogusTemp());

    if (mir->needsBoundsCheck()) {
        uint32_t cmpOffset = masm.ma_BoundsCheck(ptrReg).getOffset();
        masm.ma_b(wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual);
        masm.append(wasm::BoundsCheck(cmpOffset));
    }

    masm.atomicExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                       srcAddr, value, InvalidReg, ToAnyRegister(ins->output()));
}

@ -2724,6 +2725,8 @@ void
CodeGeneratorARM::visitAsmJSAtomicExchangeCallout(LAsmJSAtomicExchangeCallout* ins)
{
    const MAsmJSAtomicExchangeHeap* mir = ins->mir();
    MOZ_ASSERT(mir->offset() == 0);

    Register ptr = ToRegister(ins->ptr());
    Register value = ToRegister(ins->value());
    Register tls = ToRegister(ins->tls());

@ -2746,55 +2749,46 @@ CodeGeneratorARM::visitAsmJSAtomicExchangeCallout(LAsmJSAtomicExchangeCallout* i
void
CodeGeneratorARM::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)
{
    MOZ_ASSERT(ins->mir()->hasUses());
    MOZ_ASSERT(ins->addrTemp()->isBogusTemp());

    MAsmJSAtomicBinopHeap* mir = ins->mir();
    MOZ_ASSERT(mir->offset() == 0);
    MOZ_ASSERT(mir->hasUses());

    Scalar::Type vt = mir->accessType();
    Register ptrReg = ToRegister(ins->ptr());
    Register flagTemp = ToRegister(ins->flagTemp());
    const LAllocation* value = ins->value();
    AtomicOp op = mir->operation();
    MOZ_ASSERT(ins->addrTemp()->isBogusTemp());

    BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);

    if (mir->needsBoundsCheck()) {
        uint32_t cmpOffset = masm.ma_BoundsCheck(ptrReg).getOffset();
        masm.ma_b(wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual);
        masm.append(wasm::BoundsCheck(cmpOffset));
    }

    if (value->isConstant())
    if (value->isConstant()) {
        atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                   Imm32(ToInt32(value)), srcAddr, flagTemp, InvalidReg,
                                   ToAnyRegister(ins->output()));
    else
    } else {
        atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                   ToRegister(value), srcAddr, flagTemp, InvalidReg,
                                   ToAnyRegister(ins->output()));
    }
}

void
CodeGeneratorARM::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins)
{
    MOZ_ASSERT(!ins->mir()->hasUses());
    MOZ_ASSERT(ins->addrTemp()->isBogusTemp());

    MAsmJSAtomicBinopHeap* mir = ins->mir();
    MOZ_ASSERT(mir->offset() == 0);
    MOZ_ASSERT(!mir->hasUses());

    Scalar::Type vt = mir->accessType();
    Register ptrReg = ToRegister(ins->ptr());
    Register flagTemp = ToRegister(ins->flagTemp());
    const LAllocation* value = ins->value();
    AtomicOp op = mir->operation();
    MOZ_ASSERT(ins->addrTemp()->isBogusTemp());

    BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);

    if (mir->needsBoundsCheck()) {
        uint32_t cmpOffset = masm.ma_BoundsCheck(ptrReg).getOffset();
        masm.ma_b(wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual);
        masm.append(wasm::BoundsCheck(cmpOffset));
    }

    if (value->isConstant())
        atomicBinopToTypedIntArray(op, vt, Imm32(ToInt32(value)), srcAddr, flagTemp);
    else

@ -2805,6 +2799,8 @@ void
CodeGeneratorARM::visitAsmJSAtomicBinopCallout(LAsmJSAtomicBinopCallout* ins)
{
    const MAsmJSAtomicBinopHeap* mir = ins->mir();
    MOZ_ASSERT(mir->offset() == 0);

    Register ptr = ToRegister(ins->ptr());
    Register value = ToRegister(ins->value());
    Register tls = ToRegister(ins->tls());

@ -240,11 +240,11 @@ class CodeGeneratorARM : public CodeGeneratorShared
    void emitWasmCall(LWasmCallBase* ins);
    void visitWasmCall(LWasmCall* ins);
    void visitWasmCallI64(LWasmCallI64* ins);
    void visitWasmBoundsCheck(LWasmBoundsCheck* ins);
    void visitWasmLoad(LWasmLoad* ins);
    void visitWasmLoadI64(LWasmLoadI64* ins);
    void visitWasmUnalignedLoad(LWasmUnalignedLoad* ins);
    void visitWasmUnalignedLoadI64(LWasmUnalignedLoadI64* ins);
    void visitWasmAddOffset(LWasmAddOffset* ins);
    void visitWasmStore(LWasmStore* ins);
    void visitWasmStoreI64(LWasmStoreI64* ins);
    void visitWasmUnalignedStore(LWasmUnalignedStore* ins);

@ -608,17 +608,6 @@ LIRGeneratorARM::visitAsmJSUnsignedToFloat32(MAsmJSUnsignedToFloat32* ins)
    define(lir, ins);
}

void
LIRGeneratorARM::visitWasmBoundsCheck(MWasmBoundsCheck* ins)
{
    MDefinition* input = ins->input();
    MOZ_ASSERT(input->type() == MIRType::Int32);

    LAllocation baseAlloc = useRegisterAtStart(input);
    auto* lir = new(alloc()) LWasmBoundsCheck(baseAlloc);
    add(lir, ins);
}

void
LIRGeneratorARM::visitWasmLoad(MWasmLoad* ins)
{

@ -723,9 +712,9 @@ LIRGeneratorARM::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins)

    MDefinition* base = ins->base();
    MOZ_ASSERT(base->type() == MIRType::Int32);
    LAllocation baseAlloc;

    // For the ARM it is best to keep the 'base' in a register if a bounds check is needed.
    LAllocation baseAlloc;
    if (base->isConstant() && !ins->needsBoundsCheck()) {
        // A bounds check is only skipped for a positive index.
        MOZ_ASSERT(base->toConstant()->toInt32() >= 0);
@ -105,7 +105,6 @@ class LIRGeneratorARM : public LIRGeneratorShared
    void visitAsmSelect(MAsmSelect* ins);
    void visitAsmJSUnsignedToDouble(MAsmJSUnsignedToDouble* ins);
    void visitAsmJSUnsignedToFloat32(MAsmJSUnsignedToFloat32* ins);
    void visitWasmBoundsCheck(MWasmBoundsCheck* ins);
    void visitWasmLoad(MWasmLoad* ins);
    void visitWasmStore(MWasmStore* ins);
    void visitAsmJSLoadHeap(MAsmJSLoadHeap* ins);

@ -1450,12 +1450,12 @@ MacroAssembler::branchTruncateDoubleToInt32(FloatRegister src, Register dest, La
    ma_b(fail, Assembler::Equal);
}

template <typename T>
template <typename T, typename L>
void
MacroAssembler::branchAdd32(Condition cond, T src, Register dest, Label* label)
MacroAssembler::branchAdd32(Condition cond, T src, Register dest, L label)
{
    add32(src, dest);
    j(cond, label);
    as_b(label, cond);
}
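
Widening the branch target from Label* to a template parameter L lets one branchAdd32 body jump either to an ordinary label or to a trap target such as wasm::JumpTarget::OutOfBounds, with the as_b overload set picking the encoding. A tiny standalone sketch of the pattern; both target types below are invented:

    #include <cstdio>

    struct Label { const char* name; };       // ordinary in-code target
    enum class TrapTarget { OutOfBounds };    // out-of-line trap entry

    // Overload set playing the role of as_b(label, cond).
    static void emitBranch(const Label& l) { std::printf("b -> %s\n", l.name); }
    static void emitBranch(TrapTarget) { std::printf("b -> trap(OutOfBounds)\n"); }

    // One template body serves both target kinds, like branchAdd32<T, L>.
    template <class L>
    static void branchAdd32(int& dest, int src, L target) {
        dest += src;                          // add32(src, dest)
        emitBranch(target);                   // as_b(label, cond)
    }

    int main() {
        int acc = 1;
        branchAdd32(acc, 2, Label{"done"});
        branchAdd32(acc, 3, TrapTarget::OutOfBounds);
    }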

template <typename T>

@ -1971,6 +1971,38 @@ MacroAssembler::clampIntToUint8(Register reg)
    ma_mov(Imm32(0), reg, Signed);
}

// ========================================================================
// wasm support

template <class L>
void
MacroAssembler::wasmBoundsCheck(Condition cond, Register index, L label)
{
    BufferOffset bo = as_cmp(index, Imm8(0));
    append(wasm::BoundsCheck(bo.getOffset()));

    as_b(label, cond);
}

void
MacroAssembler::wasmPatchBoundsCheck(uint8_t* patchAt, uint32_t limit)
{
    Instruction* inst = (Instruction*) patchAt;
    MOZ_ASSERT(inst->is<InstCMP>());
    InstCMP* cmp = inst->as<InstCMP>();

    Register index;
    cmp->extractOp1(&index);

    MOZ_ASSERT(cmp->extractOp2().isImm8());

    Imm8 imm8 = Imm8(limit);
    MOZ_RELEASE_ASSERT(!imm8.invalid);

    *inst = InstALU(InvalidReg, index, imm8, OpCmp, SetCC, Always);
    // Don't call Auto Flush Cache; the wasm caller has done this for us.
}
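
On ARM the patch site is the cmp itself: the check is emitted as cmp index, #0 and the zero is later rewritten to the real limit, so the limit must be encodable as an ARM Imm8 (an 8-bit value with an even rotation). A standalone model of that constraint and the patch step; the encoder below is a simplified stand-in for Imm8, not the real encoding class:

    #include <cstdint>
    #include <cstdio>

    // Simplified model of ARM's "8-bit value, even rotation" operand.
    static bool encodableAsImm8(uint32_t v) {
        for (int rot = 0; rot < 32; rot += 2) {
            uint32_t rotated = (v << rot) | (rot ? v >> (32 - rot) : 0);
            if (rotated <= 0xFF)
                return true;
        }
        return false;
    }

    // Patch analogue: overwrite the recorded cmp's immediate with the limit.
    static bool patchBoundsCheck(uint32_t* cmpImmSlot, uint32_t limit) {
        if (!encodableAsImm8(limit))    // MOZ_RELEASE_ASSERT(!imm8.invalid)
            return false;
        *cmpImmSlot = limit;
        return true;
    }

    int main() {
        uint32_t slot = 0;              // emitted as cmp index, #0
        std::printf("%d\n", patchBoundsCheck(&slot, 1u << 16)); // 1: encodable
        std::printf("%d\n", patchBoundsCheck(&slot, 0x12345u)); // 0: is not
    }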

//}}} check_macroassembler_style
// ===============================================================

@ -2131,55 +2131,6 @@ MacroAssemblerARMCompat::loadFloat32(const BaseIndex& src, FloatRegister dest)
    ma_vldr(Address(scratch, offset), VFPRegister(dest).singleOverlay());
}

void
MacroAssemblerARMCompat::ma_loadHeapAsmJS(Register ptrReg, int size, bool needsBoundsCheck,
                                          bool faultOnOOB, FloatRegister output)
{
    if (size == 32)
        output = output.singleOverlay();

    if (!needsBoundsCheck) {
        ma_vldr(output, HeapReg, ptrReg, 0, Assembler::Always);
    } else {
        uint32_t cmpOffset = ma_BoundsCheck(ptrReg).getOffset();
        append(wasm::BoundsCheck(cmpOffset));

        if (faultOnOOB) {
            ma_b(wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual);
        }
        else {
            size_t nanOffset =
                size == 32 ? wasm::NaN32GlobalDataOffset : wasm::NaN64GlobalDataOffset;
            ma_vldr(Address(GlobalReg, nanOffset - AsmJSGlobalRegBias), output,
                    Assembler::AboveOrEqual);
        }
        ma_vldr(output, HeapReg, ptrReg, 0, Assembler::Below);
    }
}

void
MacroAssemblerARMCompat::ma_loadHeapAsmJS(Register ptrReg, int size, bool isSigned,
                                          bool needsBoundsCheck, bool faultOnOOB,
                                          Register output)
{
    if (!needsBoundsCheck) {
        ma_dataTransferN(IsLoad, size, isSigned, HeapReg, ptrReg, output, Offset,
                         Assembler::Always);
        return;
    }

    uint32_t cmpOffset = ma_BoundsCheck(ptrReg).getOffset();
    append(wasm::BoundsCheck(cmpOffset));

    if (faultOnOOB)
        ma_b(wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual);
    else
        ma_mov(Imm32(0), output, Assembler::AboveOrEqual);

    ma_dataTransferN(IsLoad, size, isSigned, HeapReg, ptrReg, output, Offset, Assembler::Below);
}

void
MacroAssemblerARMCompat::store8(Imm32 imm, const Address& address)
{

@ -2356,51 +2307,6 @@ MacroAssemblerARMCompat::storePtr(Register src, AbsoluteAddress dest)
    storePtr(src, Address(scratch, 0));
}

void
MacroAssemblerARMCompat::ma_storeHeapAsmJS(Register ptrReg, int size, bool needsBoundsCheck,
                                           bool faultOnOOB, FloatRegister value)
{
    if (!needsBoundsCheck) {
        BaseIndex addr(HeapReg, ptrReg, TimesOne, 0);
        if (size == 32)
            asMasm().storeFloat32(value, addr);
        else
            asMasm().storeDouble(value, addr);
    } else {
        uint32_t cmpOffset = ma_BoundsCheck(ptrReg).getOffset();
        append(wasm::BoundsCheck(cmpOffset));

        if (faultOnOOB)
            ma_b(wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual);

        if (size == 32)
            value = value.singleOverlay();

        ma_vstr(value, HeapReg, ptrReg, 0, 0, Assembler::Below);
    }
}

void
MacroAssemblerARMCompat::ma_storeHeapAsmJS(Register ptrReg, int size, bool isSigned,
                                           bool needsBoundsCheck, bool faultOnOOB,
                                           Register value)
{
    if (!needsBoundsCheck) {
        ma_dataTransferN(IsStore, size, isSigned, HeapReg, ptrReg, value, Offset,
                         Assembler::Always);
        return;
    }

    uint32_t cmpOffset = ma_BoundsCheck(ptrReg).getOffset();
    append(wasm::BoundsCheck(cmpOffset));

    if (faultOnOOB)
        ma_b(wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual);

    ma_dataTransferN(IsStore, size, isSigned, HeapReg, ptrReg, value, Offset,
                     Assembler::Below);
}

// Note: this function clobbers the input register.
void
MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output)

@ -981,11 +981,6 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
    void loadFloat32(const Address& addr, FloatRegister dest);
    void loadFloat32(const BaseIndex& src, FloatRegister dest);

    void ma_loadHeapAsmJS(Register ptrReg, int size, bool needsBoundsCheck, bool faultOnOOB,
                          FloatRegister output);
    void ma_loadHeapAsmJS(Register ptrReg, int size, bool isSigned, bool needsBoundsCheck,
                          bool faultOnOOB, Register output);

    void store8(Register src, const Address& address);
    void store8(Imm32 imm, const Address& address);
    void store8(Register src, const BaseIndex& address);

@ -1022,11 +1017,6 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
        ma_vmov(src, dest, cc);
    }

    void ma_storeHeapAsmJS(Register ptrReg, int size, bool needsBoundsCheck, bool faultOnOOB,
                           FloatRegister value);
    void ma_storeHeapAsmJS(Register ptrReg, int size, bool isSigned, bool needsBoundsCheck,
                           bool faultOnOOB, Register value);

  private:
    template<typename T>
    Register computePointer(const T& src, Register r);

@ -1444,9 +1434,6 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
        ma_mov(c, lr);
        ma_str(lr, dest);
    }
    BufferOffset ma_BoundsCheck(Register bounded) {
        return as_cmp(bounded, Imm8(0));
    }

    void moveFloat32(FloatRegister src, FloatRegister dest, Condition cc = Always) {
        as_vmov(VFPRegister(dest).singleOverlay(), VFPRegister(src).singleOverlay(), cc);

@ -456,9 +456,6 @@ hasMultiAlias()
    return false;
}

static const size_t WasmCheckedImmediateRange = 0;
static const size_t WasmImmediateRange = 0;

} // namespace jit
} // namespace js

@ -636,20 +636,6 @@ Assembler::PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm)
    MOZ_CRASH("PatchInstructionImmediate()");
}

void
Assembler::UpdateBoundsCheck(uint8_t* patchAt, uint32_t heapLength)
{
    Instruction* inst = (Instruction*) patchAt;
    int32_t mask = ~(heapLength - 1);
    unsigned n, imm_s, imm_r;
    if (!IsImmLogical(mask, 32, &n, &imm_s, &imm_r))
        MOZ_CRASH("Could not encode immediate!?");

    inst->SetImmR(imm_r);
    inst->SetImmS(imm_s);
    inst->SetBitN(n);
}
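
The removed ARM64 helper patched a logical-immediate mask of ~(heapLength - 1): for a power-of-two heap, index & mask is non-zero exactly when index >= heapLength, so the masked test doubles as the bounds check. The invariant in plain C++ (sketch only, no instruction encoding):

    #include <cstdint>
    #include <cstdio>

    // For power-of-two heapLength, (index & ~(heapLength - 1)) != 0
    // iff index >= heapLength, which is what the patched immediate tested.
    static bool outOfBoundsByMask(uint32_t index, uint32_t heapLength) {
        uint32_t mask = ~(heapLength - 1);
        return (index & mask) != 0;
    }

    int main() {
        const uint32_t heap = 1u << 16;                         // 64 KiB
        std::printf("%d\n", outOfBoundsByMask(heap - 1, heap)); // 0: in bounds
        std::printf("%d\n", outOfBoundsByMask(heap, heap));     // 1: out
    }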

void
Assembler::retarget(Label* label, Label* target)
{

@ -377,8 +377,6 @@ class Assembler : public vixl::Assembler
    static const size_t OffsetOfJumpTableEntryPointer = 8;

  public:
    static void UpdateBoundsCheck(uint8_t* patchAt, uint32_t heapLength);

    void writeCodePointer(AbsoluteLabel* absoluteLabel) {
        MOZ_ASSERT(!absoluteLabel->bound());
        uintptr_t x = LabelBase::INVALID_OFFSET;

@ -338,12 +338,6 @@ LIRGeneratorARM64::visitWasmTruncateToInt64(MWasmTruncateToInt64* ins)
    MOZ_CRASH("NY");
}

void
LIRGeneratorARM64::visitWasmBoundsCheck(MWasmBoundsCheck* ins)
{
    MOZ_CRASH("NY");
}

void
LIRGeneratorARM64::visitWasmLoad(MWasmLoad* ins)
{

@ -117,7 +117,6 @@ class LIRGeneratorARM64 : public LIRGeneratorShared
    void visitSubstr(MSubstr* ins);
    void visitRandom(MRandom* ins);
    void visitWasmTruncateToInt64(MWasmTruncateToInt64* ins);
    void visitWasmBoundsCheck(MWasmBoundsCheck* ins);
    void visitWasmLoad(MWasmLoad* ins);
    void visitWasmStore(MWasmStore* ins);
    void visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins);

@ -1010,12 +1010,12 @@ MacroAssembler::branchTruncateDoubleToInt32(FloatRegister src, Register dest, La
    convertDoubleToInt32(src, dest, fail);
}

template <typename T>
template <typename T, typename L>
void
MacroAssembler::branchAdd32(Condition cond, T src, Register dest, Label* label)
MacroAssembler::branchAdd32(Condition cond, T src, Register dest, L label)
{
    adds32(src, dest);
    branch(cond, label);
    B(label, cond);
}

template <typename T>

@ -1545,6 +1545,22 @@ MacroAssembler::clampIntToUint8(Register reg)
    Csel(reg32, reg32, scratch32, Assembler::LessThanOrEqual);
}

// ========================================================================
// wasm support

template <class L>
void
MacroAssembler::wasmBoundsCheck(Condition cond, Register index, L label)
{
    MOZ_CRASH("NYI");
}

void
MacroAssembler::wasmPatchBoundsCheck(uint8_t* patchAt, uint32_t limit)
{
    MOZ_CRASH("NYI");
}

//}}} check_macroassembler_style
// ===============================================================
Some files were not shown because too many files have changed in this diff.