mirror of https://github.com/mozilla/gecko-dev.git
synced 2024-11-28 15:23:51 +00:00

commit 473e642b9c
Merge inbound to central, a=merge CLOSED TREE
@@ -12,6 +12,8 @@
 #include "Logging.h"
 #endif
 
+#include "mozilla/UniquePtr.h"
+
 using namespace mozilla;
 using namespace mozilla::a11y;
 
@@ -162,7 +164,7 @@ EventTree::Process(const RefPtr<DocAccessible>& aDeathGrip)
       return;
     }
   }
-  mFirst = mFirst->mNext.forget();
+  mFirst = Move(mFirst->mNext);
 }
 
 MOZ_ASSERT(mContainer || mDependentEvents.IsEmpty(),
@@ -229,11 +231,12 @@ EventTree*
EventTree::FindOrInsert(Accessible* aContainer)
 {
   if (!mFirst) {
-    return mFirst = new EventTree(aContainer, true);
+    mFirst.reset(new EventTree(aContainer, true));
+    return mFirst.get();
   }
 
   EventTree* prevNode = nullptr;
-  EventTree* node = mFirst;
+  EventTree* node = mFirst.get();
   do {
     MOZ_ASSERT(!node->mContainer->IsApplication(),
                "No event for application accessible is expected here");
@@ -277,18 +280,18 @@ EventTree::FindOrInsert(Accessible* aContainer)
     // Insert the tail node into the hierarchy between the current node and
     // its parent.
     node->mFireReorder = false;
-    nsAutoPtr<EventTree>& nodeOwnerRef = prevNode ? prevNode->mNext : mFirst;
-    nsAutoPtr<EventTree> newNode(new EventTree(aContainer, mDependentEvents.IsEmpty()));
+    UniquePtr<EventTree>& nodeOwnerRef = prevNode ? prevNode->mNext : mFirst;
+    UniquePtr<EventTree> newNode(new EventTree(aContainer, mDependentEvents.IsEmpty()));
     newNode->mFirst = Move(nodeOwnerRef);
     nodeOwnerRef = Move(newNode);
     nodeOwnerRef->mNext = Move(node->mNext);
 
     // Check if a next node is contained by the given node too, and move them
     // under the given node if so.
-    prevNode = nodeOwnerRef;
-    node = nodeOwnerRef->mNext;
-    nsAutoPtr<EventTree>* nodeRef = &nodeOwnerRef->mNext;
-    EventTree* insNode = nodeOwnerRef->mFirst;
+    prevNode = nodeOwnerRef.get();
+    node = nodeOwnerRef->mNext.get();
+    UniquePtr<EventTree>* nodeRef = &nodeOwnerRef->mNext;
+    EventTree* insNode = nodeOwnerRef->mFirst.get();
     while (node) {
       Accessible* curParent = node->mContainer;
       while (curParent && !curParent->IsDoc()) {
@@ -301,7 +304,7 @@ EventTree::FindOrInsert(Accessible* aContainer)
 
         node->mFireReorder = false;
         insNode->mNext = Move(*nodeRef);
-        insNode = insNode->mNext;
+        insNode = insNode->mNext.get();
 
         prevNode->mNext = Move(node->mNext);
         node = prevNode;
@@ -310,14 +313,14 @@ EventTree::FindOrInsert(Accessible* aContainer)
 
       prevNode = node;
       nodeRef = &node->mNext;
-      node = node->mNext;
+      node = node->mNext.get();
     }
 
-    return nodeOwnerRef;
+    return nodeOwnerRef.get();
   }
 
   prevNode = node;
-  } while ((node = node->mNext));
+  } while ((node = node->mNext.get()));
 
 MOZ_ASSERT(prevNode, "Nowhere to insert");
 MOZ_ASSERT(!prevNode->mNext, "Taken by another node");
@@ -327,7 +330,8 @@ EventTree::FindOrInsert(Accessible* aContainer)
   // if a dependent show event target contains the given container then do not
   // emit show / hide events (see Process() method)
 
-  return prevNode->mNext = new EventTree(aContainer, mDependentEvents.IsEmpty());
+  prevNode->mNext.reset(new EventTree(aContainer, mDependentEvents.IsEmpty()));
+  return prevNode->mNext.get();
 }
 
 void
@@ -357,14 +361,14 @@ EventTree::Find(const Accessible* aContainer) const
   }
 
   if (et->mFirst) {
-    et = et->mFirst;
+    et = et->mFirst.get();
     const EventTree* cet = et->Find(aContainer);
     if (cet) {
       return cet;
     }
   }
 
-  et = et->mNext;
+  et = et->mNext.get();
   const EventTree* cet = et->Find(aContainer);
   if (cet) {
     return cet;
@@ -421,7 +425,7 @@ EventTree::Mutated(AccMutationEvent* aEv)
 {
   // If shown or hidden node is a root of previously mutated subtree, then
   // discard those subtree mutations as we are no longer interested in them.
-  nsAutoPtr<EventTree>* node = &mFirst;
+  UniquePtr<EventTree>* node = &mFirst;
   while (*node) {
     if ((*node)->mContainer == aEv->mAccessible) {
       *node = Move((*node)->mNext);
@@ -10,6 +10,7 @@
 #include "Accessible.h"
 
 #include "mozilla/RefPtr.h"
+#include "mozilla/UniquePtr.h"
 
 namespace mozilla {
 namespace a11y {
@@ -101,8 +102,8 @@ private:
   void Mutated(AccMutationEvent* aEv);
   void Clear();
 
-  nsAutoPtr<EventTree> mFirst;
-  nsAutoPtr<EventTree> mNext;
+  UniquePtr<EventTree> mFirst;
+  UniquePtr<EventTree> mNext;
 
   Accessible* mContainer;
   nsTArray<RefPtr<AccMutationEvent>> mDependentEvents;
@@ -2211,7 +2211,7 @@ Accessible::EmbeddedChildCount()
 {
   if (mStateFlags & eHasTextKids) {
     if (!mEmbeddedObjCollector)
-      mEmbeddedObjCollector = new EmbeddedObjCollector(this);
+      mEmbeddedObjCollector.reset(new EmbeddedObjCollector(this));
     return mEmbeddedObjCollector->Count();
   }
 
@@ -2223,8 +2223,8 @@ Accessible::GetEmbeddedChildAt(uint32_t aIndex)
 {
   if (mStateFlags & eHasTextKids) {
     if (!mEmbeddedObjCollector)
-      mEmbeddedObjCollector = new EmbeddedObjCollector(this);
-    return mEmbeddedObjCollector ?
+      mEmbeddedObjCollector.reset(new EmbeddedObjCollector(this));
+    return mEmbeddedObjCollector.get() ?
       mEmbeddedObjCollector->GetAccessibleAt(aIndex) : nullptr;
   }
 
@@ -2236,8 +2236,8 @@ Accessible::GetIndexOfEmbeddedChild(Accessible* aChild)
 {
   if (mStateFlags & eHasTextKids) {
     if (!mEmbeddedObjCollector)
-      mEmbeddedObjCollector = new EmbeddedObjCollector(this);
-    return mEmbeddedObjCollector ?
+      mEmbeddedObjCollector.reset(new EmbeddedObjCollector(this));
+    return mEmbeddedObjCollector.get() ?
       mEmbeddedObjCollector->GetIndexAt(aChild) : -1;
   }
@@ -11,7 +11,8 @@
 #include "mozilla/a11y/Role.h"
 #include "mozilla/a11y/States.h"
 
-#include "nsAutoPtr.h"
+#include "mozilla/UniquePtr.h"
+
 #include "nsIContent.h"
 #include "nsString.h"
 #include "nsTArray.h"
@@ -1138,7 +1139,7 @@ protected:
   friend class xpcAccessible;
   friend class TreeMutation;
 
-  nsAutoPtr<mozilla::a11y::EmbeddedObjCollector> mEmbeddedObjCollector;
+  UniquePtr<mozilla::a11y::EmbeddedObjCollector> mEmbeddedObjCollector;
   union {
     int32_t mIndexOfEmbeddedChild;
    uint32_t mProxyInterfaces;
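
The nsAutoPtr-to-UniquePtr conversions above are mechanical once the ownership rules are spelled out: assigning a raw pointer becomes reset(), implicit raw-pointer reads become get(), and transfers between owning slots go through Move(). A minimal sketch of the idiom — the Node type is hypothetical, not the real EventTree, and Gecko's own headers are assumed:

    #include "mozilla/Move.h"
    #include "mozilla/UniquePtr.h"

    using namespace mozilla;

    struct Node {
      UniquePtr<Node> mNext;  // owning link, as in EventTree::mNext
    };

    Node*
    Append(UniquePtr<Node>& aHead)
    {
      // nsAutoPtr style was `return aHead = new Node();`. UniquePtr splits
      // this into an explicit ownership grab plus a non-owning observation.
      aHead.reset(new Node());

      // Transfer ownership out of one slot; the source is nulled afterwards.
      UniquePtr<Node> stolen = Move(aHead->mNext);

      // Hand it back; Move() is required because UniquePtr is move-only.
      aHead->mNext = Move(stolen);

      return aHead.get();  // borrow without affecting ownership
    }

The payoff of the move-only type is that every ownership transfer in FindOrInsert() is now visible at the call site instead of hidden in an implicit-conversion operator.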
@@ -4,11 +4,13 @@ support-files =
   empty_file.html
   file_reflect_cookie_into_title.html
   favicon-normal32.png
+  file_set_storages.html
   serviceworker.html
   worker.js
 
 [browser_aboutURLs.js]
 [browser_favicon.js]
+[browser_forgetaboutsite.js]
 [browser_usercontext.js]
 [browser_usercontextid_tabdrop.js]
 skip-if = os == "mac" || os == "win" # Intermittent failure - bug 1268276
@@ -0,0 +1,351 @@
/*
 * Bug 1238183 - Test cases for forgetAboutSite with userContextId.
 */

const { classes: Cc, Constructor: CC, interfaces: Ci, utils: Cu } = Components;

Cu.import("resource://gre/modules/ForgetAboutSite.jsm");
Cu.import("resource://gre/modules/Services.jsm");
let {HttpServer} = Cu.import("resource://testing-common/httpd.js", {});
let LoadContextInfo = Cc["@mozilla.org/load-context-info-factory;1"]
                        .getService(Ci.nsILoadContextInfoFactory);
let css = Cc["@mozilla.org/netwerk/cache-storage-service;1"]
            .getService(Ci.nsICacheStorageService);

const USER_CONTEXTS = [
  "default",
  "personal",
  "work",
];
const TEST_HOST = "example.com";
const TEST_URL = "http://" + TEST_HOST + "/browser/browser/components/contextualidentity/test/browser/";
const COOKIE_NAME = "userContextId";

// Counter for image load hits.
let gHits = 0;

let gHttpServer = null;

function imageHandler(metadata, response) {
  // A 1x1 PNG image.
  // Source: https://commons.wikimedia.org/wiki/File:1x1.png (Public Domain)
  const IMAGE = atob("iVBORw0KGgoAAAANSUhEUgAAAAEAAAABAQMAAAAl21bKAAAAA1BMVEUAA" +
                     "ACnej3aAAAAAXRSTlMAQObYZgAAAApJREFUCNdjYAAAAAIAAeIhvDMAAAAASUVORK5CYII=");
  gHits++;
  response.setHeader("Cache-Control", "max-age=10000", false);
  response.setStatusLine(metadata.httpVersion, 200, "OK");
  response.setHeader("Content-Type", "image/png", false);
  response.write(IMAGE);
}

function loadImagePageHandler(metadata, response) {
  response.setHeader("Cache-Control", "max-age=10000", false);
  response.setStatusLine(metadata.httpVersion, 200, "Ok");
  response.setHeader("Content-Type", "text/html", false);
  let body = "<!DOCTYPE HTML>\
              <html>\
                <head>\
                  <meta charset='utf-8'>\
                  <title>Load Image</title>\
                </head>\
                <body>\
                  <img src='image.png'>\
                </body>\
              </html>";
  response.bodyOutputStream.write(body, body.length);
}

function* openTabInUserContext(uri, userContextId) {
  // Open the tab in the correct userContextId.
  let tab = gBrowser.addTab(uri, {userContextId});

  // Select tab and make sure its browser is focused.
  gBrowser.selectedTab = tab;
  tab.ownerDocument.defaultView.focus();

  let browser = gBrowser.getBrowserForTab(tab);
  yield BrowserTestUtils.browserLoaded(browser);
  return {tab, browser};
}

function getCookiesForOA(host, userContextId) {
  return Services.cookies.getCookiesFromHost(host, {userContextId});
}

function createURI(uri)
{
  let ioServ = Cc["@mozilla.org/network/io-service;1"]
                 .getService(Components.interfaces.nsIIOService);
  return ioServ.newURI(uri, null, null);
}

function getCacheStorage(where, lci, appcache)
{
  if (!lci) lci = LoadContextInfo.default;
  switch (where) {
    case "disk": return css.diskCacheStorage(lci, false);
    case "memory": return css.memoryCacheStorage(lci);
    case "appcache": return css.appCacheStorage(lci, appcache);
    case "pin": return css.pinningCacheStorage(lci);
  }
  return null;
}

function OpenCacheEntry(key, where, flags, lci)
{
  return new Promise(resolve => {
    key = createURI(key);
    function CacheListener() { }
    CacheListener.prototype = {
      _appCache: null,

      QueryInterface: function (iid) {
        if (iid.equals(Components.interfaces.nsICacheEntryOpenCallback) ||
            iid.equals(Components.interfaces.nsISupports))
          return this;
        throw Components.results.NS_ERROR_NO_INTERFACE;
      },

      onCacheEntryCheck: function(entry, appCache) {
        return Ci.nsICacheEntryOpenCallback.ENTRY_WANTED;
      },

      onCacheEntryAvailable: function (entry, isnew, appCache, status) {
        resolve();
      },

      run: function () {
        let storage = getCacheStorage(where, lci, this._appCache);
        storage.asyncOpenURI(key, "", flags, this);
      }
    };

    (new CacheListener()).run();
  });
}

//
// Test functions.
//

// Cookies
function* test_cookie_cleared() {
  let tabs = [];

  for (let userContextId of Object.keys(USER_CONTEXTS)) {
    // Load the page in 3 different contexts and set a cookie
    // which should only be visible in that context.
    let value = USER_CONTEXTS[userContextId];

    // Open our tab in the given user context.
    tabs[userContextId] = yield* openTabInUserContext(TEST_URL + "file_reflect_cookie_into_title.html?" + value, userContextId);

    // Close this tab.
    yield BrowserTestUtils.removeTab(tabs[userContextId].tab);
  }

  // Check that the cookies have been set properly.
  for (let userContextId of Object.keys(USER_CONTEXTS)) {
    let enumerator = getCookiesForOA(TEST_HOST, userContextId);
    ok(enumerator.hasMoreElements(), "Cookies available");

    let foundCookie = enumerator.getNext().QueryInterface(Ci.nsICookie2);
    Assert.equal(foundCookie["name"], COOKIE_NAME, "Check cookie name");
    Assert.equal(foundCookie["value"], USER_CONTEXTS[userContextId], "Check cookie value");
  }

  // Forget the site.
  ForgetAboutSite.removeDataFromDomain(TEST_HOST);

  // Check whether the cookies have been cleared.
  for (let userContextId of Object.keys(USER_CONTEXTS)) {
    let enumerator = getCookiesForOA(TEST_HOST, userContextId);
    ok(!enumerator.hasMoreElements(), "No Cookie should be here");
  }
}

// Cache
function* test_cache_cleared() {
  // First, add some caches.
  for (let userContextId of Object.keys(USER_CONTEXTS)) {
    yield OpenCacheEntry("http://" + TEST_HOST + "/",
                         "disk",
                         Ci.nsICacheStorage.OPEN_NORMALLY,
                         LoadContextInfo.custom(false, false, {userContextId}));

    yield OpenCacheEntry("http://" + TEST_HOST + "/",
                         "memory",
                         Ci.nsICacheStorage.OPEN_NORMALLY,
                         LoadContextInfo.custom(false, false, {userContextId}));
  }

  // Check that the caches have been set correctly.
  for (let userContextId of Object.keys(USER_CONTEXTS)) {
    let mem = getCacheStorage("memory");
    let disk = getCacheStorage("disk");

    Assert.ok(mem.exists(createURI("http://" + TEST_HOST + "/"), ""), "The memory cache has been set correctly");
    Assert.ok(disk.exists(createURI("http://" + TEST_HOST + "/"), ""), "The disk cache has been set correctly");
  }

  // Forget the site.
  ForgetAboutSite.removeDataFromDomain(TEST_HOST);

  // Check whether the caches have been removed.
  for (let userContextId of Object.keys(USER_CONTEXTS)) {
    let mem = getCacheStorage("memory");
    let disk = getCacheStorage("disk");

    Assert.ok(!mem.exists(createURI("http://" + TEST_HOST + "/"), ""), "The memory cache is cleared");
    Assert.ok(!disk.exists(createURI("http://" + TEST_HOST + "/"), ""), "The disk cache is cleared");
  }
}

// Image Cache
function* test_image_cache_cleared() {
  let tabs = [];

  for (let userContextId of Object.keys(USER_CONTEXTS)) {
    // Open our tab in the given user context to cache the image.
    tabs[userContextId] = yield* openTabInUserContext('http://localhost:' + gHttpServer.identity.primaryPort + '/loadImage.html',
                                                      userContextId);
    yield BrowserTestUtils.removeTab(tabs[userContextId].tab);
  }

  // Check that the image cache works with the userContextId.
  todo_is(gHits, 3, "The image should be loaded three times. This test should be enabled after bug 1270680 has landed");

  // Reset the cache count.
  gHits = 0;

  // Forget the site.
  ForgetAboutSite.removeDataFromDomain("localhost:" + gHttpServer.identity.primaryPort + "/");

  // Load again.
  for (let userContextId of Object.keys(USER_CONTEXTS)) {
    // Open our tab in the given user context to cache the image.
    tabs[userContextId] = yield* openTabInUserContext('http://localhost:' + gHttpServer.identity.primaryPort + '/loadImage.html',
                                                      userContextId);
    yield BrowserTestUtils.removeTab(tabs[userContextId].tab);
  }

  // Check that the image cache was cleared and the server gets another three hits.
  todo_is(gHits, 3, "The image should be loaded three times. This test should be enabled after bug 1270680 has landed");
}

// Offline Storage
function* test_storage_cleared() {
  for (let userContextId of Object.keys(USER_CONTEXTS)) {
    // Load the page in 3 different contexts and set the local storage
    // which should only be visible in that context.
    let value = USER_CONTEXTS[userContextId];

    // Open our tab in the given user context.
    let tabInfo = yield* openTabInUserContext(TEST_URL + "file_set_storages.html?" + value, userContextId);

    // Check that the local storage has been set correctly.
    let win = tabInfo.browser.contentWindow;
    Assert.equal(win.localStorage.getItem("userContext"), USER_CONTEXTS[userContextId], "Check the local storage value");

    // Check that the session storage has been set correctly.
    Assert.equal(win.sessionStorage.getItem("userContext"), USER_CONTEXTS[userContextId], "Check the session storage value");

    // Check that the indexedDB has been set correctly.
    yield ContentTask.spawn(tabInfo.browser, { userContext: USER_CONTEXTS[userContextId] }, function* (arg) {
      let request = content.indexedDB.open("idb", 1);

      let db = yield new Promise(done => {
        request.onsuccess = event => {
          done(event.target.result);
        };
      });

      let transaction = db.transaction(["obj"], "readonly");
      let store = transaction.objectStore("obj");
      let storeRequest = store.get(1);

      yield new Promise(done => {
        storeRequest.onsuccess = event => {
          let res = storeRequest.result;
          Assert.equal(res.userContext, arg.userContext, "Check the indexedDB value");
          done();
        };
      });
    });

    // Close this tab.
    yield BrowserTestUtils.removeTab(tabInfo.tab);
  }

  // Forget the site.
  ForgetAboutSite.removeDataFromDomain(TEST_HOST);

  // Open the tabs again without setting the storages and check whether they
  // have been cleared.
  for (let userContextId of Object.keys(USER_CONTEXTS)) {
    // Open our tab in the given user context without setting local storage.
    let tabInfo = yield* openTabInUserContext(TEST_URL + "file_set_storages.html", userContextId);
    let win = tabInfo.browser.contentWindow;

    // Check whether the local storage has been cleared.
    Assert.ok(!win.localStorage.getItem("userContext"), "The local storage has been cleared");

    // Check whether the session storage has been cleared.
    Assert.ok(!win.sessionStorage.getItem("userContext"), "The session storage has been cleared");

    // Check whether the indexedDB has been cleared.
    yield ContentTask.spawn(tabInfo.browser, null, function* () {
      let request = content.indexedDB.open("idb", 1);

      let db = yield new Promise(done => {
        request.onsuccess = event => {
          done(event.target.result);
        };
      });
      try {
        let transaction = db.transaction(["obj"], "readonly");
        Assert.ok(false, "The indexedDB should not exist");
      } catch (e) {
        Assert.equal(e.name, "NotFoundError", "The indexedDB does not exist as expected");
      }
    });

    // Close the tab.
    yield BrowserTestUtils.removeTab(tabInfo.tab);
  }
}

add_task(function* setup() {
  // Make sure userContext is enabled.
  yield new Promise(resolve => {
    SpecialPowers.pushPrefEnv({"set": [
      ["privacy.userContext.enabled", true]
    ]}, resolve);
  });

  // Create an HTTP server for the image cache test.
  if (!gHttpServer) {
    gHttpServer = new HttpServer();
    gHttpServer.registerPathHandler('/image.png', imageHandler);
    gHttpServer.registerPathHandler('/loadImage.html', loadImagePageHandler);
    gHttpServer.start(-1);
  }
});

let tests = [
  test_cookie_cleared,
  test_cache_cleared,
  test_image_cache_cleared,
  test_storage_cleared,
];

add_task(function* test() {
  for (let i = 0; i < tests.length; i++)
    add_task(tests[i]);
});

registerCleanupFunction(() => {
  gHttpServer.stop(() => {
    gHttpServer = null;
  });
});
@@ -0,0 +1,41 @@
<html>
<head>
  <meta charset="UTF-8">
  <title>Bug 1238183</title>
</head>
<body>
<script type="application/javascript;version=1.7">
  "use strict";

  // if we have a query string, use it to set storages
  if (window.location.search.length > 0) {
    let context_name = window.location.search.substr(1);
    localStorage.setItem("userContext", context_name);
    sessionStorage.setItem("userContext", context_name);

    let request = indexedDB.open("idb", 1);

    request.onerror = function() {
      throw new Error("error opening db connection");
    };

    request.onupgradeneeded = event => {
      let db = event.target.result;
      let store = db.createObjectStore("obj", { keyPath: "id" });
      store.createIndex("userContext", "userContext", { unique: false });
    };

    request.onsuccess = event => {
      let db = request.result;
      let transaction = db.transaction(["obj"], "readwrite");
      let store = transaction.objectStore("obj");
      store.add({id: 1, userContext: context_name});

      transaction.oncomplete = () => {
        db.close();
      };
    };
  }
</script>
</body>
</html>
@@ -13,4 +13,8 @@ export MOZ_TELEMETRY_REPORTING=1
 
 ac_add_options --disable-stdcxx-compat
 
+# Don't autoclobber l10n, as this can lead to missing binaries and broken builds
+# Bug 1283438
+mk_add_options AUTOCLOBBER=
+
 . "$topsrcdir/build/mozconfig.common.override"
@@ -13,4 +13,8 @@ export MOZ_TELEMETRY_REPORTING=1
 
 ac_add_options --disable-stdcxx-compat
 
+# Don't autoclobber l10n, as this can lead to missing binaries and broken builds
+# Bug 1283438
+mk_add_options AUTOCLOBBER=
+
 . "$topsrcdir/build/mozconfig.common.override"
@@ -14,5 +14,9 @@ export MOZILLA_OFFICIAL=1
 # Enable Telemetry
 export MOZ_TELEMETRY_REPORTING=1
 
+# Don't autoclobber l10n, as this can lead to missing binaries and broken builds
+# Bug 1283438
+mk_add_options AUTOCLOBBER=
+
 . "$topsrcdir/build/mozconfig.common.override"
 . "$topsrcdir/build/mozconfig.cache"
@@ -4,5 +4,9 @@ ac_add_options --with-l10n-base=../../l10n
 ac_add_options --enable-update-channel=${MOZ_UPDATE_CHANNEL}
 ac_add_options --with-branding=browser/branding/nightly
 
+# Don't autoclobber l10n, as this can lead to missing binaries and broken builds
+# Bug 1283438
+mk_add_options AUTOCLOBBER=
+
 . "$topsrcdir/build/mozconfig.common.override"
 . "$topsrcdir/build/mozconfig.cache"
@@ -10,6 +10,10 @@ export MOZILLA_OFFICIAL=1
 # Enable Telemetry
 export MOZ_TELEMETRY_REPORTING=1
 
+# Don't autoclobber l10n, as this can lead to missing binaries and broken builds
+# Bug 1283438
+mk_add_options AUTOCLOBBER=
+
 . $topsrcdir/build/win32/mozconfig.vs-latest
 
 . "$topsrcdir/build/mozconfig.common.override"
@@ -11,6 +11,10 @@ export MOZILLA_OFFICIAL=1
 # Enable Telemetry
 export MOZ_TELEMETRY_REPORTING=1
 
+# Don't autoclobber l10n, as this can lead to missing binaries and broken builds
+# Bug 1283438
+mk_add_options AUTOCLOBBER=
+
 . $topsrcdir/build/win64/mozconfig.vs-latest
 
 . "$topsrcdir/build/mozconfig.common.override"
@@ -7,4 +7,3 @@ support-files =
 [test_basic.html]
 [test_webkitdirectory.html]
 [test_worker_basic.html]
-skip-if = true # bug 1283344
@@ -8,17 +8,20 @@
 </head>
 
 <body>
-<input id="fileList" type="file"></input>
 <script type="application/javascript;version=1.7">
 
 var directory;
+var fileList;
+
+function create_fileList(aPath) {
+  fileList = document.createElement('input');
+  fileList.setAttribute('type', 'file');
+  document.body.appendChild(fileList);
 
   var url = SimpleTest.getTestFileURL("script_fileList.js");
   var script = SpecialPowers.loadChromeScript(url);
 
   function onOpened(message) {
-    var fileList = document.getElementById('fileList');
     SpecialPowers.wrap(fileList).mozSetDirectory(message.dir);
 
     fileList.getFilesAndDirectories().then(function(array) {
@@ -40,7 +43,6 @@ function test_simpleFilePicker(aPath) {
   var script = SpecialPowers.loadChromeScript(url);
 
   function onOpened(message) {
-    var fileList = document.getElementById('fileList');
     SpecialPowers.wrap(fileList).mozSetFileArray([message.file]);
 
     is(fileList.files.length, 1, "we want 1 element");
@@ -61,7 +63,6 @@ function test_duplicateGetFilesAndDirectories() {
   var script = SpecialPowers.loadChromeScript(url);
 
   function onOpened(message) {
-    var fileList = document.getElementById('fileList');
     SpecialPowers.wrap(fileList).mozSetDirectory(message.dir);
 
     var p1 = fileList.getFilesAndDirectories();
@@ -82,7 +83,6 @@ function test_inputGetFiles() {
   var script = SpecialPowers.loadChromeScript(url);
 
   function onOpened(message) {
-    var fileList = document.getElementById('fileList');
     SpecialPowers.wrap(fileList).mozSetDirectory(message.dir);
 
     fileList.getFilesAndDirectories()
@@ -8,15 +8,19 @@
 </head>
 
 <body>
-<input id="fileList" type="file"></input>
 <script type="application/javascript;version=1.7">
 
+var fileList;
+
+function create_fileList() {
+  fileList = document.createElement('input');
+  fileList.setAttribute('type', 'file');
+  document.body.appendChild(fileList);
 
   var url = SimpleTest.getTestFileURL("script_fileList.js");
   var script = SpecialPowers.loadChromeScript(url);
 
   function onOpened(message) {
-    var fileList = document.getElementById('fileList');
     SpecialPowers.wrap(fileList).mozSetDirectory(message.dir);
     script.destroy();
     next();
@@ -27,7 +31,6 @@ function create_fileList() {
 }
 
 function test_worker() {
-  var fileList = document.getElementById('fileList');
   fileList.getFilesAndDirectories().then(function(array) {
     var worker = new Worker('worker_basic.js');
     worker.onmessage = function(e) {
@@ -1,6 +1,7 @@
 [DEFAULT]
 head = head.js head-http2.js
 tail =
 firefox-appdir = browser
 # Push notifications and alarms are currently disabled on Android.
 skip-if = toolkit == 'android'
dom/security/test/csp/file_require_sri_meta.js (new file, 1 line)
@@ -0,0 +1 @@
+var foo = 24;
dom/security/test/csp/file_require_sri_meta.sjs (new file, 54 lines)
@@ -0,0 +1,54 @@
// custom *.sjs for Bug 1277557
// META CSP: require-sri-for script;

const PRE_INTEGRITY =
  "<!DOCTYPE HTML>" +
  "<html><head><meta charset=\"utf-8\">" +
  "<title>Bug 1277557 - CSP require-sri-for does not block when CSP is in meta tag</title>" +
  "<meta http-equiv=\"Content-Security-Policy\" content=\"require-sri-for script; script-src 'unsafe-inline' *\">" +
  "</head>" +
  "<body>" +
  "<script id=\"testscript\"" +
  // Using Math.random() to avoid confusing cache behaviors within the test
  " src=\"http://mochi.test:8888/tests/dom/security/test/csp/file_require_sri_meta.js?" + Math.random() + "\"";

const WRONG_INTEGRITY =
  " integrity=\"sha384-oqVuAfXRKap7fdgcCY5uykM6+R9GqQ8K/uxy9rx7HNQlGYl1kPzQho1wx4JwY8wC\"";

const CORRECT_INTEGRITY =
  " integrity=\"sha384-PkcuZQHmjBQKRyv1v3x0X8qFmXiSyFyYIP+f9SU86XWvRneifdNCPg2cYFWBuKsF\"";

const POST_INTEGRITY =
  " onload=\"window.parent.postMessage({result: 'script-loaded'}, '*');\"" +
  " onerror=\"window.parent.postMessage({result: 'script-blocked'}, '*');\"" +
  "></script>" +
  "</body>" +
  "</html>";

function handleRequest(request, response)
{
  // avoid confusing cache behaviors
  response.setHeader("Cache-Control", "no-cache", false);
  response.setHeader("Content-Type", "text/html", false);

  var queryString = request.queryString;

  if (queryString === "no-sri") {
    response.write(PRE_INTEGRITY + POST_INTEGRITY);
    return;
  }

  if (queryString === "wrong-sri") {
    response.write(PRE_INTEGRITY + WRONG_INTEGRITY + POST_INTEGRITY);
    return;
  }

  if (queryString === "correct-sri") {
    response.write(PRE_INTEGRITY + CORRECT_INTEGRITY + POST_INTEGRITY);
    return;
  }

  // we should never get here, but just in case
  // return something unexpected
  response.write("do'h");
}
@@ -182,6 +182,8 @@ support-files =
   file_sandbox_10.html
   file_sandbox_11.html
   file_sandbox_12.html
+  file_require_sri_meta.sjs
+  file_require_sri_meta.js
 
 [test_base-uri.html]
 [test_blob_data_schemes.html]
@@ -272,3 +274,4 @@ tags = mcb
 [test_iframe_sandbox_top_1.html]
 [test_sandbox.html]
 [test_ping.html]
+[test_require_sri_meta.html]
dom/security/test/csp/test_require_sri_meta.html (new file, 77 lines)
@@ -0,0 +1,77 @@
<!DOCTYPE HTML>
<html>
<head>
  <meta charset="utf-8">
  <title>Bug 1277557 - CSP require-sri-for does not block when CSP is in meta tag</title>
  <!-- Including SimpleTest.js so we can use waitForExplicitFinish -->
  <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
  <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
</head>
<body>
<iframe style="width:100%;" id="testframe"></iframe>

<script class="testbody" type="text/javascript">

/* Description of the test:
 * We load scripts within an iframe and make sure that the meta CSP
 * require-sri-for directive applies correctly to preloaded scripts.
 * Please note that we have to use <script src=""> to kick
 * off the html preloader.
 */

SimpleTest.waitForExplicitFinish();

SpecialPowers.setBoolPref("security.csp.experimentalEnabled", true);

var curTest;
var counter = -1;

const tests = [
  { // test 1
    description: "script with *no* SRI should be blocked",
    query: "no-sri",
    expected: "script-blocked"
  },
  { // test 2
    description: "script with *incorrect* SRI should be blocked",
    query: "wrong-sri",
    expected: "script-blocked"
  },
  { // test 3
    description: "script with *correct* SRI should be loaded",
    query: "correct-sri",
    expected: "script-loaded"
  },
];

function finishTest() {
  window.removeEventListener("message", receiveMessage, false);
  SimpleTest.finish();
}

function checkResults(result) {
  is(result, curTest.expected, curTest.description);
  loadNextTest();
}

window.addEventListener("message", receiveMessage, false);
function receiveMessage(event) {
  checkResults(event.data.result);
}

function loadNextTest() {
  counter++;
  if (counter == tests.length) {
    finishTest();
    return;
  }
  curTest = tests[counter];
  var testframe = document.getElementById("testframe");
  testframe.src = "file_require_sri_meta.sjs?" + curTest.query;
}

loadNextTest();

</script>
</body>
</html>
@@ -621,12 +621,19 @@ BasicCompositor::BeginFrame(const nsIntRegion& aInvalidRegion,
     clearRect = mInvalidRect;
   }
 
+  // Prevent CreateRenderTargetForWindow from clearing unwanted area.
+  gfxUtils::ClipToRegion(mDrawTarget,
+                         mInvalidRegion.ToUnknownRegion());
+
   // Setup an intermediate render target to buffer all compositing. We will
   // copy this into mDrawTarget (the widget), and/or mTarget in EndFrame()
   RefPtr<CompositingRenderTarget> target =
     CreateRenderTargetForWindow(mInvalidRect,
                                 clearRect,
                                 bufferMode);
 
+  mDrawTarget->PopClip();
+
   if (!target) {
     if (!mTarget) {
       mWidget->EndRemoteDrawingInRegion(mDrawTarget, mInvalidRegion);
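
gfxUtils::ClipToRegion() pushes a clip onto the draw target, so the added PopClip() is what keeps the clip stack balanced once CreateRenderTargetForWindow() has been shielded from clearing outside the invalid region. A hedged sketch of that push/pop discipline with Moz2D's DrawTarget API — the PaintClipped helper is hypothetical, not part of the patch:

    #include "mozilla/gfx/2D.h"

    using namespace mozilla::gfx;

    // Hypothetical helper: draw only inside aBounds, then restore the
    // previous clip state. Every PushClip*() must be matched by a PopClip(),
    // exactly like the ClipToRegion()/PopClip() pair in BeginFrame() above.
    void PaintClipped(DrawTarget* aTarget, const Rect& aBounds)
    {
      aTarget->PushClipRect(aBounds);  // clip all subsequent draws
      aTarget->FillRect(aBounds,
                        ColorPattern(Color(0.f, 0.f, 0.f, 1.f)));
      aTarget->PopClip();              // an unbalanced clip stack corrupts
                                       // later draws to the same target
    }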
@@ -2710,13 +2710,12 @@ class BaseCompiler
 #if defined(JS_CODEGEN_X64)
     // Copied from CodeGenerator-x64.cpp
     // TODO / CLEANUP - share with the code generator.
 
-    wasm::MemoryAccess
-    AsmJSMemoryAccess(uint32_t before, wasm::MemoryAccess::OutOfBoundsBehavior throwBehavior,
-                      uint32_t offsetWithinWholeSimdVector = 0)
+    MemoryAccess
+    WasmMemoryAccess(uint32_t before)
     {
-        return wasm::MemoryAccess(before, throwBehavior, wasm::MemoryAccess::WrapOffset,
-                                  offsetWithinWholeSimdVector);
+        if (isCompilingAsmJS())
+            return MemoryAccess(before, MemoryAccess::CarryOn, MemoryAccess::WrapOffset);
+        return MemoryAccess(before, MemoryAccess::Throw, MemoryAccess::DontWrapOffset);
     }
 #endif
 
@@ -2760,6 +2759,11 @@ class BaseCompiler
 #endif
 
     void loadHeap(const MWasmMemoryAccess& access, RegI32 ptr, AnyReg dest) {
+        if (access.offset() > INT32_MAX) {
+            masm.jump(wasm::JumpTarget::OutOfBounds);
+            return;
+        }
+
 #if defined(JS_CODEGEN_X64)
         // CodeGeneratorX64::visitAsmJSLoadHeap()
 
@@ -2783,7 +2787,7 @@ class BaseCompiler
         }
         uint32_t after = masm.size();
 
-        masm.append(AsmJSMemoryAccess(before, wasm::MemoryAccess::CarryOn));
+        masm.append(WasmMemoryAccess(before));
         verifyHeapAccessDisassembly(before, after, IsLoad(true), access.accessType(), 0, srcAddr, dest);
 #else
         MOZ_CRASH("BaseCompiler platform hook: loadHeap");
@@ -2814,7 +2818,7 @@ class BaseCompiler
         }
         uint32_t after = masm.size();
 
-        masm.append(AsmJSMemoryAccess(before, wasm::MemoryAccess::CarryOn));
+        masm.append(WasmMemoryAccess(before));
         verifyHeapAccessDisassembly(before, after, IsLoad(false), access.accessType(), 0, dstAddr, src);
 #else
         MOZ_CRASH("BaseCompiler platform hook: storeHeap");
@@ -5210,8 +5214,7 @@ BaseCompiler::emitLoad(ValType type, Scalar::Type viewType)
     // TODO / OPTIMIZE: Disable bounds checking on constant accesses
     // below the minimum heap length.
 
-    MWasmMemoryAccess access(viewType, addr.align);
-    access.setOffset(addr.offset);
+    MWasmMemoryAccess access(viewType, addr.align, addr.offset);
 
     switch (type) {
       case ValType::I32: {
@@ -5260,8 +5263,7 @@ BaseCompiler::emitStore(ValType resultType, Scalar::Type viewType)
     // TODO / OPTIMIZE: Disable bounds checking on constant accesses
     // below the minimum heap length.
 
-    MWasmMemoryAccess access(viewType, addr.align);
-    access.setOffset(addr.offset);
+    MWasmMemoryAccess access(viewType, addr.align, addr.offset);
 
     switch (resultType) {
       case ValType::I32: {
@@ -5540,8 +5542,7 @@ BaseCompiler::emitStoreWithCoercion(ValType resultType, Scalar::Type viewType)
     // TODO / OPTIMIZE: Disable bounds checking on constant accesses
     // below the minimum heap length.
 
-    MWasmMemoryAccess access(viewType, addr.align);
-    access.setOffset(addr.offset);
+    MWasmMemoryAccess access(viewType, addr.align, addr.offset);
 
     if (resultType == ValType::F32 && viewType == Scalar::Float64) {
       RegF32 rv = popF32();
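
The WasmMemoryAccess() helper above concentrates the behavioral split in one place: asm.js accesses wrap their 32-bit offset and carry on after an out-of-bounds access, while wasm accesses keep the exact offset and must trap. A standalone sketch of the same decision table — the enum and struct names here are illustrative stand-ins, not the real wasm::MemoryAccess API:

    #include <cstdint>

    // Hypothetical mirror of the two policy axes picked in WasmMemoryAccess().
    enum class OnOutOfBounds { CarryOn, Throw };          // asm.js vs. wasm
    enum class OffsetWrap    { WrapOffset, DontWrapOffset };

    struct AccessPolicy {
        OnOutOfBounds oob;
        OffsetWrap    wrap;
    };

    // asm.js keeps its legacy semantics; wasm traps and never wraps, which
    // is why a guard-page hit can be translated directly into an OOB trap
    // in the signal handler further down.
    AccessPolicy PolicyFor(bool compilingAsmJS)
    {
        if (compilingAsmJS)
            return { OnOutOfBounds::CarryOn, OffsetWrap::WrapOffset };
        return { OnOutOfBounds::Throw, OffsetWrap::DontWrapOffset };
    }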
@@ -594,7 +594,16 @@ class FunctionCompiler
     {
         if (inDeadCode())
             return nullptr;
-        MAsmJSLoadHeap* load = MAsmJSLoadHeap::New(alloc(), base, access);
+
+        MInstruction* load = nullptr;
+        if (mg().kind == ModuleKind::Wasm) {
+            if (!mg().usesSignal.forOOB)
+                curBlock_->add(MWasmBoundsCheck::New(alloc(), base, access));
+            load = MWasmLoad::New(alloc(), base, access);
+        } else {
+            load = MAsmJSLoadHeap::New(alloc(), base, access);
+        }
+
         curBlock_->add(load);
         return load;
     }
@@ -603,7 +612,16 @@ class FunctionCompiler
     {
         if (inDeadCode())
             return;
-        MAsmJSStoreHeap* store = MAsmJSStoreHeap::New(alloc(), base, access, v);
+
+        MInstruction* store = nullptr;
+        if (mg().kind == ModuleKind::Wasm) {
+            if (!mg().usesSignal.forOOB)
+                curBlock_->add(MWasmBoundsCheck::New(alloc(), base, access));
+            store = MWasmStore::New(alloc(), base, access, v);
+        } else {
+            store = MAsmJSStoreHeap::New(alloc(), base, access, v);
+        }
+
         curBlock_->add(store);
     }
 
@@ -2055,32 +2073,6 @@ EmitSelect(FunctionCompiler& f)
     return true;
 }
 
-enum class IsAtomic {
-    No = false,
-    Yes = true
-};
-
-static bool
-SetHeapAccessOffset(FunctionCompiler& f, uint32_t offset, MWasmMemoryAccess* access,
-                    MDefinition** base, IsAtomic atomic = IsAtomic::No)
-{
-    // TODO Remove this after implementing non-wraparound offset semantics.
-    uint32_t endOffset = offset + access->byteSize();
-    if (endOffset < offset)
-        return false;
-
-    // Assume worst case.
-    if (endOffset > f.mirGen().foldableOffsetRange(/* bounds check */ true, bool(atomic))) {
-        MDefinition* rhs = f.constant(Int32Value(offset), MIRType::Int32);
-        *base = f.binary<MAdd>(*base, rhs, MIRType::Int32);
-        access->setOffset(0);
-    } else {
-        access->setOffset(offset);
-    }
-
-    return true;
-}
-
 static bool
 EmitLoad(FunctionCompiler& f, ValType type, Scalar::Type viewType)
 {
@@ -2088,13 +2080,8 @@ EmitLoad(FunctionCompiler& f, ValType type, Scalar::Type viewType)
     if (!f.iter().readLoad(type, Scalar::byteSize(viewType), &addr))
         return false;
 
-    MWasmMemoryAccess access(viewType, addr.align);
-
-    MDefinition* base = addr.base;
-    if (!SetHeapAccessOffset(f, addr.offset, &access, &base))
-        return false;
-
-    f.iter().setResult(f.loadHeap(base, access));
+    MWasmMemoryAccess access(viewType, addr.align, addr.offset);
+    f.iter().setResult(f.loadHeap(addr.base, access));
     return true;
 }
 
@@ -2106,13 +2093,8 @@ EmitStore(FunctionCompiler& f, ValType resultType, Scalar::Type viewType)
     if (!f.iter().readStore(resultType, Scalar::byteSize(viewType), &addr, &value))
         return false;
 
-    MWasmMemoryAccess access(viewType, addr.align);
-
-    MDefinition* base = addr.base;
-    if (!SetHeapAccessOffset(f, addr.offset, &access, &base))
-        return false;
-
-    f.storeHeap(base, access, value);
+    MWasmMemoryAccess access(viewType, addr.align, addr.offset);
+    f.storeHeap(addr.base, access, value);
     return true;
 }
 
@@ -2131,13 +2113,8 @@ EmitStoreWithCoercion(FunctionCompiler& f, ValType resultType, Scalar::Type viewType)
     else
         MOZ_CRASH("unexpected coerced store");
 
-    MWasmMemoryAccess access(viewType, addr.align);
-
-    MDefinition* base = addr.base;
-    if (!SetHeapAccessOffset(f, addr.offset, &access, &base))
-        return false;
-
-    f.storeHeap(base, access, value);
+    MWasmMemoryAccess access(viewType, addr.align, addr.offset);
+    f.storeHeap(addr.base, access, value);
     return true;
 }
 
@@ -2207,13 +2184,9 @@ EmitAtomicsLoad(FunctionCompiler& f)
     if (!f.iter().readAtomicLoad(&addr, &viewType))
         return false;
 
-    MWasmMemoryAccess access(viewType, addr.align, 0, MembarBeforeLoad, MembarAfterLoad);
-
-    MDefinition* base = addr.base;
-    if (!SetHeapAccessOffset(f, addr.offset, &access, &base, IsAtomic::Yes))
-        return false;
-
-    f.iter().setResult(f.loadAtomicHeap(base, access));
+    MWasmMemoryAccess access(viewType, addr.align, addr.offset, 0,
+                             MembarBeforeLoad, MembarAfterLoad);
+    f.iter().setResult(f.loadAtomicHeap(addr.base, access));
     return true;
 }
 
@@ -2226,13 +2199,9 @@ EmitAtomicsStore(FunctionCompiler& f)
     if (!f.iter().readAtomicStore(&addr, &viewType, &value))
         return false;
 
-    MWasmMemoryAccess access(viewType, addr.align, 0, MembarBeforeStore, MembarAfterStore);
-
-    MDefinition* base = addr.base;
-    if (!SetHeapAccessOffset(f, addr.offset, &access, &base, IsAtomic::Yes))
-        return false;
-
-    f.storeAtomicHeap(base, access, value);
+    MWasmMemoryAccess access(viewType, addr.align, addr.offset, 0,
+                             MembarBeforeStore, MembarAfterStore);
+    f.storeAtomicHeap(addr.base, access, value);
     f.iter().setResult(value);
     return true;
 }
@@ -2247,13 +2216,8 @@ EmitAtomicsBinOp(FunctionCompiler& f)
     if (!f.iter().readAtomicBinOp(&addr, &viewType, &op, &value))
         return false;
 
-    MWasmMemoryAccess access(viewType, addr.align);
-
-    MDefinition* base = addr.base;
-    if (!SetHeapAccessOffset(f, addr.offset, &access, &base, IsAtomic::Yes))
-        return false;
-
-    f.iter().setResult(f.atomicBinopHeap(op, base, access, value));
+    MWasmMemoryAccess access(viewType, addr.align, addr.offset);
+    f.iter().setResult(f.atomicBinopHeap(op, addr.base, access, value));
     return true;
 }
 
@@ -2267,13 +2231,8 @@ EmitAtomicsCompareExchange(FunctionCompiler& f)
     if (!f.iter().readAtomicCompareExchange(&addr, &viewType, &oldValue, &newValue))
         return false;
 
-    MWasmMemoryAccess access(viewType, addr.align);
-
-    MDefinition* base = addr.base;
-    if (!SetHeapAccessOffset(f, addr.offset, &access, &base, IsAtomic::Yes))
-        return false;
-
-    f.iter().setResult(f.atomicCompareExchangeHeap(base, access, oldValue, newValue));
+    MWasmMemoryAccess access(viewType, addr.align, addr.offset);
+    f.iter().setResult(f.atomicCompareExchangeHeap(addr.base, access, oldValue, newValue));
    return true;
 }
 
@@ -2286,13 +2245,8 @@ EmitAtomicsExchange(FunctionCompiler& f)
     if (!f.iter().readAtomicExchange(&addr, &viewType, &value))
         return false;
 
-    MWasmMemoryAccess access(viewType, addr.align);
-
-    MDefinition* base = addr.base;
-    if (!SetHeapAccessOffset(f, addr.offset, &access, &base, IsAtomic::Yes))
-        return false;
-
-    f.iter().setResult(f.atomicExchangeHeap(base, access, value));
+    MWasmMemoryAccess access(viewType, addr.align, addr.offset);
+    f.iter().setResult(f.atomicExchangeHeap(addr.base, access, value));
     return true;
 }
 
@@ -2513,13 +2467,8 @@ EmitSimdLoad(FunctionCompiler& f, ValType resultType, unsigned numElems)
     if (!f.iter().readLoad(resultType, Scalar::byteSize(viewType), &addr))
         return false;
 
-    MWasmMemoryAccess access(viewType, addr.align, numElems);
-
-    MDefinition* base = addr.base;
-    if (!SetHeapAccessOffset(f, addr.offset, &access, &base))
-        return false;
-
-    f.iter().setResult(f.loadSimdHeap(base, access));
+    MWasmMemoryAccess access(viewType, addr.align, addr.offset, numElems);
+    f.iter().setResult(f.loadSimdHeap(addr.base, access));
     return true;
 }
 
@@ -2537,13 +2486,8 @@ EmitSimdStore(FunctionCompiler& f, ValType resultType, unsigned numElems)
     if (!f.iter().readStore(resultType, Scalar::byteSize(viewType), &addr, &value))
         return false;
 
-    MWasmMemoryAccess access(viewType, addr.align, numElems);
-
-    MDefinition* base = addr.base;
-    if (!SetHeapAccessOffset(f, addr.offset, &access, &base))
-        return false;
-
-    f.storeSimdHeap(base, access, value);
+    MWasmMemoryAccess access(viewType, addr.align, addr.offset, numElems);
+    f.storeSimdHeap(addr.base, access, value);
     return true;
 }
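
The deleted SetHeapAccessOffset() existed to reject 32-bit wraparound when folding a constant offset into a heap access: offset + byteSize can overflow uint32_t, and comparing the sum against the original offset is the standard detection. A self-contained sketch of just that check, with hypothetical names:

    #include <cstdint>

    // Returns false when offset + size wraps around the 32-bit index space --
    // the case the removed SetHeapAccessOffset() rejected before folding the
    // offset into a heap access.
    bool ComputeEndOffset(uint32_t offset, uint32_t size, uint32_t* endOffset)
    {
        uint32_t end = offset + size;   // may wrap modulo 2^32
        if (end < offset)               // wrapped iff the sum got smaller
            return false;
        *endOffset = end;
        return true;
    }

    // E.g. ComputeEndOffset(0xFFFFFFF8u, 16, &e) returns false: the access
    // would wrap past the end of the 4 GiB index space.

With the constructor now taking addr.offset directly and bounds checks emitted as MWasmBoundsCheck instructions, this folding (and its wraparound guard) is no longer done at emit time.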
@@ -56,7 +56,7 @@ struct LinkData : LinkDataCacheablePod
         RawPointer,
         CodeLabel,
         InstructionImmediate
     };
     MOZ_INIT_OUTSIDE_CTOR uint32_t patchAtOffset;
     MOZ_INIT_OUTSIDE_CTOR uint32_t targetOffset;
@@ -650,6 +650,13 @@ EmulateHeapAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddress,
     MOZ_RELEASE_ASSERT(accessAddress + access.size() > instance.memoryBase() + instance.memoryLength(),
                        "Computed access address is not actually out of bounds");
 
+    // Wasm loads/stores don't wrap offsets at all, so hitting the guard page
+    // means we are out of bounds in any case.
+    if (!memoryAccess->wrapOffset()) {
+        MOZ_ASSERT(memoryAccess->throwOnOOB());
+        return instance.codeSegment().outOfBoundsCode();
+    }
+
     // The basic sandbox model is that all heap accesses are a heap base
     // register plus an index, and the index is always computed with 32-bit
     // operations, so we know it can only be 4 GiB off of the heap base.
@@ -733,8 +740,11 @@ MOZ_COLD static uint8_t*
 EmulateHeapAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddress,
                   const MemoryAccess* memoryAccess, const Instance& instance)
 {
-    // TODO: Implement unaligned accesses.
-    return instance.codeSegment().outOfBoundsCode();
+    // We forbid ARM instruction sets below ARMv7, so that solves unaligned
+    // integer memory accesses. So the only way to land here is because of a
+    // non-default configured kernel or an unaligned floating-point access.
+    // TODO: Handle FPU unaligned accesses on ARM (bug 1283121).
+    return instance.codeSegment().unalignedAccessCode();
 }
 
 #endif // defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_UNALIGNED)
@@ -1097,16 +1107,28 @@ MachExceptionHandler::install(JSRuntime* rt)
 
 #else  // If not Windows or Mac, assume Unix
 
+enum class Signal {
+    SegFault,
+    BusError
+};
+
 // Be very cautious and default to not handling; we don't want to accidentally
 // silence real crashes from real bugs.
+template<Signal signal>
 static bool
 HandleFault(int signum, siginfo_t* info, void* ctx)
 {
     // The signals we're expecting come from access violations, accessing
     // mprotected memory. If the signal originates anywhere else, don't try
     // to handle it.
-    MOZ_RELEASE_ASSERT(signum == SIGSEGV);
-    if (info->si_code != SEGV_ACCERR)
+    if (signal == Signal::SegFault)
+        MOZ_RELEASE_ASSERT(signum == SIGSEGV);
+    else
+        MOZ_RELEASE_ASSERT(signum == SIGBUS);
+
+    if (signal == Signal::SegFault && info->si_code != SEGV_ACCERR)
         return false;
+    if (signal == Signal::BusError && info->si_code != BUS_ADRALN)
+        return false;
 
     CONTEXT* context = (CONTEXT*)ctx;
@@ -1135,7 +1157,7 @@ HandleFault(int signum, siginfo_t* info, void* ctx)
         return false;
 
     const MemoryAccess* memoryAccess = instance.lookupMemoryAccess(pc);
-    if (!memoryAccess)
+    if (signal == Signal::SegFault && !memoryAccess)
         return false;
 
     *ppc = EmulateHeapAccess(context, pc, faultingAddress, memoryAccess, instance);
@@ -1144,13 +1166,19 @@ HandleFault(int signum, siginfo_t* info, void* ctx)
 }
 
 static struct sigaction sPrevSEGVHandler;
+static struct sigaction sPrevSIGBUSHandler;
 
+template<Signal signal>
 static void
 AsmJSFaultHandler(int signum, siginfo_t* info, void* context)
 {
-    if (HandleFault(signum, info, context))
+    if (HandleFault<signal>(signum, info, context))
         return;
 
+    struct sigaction* previousSignal = signal == Signal::SegFault
+                                       ? &sPrevSEGVHandler
+                                       : &sPrevSIGBUSHandler;
+
     // This signal is not for any asm.js code we expect, so we need to forward
     // the signal to the next handler. If there is no next handler (SIG_IGN or
     // SIG_DFL), then it's time to crash. To do this, we set the signal back to
@@ -1163,15 +1191,14 @@ AsmJSFaultHandler(int signum, siginfo_t* info, void* context)
     // signal to its original disposition and returning.
     //
     // Note: the order of these tests matters.
-    if (sPrevSEGVHandler.sa_flags & SA_SIGINFO)
-        sPrevSEGVHandler.sa_sigaction(signum, info, context);
-    else if (sPrevSEGVHandler.sa_handler == SIG_DFL || sPrevSEGVHandler.sa_handler == SIG_IGN)
-        sigaction(signum, &sPrevSEGVHandler, nullptr);
+    if (previousSignal->sa_flags & SA_SIGINFO)
+        previousSignal->sa_sigaction(signum, info, context);
+    else if (previousSignal->sa_handler == SIG_DFL || previousSignal->sa_handler == SIG_IGN)
+        sigaction(signum, previousSignal, nullptr);
     else
-        sPrevSEGVHandler.sa_handler(signum);
+        previousSignal->sa_handler(signum);
 }
 #endif
 
 # endif // XP_WIN || XP_DARWIN || assume unix
 #endif // defined(ASMJS_MAY_USE_SIGNAL_HANDLERS)
 
 static void
@@ -1303,12 +1330,22 @@ wasm::EnsureSignalHandlersInstalled(JSRuntime* rt)
     // SA_NODEFER allows us to reenter the signal handler if we crash while
     // handling the signal, and fall through to the Breakpad handler by testing
     // handlingSegFault.
 
+# if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
     struct sigaction faultHandler;
     faultHandler.sa_flags = SA_SIGINFO | SA_NODEFER;
-    faultHandler.sa_sigaction = &AsmJSFaultHandler;
+    faultHandler.sa_sigaction = &AsmJSFaultHandler<Signal::SegFault>;
     sigemptyset(&faultHandler.sa_mask);
     if (sigaction(SIGSEGV, &faultHandler, &sPrevSEGVHandler))
         MOZ_CRASH("unable to install segv handler");
+# elif defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_UNALIGNED)
+    struct sigaction busHandler;
+    busHandler.sa_flags = SA_SIGINFO | SA_NODEFER;
+    busHandler.sa_sigaction = &AsmJSFaultHandler<Signal::BusError>;
+    sigemptyset(&busHandler.sa_mask);
+    if (sigaction(SIGBUS, &busHandler, &sPrevSIGBUSHandler))
+        MOZ_CRASH("unable to install sigbus handler");
+# endif
 # endif
 #endif // defined(ASMJS_MAY_USE_SIGNAL_HANDLERS)
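
Templating HandleFault and AsmJSFaultHandler over the Signal kind lets one body serve both SIGSEGV (out-of-bounds guard pages) and SIGBUS (unaligned ARM accesses) while forwarding anything it does not own to the previously installed handler. A stripped-down sketch of the install-and-forward pattern using only the POSIX API — the Handler/Install names are hypothetical and no SpiderMonkey types are involved:

    #include <csignal>
    #include <cstdio>

    static struct sigaction sPrevious;

    // Minimal forwarding handler: try to handle, otherwise re-dispatch
    // through the previously installed disposition, mirroring the logic
    // of AsmJSFaultHandler above.
    static void Handler(int signum, siginfo_t* info, void* ctx)
    {
        bool handled = false;  // a real handler would inspect info/ctx here
        if (handled)
            return;

        if (sPrevious.sa_flags & SA_SIGINFO)
            sPrevious.sa_sigaction(signum, info, ctx);
        else if (sPrevious.sa_handler == SIG_DFL || sPrevious.sa_handler == SIG_IGN)
            sigaction(signum, &sPrevious, nullptr);  // restore; signal re-raises
        else
            sPrevious.sa_handler(signum);
    }

    void Install()
    {
        struct sigaction sa;
        sa.sa_flags = SA_SIGINFO | SA_NODEFER;  // NODEFER: reentry falls through
        sa.sa_sigaction = &Handler;
        sigemptyset(&sa.sa_mask);
        if (sigaction(SIGSEGV, &sa, &sPrevious))
            perror("sigaction");
    }

Saving the previous struct sigaction at install time is what makes the chain safe to insert in front of crash reporters such as Breakpad.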
@@ -1042,8 +1042,10 @@ js::GCMarker::eagerlyMarkChildren(Shape* shape)
     // be traced by this loop they do not need to be traced here as well.
     BaseShape* base = shape->base();
     CheckTraversedEdge(shape, base);
-    if (mark(base))
+    if (mark(base)) {
+        MOZ_ASSERT(base->canSkipMarkingShapeTable(shape));
         base->traceChildrenSkipShapeTable(this);
+    }
 
     traverseEdge(shape, shape->propidRef().get());
@ -1,74 +1,65 @@
|
||||
// |jit-test| test-also-wasm-baseline
|
||||
load(libdir + "wasm.js");
|
||||
|
||||
function loadModule(type, ext, offset, align) {
|
||||
return wasmEvalText(
|
||||
`(module
|
||||
(memory 1
|
||||
(segment 0 "\\00\\01\\02\\03\\04\\05\\06\\07\\08\\09\\0a\\0b\\0c\\0d\\0e\\0f")
|
||||
(segment 16 "\\f0\\f1\\f2\\f3\\f4\\f5\\f6\\f7\\f8\\f9\\fa\\fb\\fc\\fd\\fe\\ff")
|
||||
)
|
||||
(func (param i32) (result ${type})
|
||||
(${type}.load${ext}
|
||||
offset=${offset}
|
||||
${align != 0 ? 'align=' + align : ''}
|
||||
(get_local 0)
|
||||
)
|
||||
) (export "" 0))`
|
||||
);
|
||||
}
|
||||
|
||||
function storeModule(type, ext, offset, align) {
|
||||
var load_ext = ext === '' ? '' : ext + '_s';
|
||||
return wasmEvalText(
|
||||
`(module
|
||||
(memory 1
|
||||
(segment 0 "\\00\\01\\02\\03\\04\\05\\06\\07\\08\\09\\0a\\0b\\0c\\0d\\0e\\0f")
|
||||
(segment 16 "\\f0\\f1\\f2\\f3\\f4\\f5\\f6\\f7\\f8\\f9\\fa\\fb\\fc\\fd\\fe\\ff")
|
||||
)
|
||||
(func (param i32) (param ${type}) (result ${type})
|
||||
(${type}.store${ext}
|
||||
offset=${offset}
|
||||
${align != 0 ? 'align=' + align : ''}
|
||||
(get_local 0)
|
||||
(get_local 1)
|
||||
)
|
||||
) (export "store" 0)
|
||||
(func (param i32) (result ${type})
|
||||
(${type}.load${load_ext}
|
||||
offset=${offset}
|
||||
${align != 0 ? 'align=' + align : ''}
|
||||
(get_local 0)
|
||||
)
|
||||
) (export "load" 1))`
|
||||
);
|
||||
}
|
||||
|
||||
function testLoad(type, ext, base, offset, align, expect) {
|
||||
assertEq(wasmEvalText(
|
||||
'(module' +
|
||||
' (memory 1' +
|
||||
' (segment 0 "\\00\\01\\02\\03\\04\\05\\06\\07\\08\\09\\0a\\0b\\0c\\0d\\0e\\0f")' +
|
||||
' (segment 16 "\\f0\\f1\\f2\\f3\\f4\\f5\\f6\\f7\\f8\\f9\\fa\\fb\\fc\\fd\\fe\\ff")' +
|
||||
' )' +
|
||||
' (func (param i32) (result ' + type + ')' +
|
||||
' (' + type + '.load' + ext +
|
||||
' offset=' + offset +
|
||||
' ' + (align != 0 ? 'align=' + align : '') +
|
||||
' (get_local 0)' +
|
||||
' )' +
|
||||
' ) (export "" 0))'
|
||||
)(base), expect);
|
||||
assertEq(loadModule(type, ext, offset, align)(base), expect);
|
||||
}
|
||||
|
||||
function testLoadOOB(type, ext, base, offset, align) {
|
||||
assertErrorMessage(() => loadModule(type, ext, offset, align)(base), Error, /invalid or out-of-range index/);
|
||||
}
|
||||
|
||||
function testStore(type, ext, base, offset, align, value) {
|
||||
assertEq(wasmEvalText(
|
||||
'(module' +
|
||||
' (memory 1' +
|
||||
' (segment 0 "\\00\\01\\02\\03\\04\\05\\06\\07\\08\\09\\0a\\0b\\0c\\0d\\0e\\0f")' +
|
||||
' (segment 16 "\\f0\\f1\\f2\\f3\\f4\\f5\\f6\\f7\\f8\\f9\\fa\\fb\\fc\\fd\\fe\\ff")' +
|
||||
' )' +
|
||||
' (func (param i32) (param ' + type + ') (result ' + type + ')' +
|
||||
' (' + type + '.store' + ext +
|
||||
' offset=' + offset +
|
||||
' ' + (align != 0 ? 'align=' + align : '') +
|
||||
' (get_local 0)' +
|
||||
' (get_local 1)' +
|
||||
' )' +
|
||||
' ) (export "" 0))'
|
||||
)(base, value), value);
|
||||
let module = storeModule(type, ext, offset, align);
|
||||
assertEq(module.store(base, value), value);
|
||||
assertEq(module.load(base), value);
|
||||
}
|
||||
|
||||
function testLoadError(type, ext, base, offset, align, errorMsg) {
|
||||
assertErrorMessage(() => wasmEvalText(
|
||||
'(module' +
|
||||
' (memory 1' +
|
||||
' (segment 0 "\\00\\01\\02\\03\\04\\05\\06\\07\\08\\09\\0a\\0b\\0c\\0d\\0e\\0f")' +
|
||||
' (segment 16 "\\f0\\f1\\f2\\f3\\f4\\f5\\f6\\f7\\f8\\f9\\fa\\fb\\fc\\fd\\fe\\ff")' +
|
||||
' )' +
|
||||
' (func (param i32) (result ' + type + ')' +
|
||||
' (' + type + '.load' + ext +
|
||||
' offset=' + offset +
|
||||
' ' + (align != 0 ? 'align=' + align : '') +
|
||||
' (get_local 0)' +
|
||||
' )' +
|
||||
' ) (export "" 0))'
|
||||
), Error, errorMsg);
|
||||
}
|
||||
|
||||
function testStoreError(type, ext, base, offset, align, errorMsg) {
    assertErrorMessage(() => wasmEvalText(
        '(module' +
        ' (memory 1' +
        ' (segment 0 "\\00\\01\\02\\03\\04\\05\\06\\07\\08\\09\\0a\\0b\\0c\\0d\\0e\\0f")' +
        ' (segment 16 "\\f0\\f1\\f2\\f3\\f4\\f5\\f6\\f7\\f8\\f9\\fa\\fb\\fc\\fd\\fe\\ff")' +
        ' )' +
        ' (func (param i32) (param ' + type + ') (result ' + type + ')' +
        ' (' + type + '.store' + ext +
        ' offset=' + offset +
        ' ' + (align != 0 ? 'align=' + align : '') +
        ' (get_local 0)' +
        ' (get_local 1)' +
        ' )' +
        ' ) (export "" 0))'
    ), Error, errorMsg);
}

function testStoreOOB(type, ext, base, offset, align, value) {
    assertErrorMessage(() => storeModule(type, ext, offset, align).store(base, value), Error, /invalid or out-of-range index/);
}

testLoad('i32', '', 0, 0, 0, 0x03020100);
@ -143,21 +134,70 @@ testStore('f64', '', 1, 7, 4, 0.89012345);
testStore('i32', '8', 0, 0, 0, 0x23);
testStore('i32', '16', 0, 0, 0, 0x2345);

testLoadError('i32', '8_s', 0, 0, 2, /greater than natural alignment/);
testLoadError('i32', '8_u', 0, 0, 2, /greater than natural alignment/);
testLoadError('i32', '16_s', 0, 0, 4, /greater than natural alignment/);
testLoadError('i32', '16_u', 0, 0, 4, /greater than natural alignment/);
testLoadError('i32', '', 0, 0, 8, /greater than natural alignment/);
testLoadError('f32', '', 0, 0, 8, /greater than natural alignment/);
testLoadError('f64', '', 0, 0, 16, /greater than natural alignment/);
testStoreError('i32', '8', 0, 0, 2, /greater than natural alignment/);
testStoreError('i32', '16', 0, 0, 4, /greater than natural alignment/);
testStoreError('i32', '', 0, 0, 8, /greater than natural alignment/);
testStoreError('f32', '', 0, 0, 8, /greater than natural alignment/);
testStoreError('f64', '', 0, 0, 16, /greater than natural alignment/)
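
The alignment failures above all apply one validation rule: a load or store's align hint may not exceed the access's natural (byte) size. A minimal standalone C++ sketch of that rule (illustrative only, not part of the patch; full wasm validation additionally requires the hint to be a power of two):

#include <cstdint>
#include <cassert>

// A nonzero align hint is valid only up to the natural size of the access.
static bool alignIsValid(uint32_t align, uint32_t accessByteSize) {
    return align <= accessByteSize;
}

int main() {
    assert(!alignIsValid(2, 1));   // i32.load8_s align=2 -> "greater than natural alignment"
    assert(alignIsValid(4, 4));    // i32.load align=4    -> ok
    assert(!alignIsValid(8, 4));   // i32.load align=8    -> validation error
}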

assertErrorMessage(() => wasmEvalText('(module (memory 2 1))'), TypeError, /maximum memory size less than initial memory size/);

// Test bounds checks and edge cases.
const align = 0;
for (let offset of [0, 1, 2, 3, 4, 8, 16, 41, 0xfff8]) {
    // Accesses of 1 byte.
    let lastValidIndex = 0x10000 - 1 - offset;

    testLoad('i32', '8_s', lastValidIndex, offset, align, 0);
    testLoadOOB('i32', '8_s', lastValidIndex + 1, offset, align);

    testLoad('i32', '8_u', lastValidIndex, offset, align, 0);
    testLoadOOB('i32', '8_u', lastValidIndex + 1, offset, align);

    testStore('i32', '8', lastValidIndex, offset, align, -42);
    testStoreOOB('i32', '8', lastValidIndex + 1, offset, align, -42);

    // Accesses of 2 bytes.
    lastValidIndex = 0x10000 - 2 - offset;

    testLoad('i32', '16_s', lastValidIndex, offset, align, 0);
    testLoadOOB('i32', '16_s', lastValidIndex + 1, offset, align);

    testLoad('i32', '16_u', lastValidIndex, offset, align, 0);
    testLoadOOB('i32', '16_u', lastValidIndex + 1, offset, align);

    testStore('i32', '16', lastValidIndex, offset, align, -32768);
    testStoreOOB('i32', '16', lastValidIndex + 1, offset, align, -32768);

    // Accesses of 4 bytes.
    lastValidIndex = 0x10000 - 4 - offset;

    testLoad('i32', '', lastValidIndex, offset, align, 0);
    testLoadOOB('i32', '', lastValidIndex + 1, offset, align);

    testLoad('f32', '', lastValidIndex, offset, align, 0);
    testLoadOOB('f32', '', lastValidIndex + 1, offset, align);

    testStore('i32', '', lastValidIndex, offset, align, 1337);
    testStoreOOB('i32', '', lastValidIndex + 1, offset, align, 1337);

    testStore('f32', '', lastValidIndex, offset, align, Math.fround(13.37));
    testStoreOOB('f32', '', lastValidIndex + 1, offset, align, Math.fround(13.37));

    // Accesses of 8 bytes.
    lastValidIndex = 0x10000 - 8 - offset;

    testLoad('f64', '', lastValidIndex, offset, align, 0);
    testLoadOOB('f64', '', lastValidIndex + 1, offset, align);

    testStore('f64', '', lastValidIndex, offset, align, 1.23456789);
    testStoreOOB('f64', '', lastValidIndex + 1, offset, align, 1.23456789);
}
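
Each block above probes the boundary directly: for an access of a given byte size at a constant offset, exactly one index is the last that fits in the single 64 KiB page. A standalone C++ sketch of that arithmetic (illustrative, not part of the patch):

#include <cstdint>
#include <cassert>

// An access is in bounds iff index + offset + size fits in the memory.
static bool accessInBounds(uint64_t index, uint64_t offset, uint64_t size) {
    const uint64_t memoryLength = 0x10000;  // one 64 KiB wasm page
    return index + offset + size <= memoryLength;
}

int main() {
    uint64_t offset = 41, size = 4;
    uint64_t lastValidIndex = 0x10000 - size - offset;
    assert(accessInBounds(lastValidIndex, offset, size));       // last in-bounds index
    assert(!accessInBounds(lastValidIndex + 1, offset, size));  // one past it traps
}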

// Ensure wrapping doesn't apply.
offset = 0x7fffffff; // maximum allowed offset that doesn't always throw.
for (let index of [0, 1, 2, 3, 0x7fffffff, 0x80000000, 0x80000001]) {
    testLoadOOB('i32', '8_s', index, offset, align);
    testLoadOOB('i32', '16_s', index, offset, align);
    testLoadOOB('i32', '', index, offset, align);
    testLoadOOB('f32', '', index, offset, align);
    testLoadOOB('f64', '', index, offset, align);
}
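
A standalone C++ sketch of what these cases guard against (illustrative, not part of the patch): if the effective address were formed with a wrapping 32-bit add, a huge index plus a huge offset could alias a small, seemingly in-bounds address, so the address must be computed or checked in wider arithmetic:

#include <cstdint>
#include <cassert>

int main() {
    const uint64_t memoryLength = 0x10000;
    uint32_t index = 0x80000001u, offset = 0x7fffffffu;
    uint32_t wrapped = index + offset;              // 32-bit add wraps to 0
    uint64_t effective = uint64_t(index) + offset;  // the real address, 0x100000000
    assert(wrapped == 0);                   // a wrapping add would look in bounds
    assert(effective >= memoryLength);      // the access is actually out of bounds
}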

assertErrorMessage(() => wasmEvalText('(module (memory 1) (func (f64.store offset=0 (i32.const 0) (i32.const 0))))'), TypeError, mismatchError("i32", "f64"));
assertErrorMessage(() => wasmEvalText('(module (memory 1) (func (f64.store offset=0 (i32.const 0) (f32.const 0))))'), TypeError, mismatchError("f32", "f64"));

@ -170,3 +210,44 @@ assertErrorMessage(() => wasmEvalText('(module (memory 1) (func (i32.store offse
wasmEvalText('(module (memory 0 65535))')
assertErrorMessage(() => wasmEvalText('(module (memory 0 65536))'), TypeError, /maximum memory size too big/);

// Test heavy register pressure.
function testRegisters() {
    assertEq(wasmEvalText(
        `(module
          (memory 1
            (segment 0 "\\00\\01\\02\\03\\04\\05\\06\\07\\08\\09\\0a\\0b\\0c\\0d\\0e\\0f")
            (segment 16 "\\f0\\f1\\f2\\f3\\f4\\f5\\f6\\f7\\f8\\f9\\fa\\fb\\fc\\fd\\fe\\ff")
          )
          (func (param i32) (local i32 i32 i32 i32 f32 f64) (result i32)
            (set_local 1 (i32.load8_s offset=4 (get_local 0)))
            (set_local 2 (i32.load16_s (get_local 1)))
            (i32.store8 offset=4 (get_local 0) (get_local 1))
            (set_local 3 (i32.load16_u (get_local 2)))
            (i32.store16 (get_local 1) (get_local 2))
            (set_local 4 (i32.load (get_local 2)))
            (i32.store (get_local 1) (get_local 2))
            (set_local 5 (f32.load (get_local 4)))
            (f32.store (get_local 4) (get_local 5))
            (set_local 6 (f64.load (get_local 4)))
            (f64.store (get_local 4) (get_local 6))
            (i32.add
              (i32.add
                (get_local 0)
                (get_local 1)
              )
              (i32.add
                (i32.add
                  (get_local 2)
                  (get_local 3)
                )
                (i32.add
                  (get_local 4)
                  (i32.reinterpret/f32 (get_local 5))
                )
              )
            )
          ) (export "" 0))`
    )(1), 50464523);
}

testRegisters();

@ -1,4 +1,2 @@
// |jit-test| test-also-wasm-baseline
// TODO: wrapping offsets
quit();
var importedArgs = ['address.wast']; load(scriptdir + '../spec.js');

@ -1,4 +1,4 @@
// |jit-test| test-also-wasm-baseline
// TODO unaligned memory accesses
// TODO i64 loads
quit();
var importedArgs = ['memory.wast']; load(scriptdir + '../spec.js');

@ -1,4 +1,4 @@
// |jit-test| test-also-wasm-baseline
// TODO trap on OOB
// TODO dce'd effectful instructions
quit();
var importedArgs = ['traps.wast']; load(scriptdir + '../spec.js');

@ -142,6 +142,8 @@ GetObject(const MDefinition* ins)
      case MDefinition::Op_AtomicTypedArrayElementBinop:
      case MDefinition::Op_AsmJSLoadHeap:
      case MDefinition::Op_AsmJSStoreHeap:
      case MDefinition::Op_WasmLoad:
      case MDefinition::Op_WasmStore:
      case MDefinition::Op_AsmJSCompareExchangeHeap:
      case MDefinition::Op_AsmJSAtomicBinopHeap:
      case MDefinition::Op_AsmJSLoadGlobalVar:

@ -12959,10 +12959,11 @@ class MWasmMemoryAccess
    MemoryBarrierBits barrierAfter_;

  public:
    explicit MWasmMemoryAccess(Scalar::Type accessType, uint32_t align, unsigned numSimdElems = 0,
    explicit MWasmMemoryAccess(Scalar::Type accessType, uint32_t align, uint32_t offset,
                               unsigned numSimdElems = 0,
                               MemoryBarrierBits barrierBefore = MembarNobits,
                               MemoryBarrierBits barrierAfter = MembarNobits)
      : offset_(0),
      : offset_(offset),
        align_(align),
        accessType_(accessType),
        needsBoundsCheck_(true),
@ -12984,13 +12985,94 @@ class MWasmMemoryAccess
               : TypedArrayElemSize(accessType());
    }
    bool needsBoundsCheck() const { return needsBoundsCheck_; }
    void removeBoundsCheck() { needsBoundsCheck_ = false; }
    unsigned numSimdElems() const { MOZ_ASSERT(Scalar::isSimdType(accessType_)); return numSimdElems_; }
    void setOffset(uint32_t o) { offset_ = o; }
    void setAlign(uint32_t a) { MOZ_ASSERT(mozilla::IsPowerOfTwo(a)); align_ = a; }
    MemoryBarrierBits barrierBefore() const { return barrierBefore_; }
    MemoryBarrierBits barrierAfter() const { return barrierAfter_; }
    bool isAtomicAccess() const { return (barrierBefore_|barrierAfter_) != MembarNobits; }

    void removeBoundsCheck() { needsBoundsCheck_ = false; }
    void setOffset(uint32_t o) { offset_ = o; }
};

class MWasmBoundsCheck
  : public MUnaryInstruction,
    public MWasmMemoryAccess,
    public NoTypePolicy::Data
{
    explicit MWasmBoundsCheck(MDefinition* index, const MWasmMemoryAccess& access)
      : MUnaryInstruction(index),
        MWasmMemoryAccess(access)
    {
        setMovable();
        setGuard(); // Effectful: throws for OOB.
    }

  public:
    INSTRUCTION_HEADER(WasmBoundsCheck)
    TRIVIAL_NEW_WRAPPERS

    bool congruentTo(const MDefinition* ins) const override {
        if (!congruentIfOperandsEqual(ins))
            return false;
        const MWasmBoundsCheck* other = ins->toWasmBoundsCheck();
        return accessType() == other->accessType() &&
               offset() == other->offset() &&
               align() == other->align();
    }

    AliasSet getAliasSet() const override {
        return AliasSet::None();
    }
};
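
A hedged sketch of the value-numbering this congruence rule enables, in plain C++ rather than SpiderMonkey's MIR API (the numeric type tag below is hypothetical): two checks guarding the same definition are interchangeable exactly when all three access parameters agree, and because the instruction neither reads nor writes the heap, the later duplicate can be dropped:

#include <cstdint>
#include <cassert>

struct BoundsCheckKey { int accessType; uint32_t offset; uint32_t align; };

// Mirrors congruentTo above: same access type, offset, and alignment.
static bool congruent(const BoundsCheckKey& a, const BoundsCheckKey& b) {
    return a.accessType == b.accessType && a.offset == b.offset && a.align == b.align;
}

int main() {
    BoundsCheckKey load32 = { 4 /* hypothetical Int32 tag */, 0, 4 };
    BoundsCheckKey load32Again = load32;
    BoundsCheckKey load32Offset8 = { 4, 8, 4 };
    assert(congruent(load32, load32Again));     // same guard: redundant
    assert(!congruent(load32, load32Offset8));  // different offset: keep both
}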

class MWasmLoad
  : public MUnaryInstruction,
    public MWasmMemoryAccess,
    public NoTypePolicy::Data
{
    MWasmLoad(MDefinition* base, const MWasmMemoryAccess& access)
      : MUnaryInstruction(base),
        MWasmMemoryAccess(access)
    {
        setGuard();
        MOZ_ASSERT(access.accessType() != Scalar::Uint8Clamped, "unexpected load heap in wasm");
        setResultType(ScalarTypeToMIRType(access.accessType()));
    }

  public:
    INSTRUCTION_HEADER(WasmLoad)
    TRIVIAL_NEW_WRAPPERS
    NAMED_OPERANDS((0, base))

    AliasSet getAliasSet() const override {
        // When a barrier is needed, make the instruction effectful by giving
        // it a "store" effect.
        if (isAtomicAccess())
            return AliasSet::Store(AliasSet::AsmJSHeap);
        return AliasSet::Load(AliasSet::AsmJSHeap);
    }
};

class MWasmStore
  : public MBinaryInstruction,
    public MWasmMemoryAccess,
    public NoTypePolicy::Data
{
    MWasmStore(MDefinition* base, const MWasmMemoryAccess& access, MDefinition* value)
      : MBinaryInstruction(base, value),
        MWasmMemoryAccess(access)
    {
        setGuard();
    }

  public:
    INSTRUCTION_HEADER(WasmStore)
    TRIVIAL_NEW_WRAPPERS
    NAMED_OPERANDS((0, base), (1, value))

    AliasSet getAliasSet() const override {
        return AliasSet::Store(AliasSet::AsmJSHeap);
    }
};

class MAsmJSLoadHeap

@ -224,7 +224,6 @@ class MIRGenerator

    bool needsBoundsCheckBranch(const MWasmMemoryAccess* access) const;
    size_t foldableOffsetRange(const MWasmMemoryAccess* access) const;
    size_t foldableOffsetRange(bool accessNeedsBoundsCheck, bool atomic) const;

  private:
    GraphSpewer gs_;

@ -125,12 +125,6 @@ MIRGenerator::needsBoundsCheckBranch(const MWasmMemoryAccess* access) const

size_t
MIRGenerator::foldableOffsetRange(const MWasmMemoryAccess* access) const
{
    return foldableOffsetRange(access->needsBoundsCheck(), access->isAtomicAccess());
}

size_t
MIRGenerator::foldableOffsetRange(bool accessNeedsBoundsCheck, bool atomic) const
{
    // This determines whether it's ok to fold up to WasmImmediateRange
    // offsets, instead of just WasmCheckedImmediateRange.
@ -148,14 +142,14 @@ MIRGenerator::foldableOffsetRange(bool accessNeedsBoundsCheck, bool atomic) cons

    // Signal-handling can be dynamically disabled by OS bugs or flags.
    // Bug 1254935: Atomic accesses can't be handled with signal handlers yet.
    if (usesSignalHandlersForAsmJSOOB_ && !atomic)
    if (usesSignalHandlersForAsmJSOOB_ && !access->isAtomicAccess())
        return WasmImmediateRange;
#endif

    // On 32-bit platforms, if we've proven the access is in bounds after
    // 32-bit wrapping, we can fold full offsets because they're added with
    // 32-bit arithmetic.
    if (sizeof(intptr_t) == sizeof(int32_t) && !accessNeedsBoundsCheck)
    if (sizeof(intptr_t) == sizeof(int32_t) && !access->needsBoundsCheck())
        return WasmImmediateRange;

    // Otherwise, only allow the checked size. This is always less than the

@ -268,6 +268,9 @@ namespace jit {
    _(IsObject) \
    _(HasClass) \
    _(CopySign) \
    _(WasmBoundsCheck) \
    _(WasmLoad) \
    _(WasmStore) \
    _(WasmTruncateToInt32) \
    _(AsmJSNeg) \
    _(AsmJSUnsignedToDouble) \

@ -1206,6 +1206,7 @@ class Assembler : public AssemblerShared
    LessThan = LT,
    LessThanOrEqual = LE,
    Overflow = VS,
    CarrySet = CS,
    Signed = MI,
    NotSigned = PL,
    Zero = EQ,

@ -2188,6 +2188,119 @@ CodeGeneratorARM::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
    memoryBarrier(mir->barrierAfter());
}

void
CodeGeneratorARM::visitWasmBoundsCheck(LWasmBoundsCheck* ins)
{
    MWasmBoundsCheck* mir = ins->mir();

    uint32_t offset = mir->offset();
    if (offset > INT32_MAX) {
        masm.as_b(wasm::JumpTarget::OutOfBounds);
        return;
    }

    // No guarantee that heapBase + endOffset can be properly encoded in
    // the cmp immediate in ma_BoundsCheck, so use an explicit add instead.
    uint32_t endOffset = mir->endOffset();

    Register ptr = ToRegister(ins->ptr());

    ScratchRegisterScope ptrPlusOffset(masm);
    masm.move32(Imm32(endOffset), ptrPlusOffset);
    masm.ma_add(ptr, ptrPlusOffset, SetCC);

    // Detect unsigned overflow by checking the carry bit.
    masm.as_b(wasm::JumpTarget::OutOfBounds, Assembler::CarrySet);

    uint32_t cmpOffset = masm.ma_BoundsCheck(ptrPlusOffset).getOffset();
    masm.append(wasm::BoundsCheck(cmpOffset));
    masm.as_b(wasm::JumpTarget::OutOfBounds, Assembler::Above);
}
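
A portable C++ sketch of the carry-based overflow test emitted above (illustrative, not part of the patch): an ARM add with SetCC raises the carry flag exactly when the 32-bit sum wraps, which software can detect by the sum comparing below an operand:

#include <cstdint>
#include <cassert>

// Wrapping 32-bit add, like ma_add with SetCC; "carry set" iff the add wrapped.
static bool addOverflows(uint32_t ptr, uint32_t endOffset, uint32_t* sum) {
    *sum = ptr + endOffset;
    return *sum < ptr;
}

int main() {
    uint32_t sum;
    assert(!addOverflows(0x1000, 0x10, &sum));      // no carry: bounds check proceeds
    assert(addOverflows(0xfffffff0u, 0x20, &sum));  // carry: immediately out of bounds
}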

void
CodeGeneratorARM::visitWasmLoad(LWasmLoad* lir)
{
    const MWasmLoad* mir = lir->mir();

    MOZ_ASSERT(!mir->barrierBefore() && !mir->barrierAfter(), "atomics NYI");

    uint32_t offset = mir->offset();
    if (offset > INT32_MAX) {
        // This is unreachable because of bounds checks.
        masm.breakpoint();
        return;
    }

    Register ptr = ToRegister(lir->ptr());
    AnyRegister output = ToAnyRegister(lir->output());

    // Maybe add the offset.
    if (offset) {
        Register ptrPlusOffset = ToRegister(lir->ptrCopy());
        masm.ma_add(Imm32(offset), ptrPlusOffset);
        ptr = ptrPlusOffset;
    } else {
        MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
    }

    Scalar::Type type = mir->accessType();
    bool isSigned = type == Scalar::Int8 || type == Scalar::Int16 || type == Scalar::Int32;
    bool isFloat = output.isFloat();

    unsigned byteSize = mir->byteSize();

    if (isFloat) {
        MOZ_ASSERT((byteSize == 4) == output.fpu().isSingle());
        ScratchRegisterScope scratch(masm);
        masm.ma_add(HeapReg, ptr, scratch);
        masm.ma_vldr(Address(scratch, 0), output.fpu());
    } else {
        masm.ma_dataTransferN(IsLoad, byteSize * 8, isSigned, HeapReg, ptr, output.gpr());
    }
}

void
CodeGeneratorARM::visitWasmStore(LWasmStore* lir)
{
    const MWasmStore* mir = lir->mir();

    MOZ_ASSERT(!mir->barrierBefore() && !mir->barrierAfter(), "atomics NYI");

    uint32_t offset = mir->offset();
    if (offset > INT32_MAX) {
        // This is unreachable because of bounds checks.
        masm.breakpoint();
        return;
    }

    Register ptr = ToRegister(lir->ptr());

    // Maybe add the offset.
    if (offset) {
        Register ptrPlusOffset = ToRegister(lir->ptrCopy());
        masm.ma_add(Imm32(offset), ptrPlusOffset);
        ptr = ptrPlusOffset;
    } else {
        MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
    }

    AnyRegister value = ToAnyRegister(lir->value());
    unsigned byteSize = mir->byteSize();
    Scalar::Type type = mir->accessType();

    if (value.isFloat()) {
        FloatRegister val = value.fpu();
        MOZ_ASSERT((byteSize == 4) == val.isSingle());
        ScratchRegisterScope scratch(masm);
        masm.ma_add(HeapReg, ptr, scratch);
        masm.ma_vstr(val, Address(scratch, 0));
    } else {
        bool isSigned = type == Scalar::Uint32 || type == Scalar::Int32; // see AsmJSStoreHeap;
        Register val = value.gpr();
        masm.ma_dataTransferN(IsStore, 8 * byteSize /* bits */, isSigned, HeapReg, ptr, val);
    }
}

void
CodeGeneratorARM::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
{

@ -203,6 +203,9 @@ class CodeGeneratorARM : public CodeGeneratorShared
    void visitAsmSelect(LAsmSelect* ins);
    void visitAsmReinterpret(LAsmReinterpret* ins);
    void visitAsmJSCall(LAsmJSCall* ins);
    void visitWasmBoundsCheck(LWasmBoundsCheck* ins);
    void visitWasmLoad(LWasmLoad* ins);
    void visitWasmStore(LWasmStore* ins);
    void visitAsmJSLoadHeap(LAsmJSLoadHeap* ins);
    void visitAsmJSStoreHeap(LAsmJSStoreHeap* ins);
    void visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins);

@ -497,6 +497,48 @@ LIRGeneratorARM::visitAsmJSUnsignedToFloat32(MAsmJSUnsignedToFloat32* ins)
    define(lir, ins);
}

void
LIRGeneratorARM::visitWasmBoundsCheck(MWasmBoundsCheck* ins)
{
    MDefinition* input = ins->input();
    MOZ_ASSERT(input->type() == MIRType::Int32);

    LAllocation baseAlloc = useRegisterAtStart(input);
    auto* lir = new(alloc()) LWasmBoundsCheck(baseAlloc);
    add(lir, ins);
}

void
LIRGeneratorARM::visitWasmLoad(MWasmLoad* ins)
{
    MDefinition* base = ins->base();
    MOZ_ASSERT(base->type() == MIRType::Int32);

    LAllocation baseAlloc = useRegisterAtStart(base);
    auto* lir = new(alloc()) LWasmLoad(baseAlloc);

    if (ins->offset())
        lir->setTemp(0, tempCopy(base, 0));

    define(lir, ins);
}

void
LIRGeneratorARM::visitWasmStore(MWasmStore* ins)
{
    MDefinition* base = ins->base();
    MOZ_ASSERT(base->type() == MIRType::Int32);

    LAllocation baseAlloc = useRegisterAtStart(base);
    LAllocation valueAlloc = useRegisterAtStart(ins->value());
    auto* lir = new(alloc()) LWasmStore(baseAlloc, valueAlloc);

    if (ins->offset())
        lir->setTemp(0, tempCopy(base, 0));

    add(lir, ins);
}

void
LIRGeneratorARM::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins)
{

@ -97,6 +97,9 @@ class LIRGeneratorARM : public LIRGeneratorShared
    void visitAsmSelect(MAsmSelect* ins);
    void visitAsmJSUnsignedToDouble(MAsmJSUnsignedToDouble* ins);
    void visitAsmJSUnsignedToFloat32(MAsmJSUnsignedToFloat32* ins);
    void visitWasmBoundsCheck(MWasmBoundsCheck* ins);
    void visitWasmLoad(MWasmLoad* ins);
    void visitWasmStore(MWasmStore* ins);
    void visitAsmJSLoadHeap(MAsmJSLoadHeap* ins);
    void visitAsmJSStoreHeap(MAsmJSStoreHeap* ins);
    void visitAsmJSLoadFuncPtr(MAsmJSLoadFuncPtr* ins);

@ -324,6 +324,24 @@ LIRGeneratorARM64::visitWasmTruncateToInt64(MWasmTruncateToInt64* ins)
    MOZ_CRASH("NY");
}

void
LIRGeneratorARM64::visitWasmBoundsCheck(MWasmBoundsCheck* ins)
{
    MOZ_CRASH("NY");
}

void
LIRGeneratorARM64::visitWasmLoad(MWasmLoad* ins)
{
    MOZ_CRASH("NY");
}

void
LIRGeneratorARM64::visitWasmStore(MWasmStore* ins)
{
    MOZ_CRASH("NY");
}

void
LIRGeneratorARM64::visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins)
{

@ -113,6 +113,9 @@ class LIRGeneratorARM64 : public LIRGeneratorShared
    void visitSubstr(MSubstr* ins);
    void visitRandom(MRandom* ins);
    void visitWasmTruncateToInt64(MWasmTruncateToInt64* ins);
    void visitWasmBoundsCheck(MWasmBoundsCheck* ins);
    void visitWasmLoad(MWasmLoad* ins);
    void visitWasmStore(MWasmStore* ins);
    void visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins);
    void visitCopySign(MCopySign* ins);
};

@ -61,7 +61,7 @@ class Token {
  virtual bool IsUnknown() const { return false; }
  // Token properties.
  virtual bool CanAddressMemory() const { return false; }
  virtual uint8_t* ToAddress(Debugger* debugger) const;
  virtual uint8_t* ToAddress(Debugger* debugger) const = 0;
  virtual void Print(FILE* out = stdout) const = 0;

  static Token* Tokenize(const char* arg);
@ -77,6 +77,11 @@ template<typename T> class ValueToken : public Token {

  T value() const { return value_; }

  virtual uint8_t* ToAddress(Debugger* debugger) const {
    USE(debugger);
    VIXL_ABORT();
  }

 protected:
  T value_;
};
@ -202,6 +207,11 @@ class FormatToken : public Token {
  virtual void PrintData(void* data, FILE* out = stdout) const = 0;
  virtual void Print(FILE* out = stdout) const = 0;

  virtual uint8_t* ToAddress(Debugger* debugger) const {
    USE(debugger);
    VIXL_ABORT();
  }

  static Token* Tokenize(const char* arg);
  static FormatToken* Cast(Token* tok) {
    VIXL_ASSERT(tok->IsFormat());
@ -237,6 +247,10 @@ class UnknownToken : public Token {
    strncpy(unknown_, arg, size);
  }
  virtual ~UnknownToken() { js_free(unknown_); }
  virtual uint8_t* ToAddress(Debugger* debugger) const {
    USE(debugger);
    VIXL_ABORT();
  }

  virtual bool IsUnknown() const { return true; }
  virtual void Print(FILE* out = stdout) const;
@ -797,13 +811,6 @@ static bool StringToInt64(int64_t* value, const char* line, int base = 10) {
}


uint8_t* Token::ToAddress(Debugger* debugger) const {
  USE(debugger);
  VIXL_UNREACHABLE();
  return NULL;
}


Token* Token::Tokenize(const char* arg) {
  if ((arg == NULL) || (*arg == '\0')) {
    return NULL;

@ -24,6 +24,8 @@
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifdef JS_SIMULATOR_ARM64

#ifndef VIXL_A64_DEBUGGER_A64_H_
#define VIXL_A64_DEBUGGER_A64_H_

@ -111,3 +113,5 @@ class Debugger : public Simulator {
}  // namespace vixl

#endif  // VIXL_A64_DEBUGGER_A64_H_

#endif  // JS_SIMULATOR_ARM64

@ -37,7 +37,7 @@

// List macro containing all visitors needed by the decoder class.

#define VISITOR_LIST(V) \
#define VISITOR_LIST_THAT_RETURN(V) \
  V(PCRelAddressing) \
  V(AddSubImmediate) \
  V(LogicalImmediate) \
@ -106,8 +106,14 @@
  V(NEONShiftImmediate) \
  V(NEONTable) \
  V(NEONPerm) \
  V(Unallocated) \
  V(Unimplemented)

#define VISITOR_LIST_THAT_DONT_RETURN(V) \
  V(Unallocated) \
  V(Unimplemented) \

#define VISITOR_LIST(V) \
  VISITOR_LIST_THAT_RETURN(V) \
  VISITOR_LIST_THAT_DONT_RETURN(V) \

namespace vixl {

@ -24,6 +24,8 @@
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifdef JS_SIMULATOR_ARM64

#include <cmath>

#include "jit/arm64/vixl/Simulator-vixl.h"
@ -4872,3 +4874,5 @@ LogicVRegister Simulator::ucvtf(VectorFormat vform,


}  // namespace vixl

#endif  // JS_SIMULATOR_ARM64

@ -26,6 +26,8 @@

#include "jit/arm64/vixl/MacroAssembler-vixl.h"

#include <ctype.h>

namespace vixl {

MacroAssembler::MacroAssembler()

@ -33,6 +33,8 @@
#include "jit/arm64/Assembler-arm64.h"
#include "jit/arm64/vixl/Debugger-vixl.h"
#include "jit/arm64/vixl/Globals-vixl.h"
#include "jit/arm64/vixl/Instrument-vixl.h"
#include "jit/arm64/vixl/Simulator-Constants-vixl.h"

#define LS_MACRO_LIST(V) \
  V(Ldrb, Register&, rt, LDRB_w) \

js/src/jit/arm64/vixl/Simulator-Constants-vixl.h (new file, 141 lines)
@ -0,0 +1,141 @@
// Copyright 2015, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_A64_SIMULATOR_CONSTANTS_A64_H_
#define VIXL_A64_SIMULATOR_CONSTANTS_A64_H_

namespace vixl {

// Debug instructions.
//
// VIXL's macro-assembler and simulator support a few pseudo instructions to
// make debugging easier. These pseudo instructions do not exist on real
// hardware.
//
// TODO: Also consider allowing these pseudo-instructions to be disabled in the
// simulator, so that users can check that the input is a valid native code.
// (This isn't possible in all cases. Printf won't work, for example.)
//
// Each debug pseudo instruction is represented by a HLT instruction. The HLT
// immediate field is used to identify the type of debug pseudo instruction.

enum DebugHltOpcodes {
  kUnreachableOpcode = 0xdeb0,
  kPrintfOpcode,
  kTraceOpcode,
  kLogOpcode,
  // Aliases.
  kDebugHltFirstOpcode = kUnreachableOpcode,
  kDebugHltLastOpcode = kLogOpcode
};

// Each pseudo instruction uses a custom encoding for additional arguments, as
// described below.

// Unreachable - kUnreachableOpcode
//
// Instruction which should never be executed. This is used as a guard in parts
// of the code that should not be reachable, such as in data encoded inline in
// the instructions.

// Printf - kPrintfOpcode
//  - arg_count: The number of arguments.
//  - arg_pattern: A set of PrintfArgPattern values, packed into two-bit fields.
//
// Simulate a call to printf.
//
// Floating-point and integer arguments are passed in separate sets of registers
// in AAPCS64 (even for varargs functions), so it is not possible to determine
// the type of each argument without some information about the values that were
// passed in. This information could be retrieved from the printf format string,
// but the format string is not trivial to parse so we encode the relevant
// information with the HLT instruction.
//
// Also, the following registers are populated (as if for a native A64 call):
//  x0: The format string
//  x1-x7: Optional arguments, if type == CPURegister::kRegister
//  d0-d7: Optional arguments, if type == CPURegister::kFPRegister
const unsigned kPrintfArgCountOffset = 1 * kInstructionSize;
const unsigned kPrintfArgPatternListOffset = 2 * kInstructionSize;
const unsigned kPrintfLength = 3 * kInstructionSize;

const unsigned kPrintfMaxArgCount = 4;

// The argument pattern is a set of two-bit-fields, each with one of the
// following values:
enum PrintfArgPattern {
  kPrintfArgW = 1,
  kPrintfArgX = 2,
  // There is no kPrintfArgS because floats are always converted to doubles in C
  // varargs calls.
  kPrintfArgD = 3
};
static const unsigned kPrintfArgPatternBits = 2;
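
A hedged C++ sketch of how such an arg_pattern word can be packed and decoded from these two-bit fields (the PackArgPattern helper is illustrative, not VIXL API):

#include <cassert>

// Each argument's pattern value occupies a two-bit field, lowest field first.
static unsigned PackArgPattern(const unsigned* types, unsigned count) {
    unsigned pattern = 0;
    for (unsigned i = 0; i < count; i++)
        pattern |= types[i] << (i * 2);   // kPrintfArgPatternBits == 2
    return pattern;
}

int main() {
    // printf("%d %g", w0, d1): one W argument, then one D argument.
    unsigned types[] = { 1 /* kPrintfArgW */, 3 /* kPrintfArgD */ };
    unsigned pattern = PackArgPattern(types, 2);
    assert(((pattern >> 0) & 3) == 1);   // first field decodes to W
    assert(((pattern >> 2) & 3) == 3);   // second field decodes to D
}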

// Trace - kTraceOpcode
//  - parameter: TraceParameter stored as a uint32_t
//  - command: TraceCommand stored as a uint32_t
//
// Allow for trace management in the generated code. This enables or disables
// automatic tracing of the specified information for every simulated
// instruction.
const unsigned kTraceParamsOffset = 1 * kInstructionSize;
const unsigned kTraceCommandOffset = 2 * kInstructionSize;
const unsigned kTraceLength = 3 * kInstructionSize;

// Trace parameters.
enum TraceParameters {
  LOG_DISASM   = 1 << 0,  // Log disassembly.
  LOG_REGS     = 1 << 1,  // Log general purpose registers.
  LOG_VREGS    = 1 << 2,  // Log NEON and floating-point registers.
  LOG_SYSREGS  = 1 << 3,  // Log the flags and system registers.
  LOG_WRITE    = 1 << 4,  // Log writes to memory.

  LOG_NONE     = 0,
  LOG_STATE    = LOG_REGS | LOG_VREGS | LOG_SYSREGS,
  LOG_ALL      = LOG_DISASM | LOG_STATE | LOG_WRITE
};

// Trace commands.
enum TraceCommand {
  TRACE_ENABLE   = 1,
  TRACE_DISABLE  = 2
};

// Log - kLogOpcode
//  - parameter: TraceParameter stored as a uint32_t
//
// Print the specified information once. This mechanism is separate from Trace.
// In particular, _all_ of the specified registers are printed, rather than just
// the registers that the instruction writes.
//
// Any combination of the TraceParameters values can be used, except that
// LOG_DISASM is not supported for Log.
const unsigned kLogParamsOffset = 1 * kInstructionSize;
const unsigned kLogLength = 2 * kInstructionSize;
}  // namespace vixl

#endif  // VIXL_A64_SIMULATOR_CONSTANTS_A64_H_
@ -213,56 +213,39 @@ void Simulator::set_instruction_stats(bool value) {
}

// Helpers ---------------------------------------------------------------------
int64_t Simulator::AddWithCarry(unsigned reg_size,
                                bool set_flags,
                                int64_t src1,
                                int64_t src2,
                                int64_t carry_in) {
uint64_t Simulator::AddWithCarry(unsigned reg_size,
                                 bool set_flags,
                                 uint64_t left,
                                 uint64_t right,
                                 int carry_in) {
  VIXL_ASSERT((carry_in == 0) || (carry_in == 1));
  VIXL_ASSERT((reg_size == kXRegSize) || (reg_size == kWRegSize));

  uint64_t u1, u2;
  int64_t result;
  int64_t signed_sum = src1 + src2 + carry_in;
  uint64_t max_uint = (reg_size == kWRegSize) ? kWMaxUInt : kXMaxUInt;
  uint64_t reg_mask = (reg_size == kWRegSize) ? kWRegMask : kXRegMask;
  uint64_t sign_mask = (reg_size == kWRegSize) ? kWSignMask : kXSignMask;

  uint32_t N, Z, C, V;

  if (reg_size == kWRegSize) {
    u1 = static_cast<uint64_t>(src1) & kWRegMask;
    u2 = static_cast<uint64_t>(src2) & kWRegMask;

    result = signed_sum & kWRegMask;
    // Compute the C flag by comparing the sum to the max unsigned integer.
    C = ((kWMaxUInt - u1) < (u2 + carry_in)) ||
        ((kWMaxUInt - u1 - carry_in) < u2);
    // Overflow iff the sign bit is the same for the two inputs and different
    // for the result.
    int64_t s_src1 = src1 << (kXRegSize - kWRegSize);
    int64_t s_src2 = src2 << (kXRegSize - kWRegSize);
    int64_t s_result = result << (kXRegSize - kWRegSize);
    V = ((s_src1 ^ s_src2) >= 0) && ((s_src1 ^ s_result) < 0);

  } else {
    u1 = static_cast<uint64_t>(src1);
    u2 = static_cast<uint64_t>(src2);

    result = signed_sum;
    // Compute the C flag by comparing the sum to the max unsigned integer.
    C = ((kXMaxUInt - u1) < (u2 + carry_in)) ||
        ((kXMaxUInt - u1 - carry_in) < u2);
    // Overflow iff the sign bit is the same for the two inputs and different
    // for the result.
    V = ((src1 ^ src2) >= 0) && ((src1 ^ result) < 0);
  }

  N = CalcNFlag(result, reg_size);
  Z = CalcZFlag(result);
  left &= reg_mask;
  right &= reg_mask;
  uint64_t result = (left + right + carry_in) & reg_mask;

  if (set_flags) {
    nzcv().SetN(N);
    nzcv().SetZ(Z);
    nzcv().SetC(C);
    nzcv().SetV(V);
    nzcv().SetN(CalcNFlag(result, reg_size));
    nzcv().SetZ(CalcZFlag(result));

    // Compute the C flag by comparing the result to the max unsigned integer.
    uint64_t max_uint_2op = max_uint - carry_in;
    bool C = (left > max_uint_2op) || ((max_uint_2op - left) < right);
    nzcv().SetC(C ? 1 : 0);

    // Overflow iff the sign bit is the same for the two inputs and different
    // for the result.
    uint64_t left_sign = left & sign_mask;
    uint64_t right_sign = right & sign_mask;
    uint64_t result_sign = result & sign_mask;
    bool V = (left_sign == right_sign) && (left_sign != result_sign);
    nzcv().SetV(V ? 1 : 0);

    LogSystemRegister(NZCV);
  }
  return result;
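
A worked example of the new flag logic, as a standalone C++ sketch of the 32-bit case (illustrative, not part of the patch): adding 0xffffffff and 1 wraps to zero, so C (and Z) are set while V stays clear:

#include <cstdint>
#include <cassert>

int main() {
    const uint64_t regMask = 0xffffffff, maxUint = 0xffffffff, signMask = 0x80000000;
    uint64_t left = 0xffffffff, right = 1, carryIn = 0;
    uint64_t result = (left + right + carryIn) & regMask;       // wraps to 0
    // Carry out iff the unsigned sum exceeds the register's maximum value.
    bool C = (left > maxUint - carryIn) || ((maxUint - carryIn - left) < right);
    // Signed overflow iff both inputs share a sign that the result lacks.
    bool V = ((left & signMask) == (right & signMask)) &&
             ((left & signMask) != (result & signMask));
    assert(result == 0 && C && !V);   // carry out, zero result, no signed overflow
}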
@ -40,6 +40,7 @@
#include "jit/arm64/vixl/Globals-vixl.h"
#include "jit/arm64/vixl/Instructions-vixl.h"
#include "jit/arm64/vixl/Instrument-vixl.h"
#include "jit/arm64/vixl/Simulator-Constants-vixl.h"
#include "jit/arm64/vixl/Utils-vixl.h"
#include "jit/IonTypes.h"
#include "threading/Mutex.h"
@ -55,120 +56,6 @@

namespace vixl {

// Debug instructions.
//
// VIXL's macro-assembler and simulator support a few pseudo instructions to
// make debugging easier. These pseudo instructions do not exist on real
// hardware.
//
// TODO: Provide controls to prevent the macro assembler from emitting
// pseudo-instructions. This is important for ahead-of-time compilers, where the
// macro assembler is built with USE_SIMULATOR but the code will eventually be
// run on real hardware.
//
// TODO: Also consider allowing these pseudo-instructions to be disabled in the
// simulator, so that users can check that the input is a valid native code.
// (This isn't possible in all cases. Printf won't work, for example.)
//
// Each debug pseudo instruction is represented by a HLT instruction. The HLT
// immediate field is used to identify the type of debug pseudo instruction.

enum DebugHltOpcodes {
  kUnreachableOpcode = 0xdeb0,
  kPrintfOpcode,
  kTraceOpcode,
  kLogOpcode,
  // Aliases.
  kDebugHltFirstOpcode = kUnreachableOpcode,
  kDebugHltLastOpcode = kLogOpcode
};

// Each pseudo instruction uses a custom encoding for additional arguments, as
// described below.

// Unreachable - kUnreachableOpcode
//
// Instruction which should never be executed. This is used as a guard in parts
// of the code that should not be reachable, such as in data encoded inline in
// the instructions.

// Printf - kPrintfOpcode
//  - arg_count: The number of arguments.
//  - arg_pattern: A set of PrintfArgPattern values, packed into two-bit fields.
//
// Simulate a call to printf.
//
// Floating-point and integer arguments are passed in separate sets of registers
// in AAPCS64 (even for varargs functions), so it is not possible to determine
// the type of each argument without some information about the values that were
// passed in. This information could be retrieved from the printf format string,
// but the format string is not trivial to parse so we encode the relevant
// information with the HLT instruction.
//
// Also, the following registers are populated (as if for a native A64 call):
//  x0: The format string
//  x1-x7: Optional arguments, if type == CPURegister::kRegister
//  d0-d7: Optional arguments, if type == CPURegister::kFPRegister
const unsigned kPrintfArgCountOffset = 1 * kInstructionSize;
const unsigned kPrintfArgPatternListOffset = 2 * kInstructionSize;
const unsigned kPrintfLength = 3 * kInstructionSize;

const unsigned kPrintfMaxArgCount = 4;

// The argument pattern is a set of two-bit-fields, each with one of the
// following values:
enum PrintfArgPattern {
  kPrintfArgW = 1,
  kPrintfArgX = 2,
  // There is no kPrintfArgS because floats are always converted to doubles in C
  // varargs calls.
  kPrintfArgD = 3
};
static const unsigned kPrintfArgPatternBits = 2;

// Trace - kTraceOpcode
//  - parameter: TraceParameter stored as a uint32_t
//  - command: TraceCommand stored as a uint32_t
//
// Allow for trace management in the generated code. This enables or disables
// automatic tracing of the specified information for every simulated
// instruction.
const unsigned kTraceParamsOffset = 1 * kInstructionSize;
const unsigned kTraceCommandOffset = 2 * kInstructionSize;
const unsigned kTraceLength = 3 * kInstructionSize;

// Trace parameters.
enum TraceParameters {
  LOG_DISASM   = 1 << 0,  // Log disassembly.
  LOG_REGS     = 1 << 1,  // Log general purpose registers.
  LOG_VREGS    = 1 << 2,  // Log NEON and floating-point registers.
  LOG_SYSREGS  = 1 << 3,  // Log the flags and system registers.
  LOG_WRITE    = 1 << 4,  // Log writes to memory.

  LOG_NONE     = 0,
  LOG_STATE    = LOG_REGS | LOG_VREGS | LOG_SYSREGS,
  LOG_ALL      = LOG_DISASM | LOG_STATE | LOG_WRITE
};

// Trace commands.
enum TraceCommand {
  TRACE_ENABLE   = 1,
  TRACE_DISABLE  = 2
};

// Log - kLogOpcode
//  - parameter: TraceParameter stored as a uint32_t
//
// Print the specified information once. This mechanism is separate from Trace.
// In particular, _all_ of the specified registers are printed, rather than just
// the registers that the instruction writes.
//
// Any combination of the TraceParameters values can be used, except that
// LOG_DISASM is not supported for Log.
const unsigned kLogParamsOffset = 1 * kInstructionSize;
const unsigned kLogLength = 2 * kInstructionSize;


// Assemble the specified IEEE-754 components into the target type and apply
// appropriate rounding.
//  sign: 0 = positive, 1 = negative
@ -877,9 +764,11 @@ class Simulator : public DecoderVisitor {

  // Declare all Visitor functions.
  #define DECLARE(A) virtual void Visit##A(const Instruction* instr);
  VISITOR_LIST(DECLARE)
  VISITOR_LIST_THAT_RETURN(DECLARE)
  VISITOR_LIST_THAT_DONT_RETURN(DECLARE)
  #undef DECLARE


  // Integer register accessors.

  // Basic accessor: Read the register as the specified type.
@ -1434,11 +1323,11 @@ class Simulator : public DecoderVisitor {
  }

  void AddSubHelper(const Instruction* instr, int64_t op2);
  int64_t AddWithCarry(unsigned reg_size,
                       bool set_flags,
                       int64_t src1,
                       int64_t src2,
                       int64_t carry_in = 0);
  uint64_t AddWithCarry(unsigned reg_size,
                        bool set_flags,
                        uint64_t left,
                        uint64_t right,
                        int carry_in = 0);
  void LogicalHelper(const Instruction* instr, int64_t op2);
  void ConditionalCompareHelper(const Instruction* instr, int64_t op2);
  void LoadStoreHelper(const Instruction* instr,
@ -2677,7 +2566,7 @@ class Simulator : public DecoderVisitor {
  }

  static int CalcZFlag(uint64_t result) {
    return result == 0;
    return (result == 0) ? 1 : 0;
  }

  static const uint32_t kConditionFlagsMask = 0xf0000000;

@ -7663,6 +7663,63 @@ class LAsmSelectI64 : public LAsmSelectBase<INT64_PIECES, 2 * INT64_PIECES + 1>
    }
};

class LWasmBoundsCheck : public LInstructionHelper<0, 1, 0>
{
  public:
    LIR_HEADER(WasmBoundsCheck);
    explicit LWasmBoundsCheck(const LAllocation& ptr) {
        setOperand(0, ptr);
    }
    MWasmBoundsCheck* mir() const {
        return mir_->toWasmBoundsCheck();
    }
    const LAllocation* ptr() {
        return getOperand(0);
    }
};

class LWasmLoad : public LInstructionHelper<1, 1, 1>
{
  public:
    LIR_HEADER(WasmLoad);
    explicit LWasmLoad(const LAllocation& ptr) {
        setOperand(0, ptr);
        setTemp(0, LDefinition::BogusTemp());
    }
    MWasmLoad* mir() const {
        return mir_->toWasmLoad();
    }
    const LAllocation* ptr() {
        return getOperand(0);
    }
    const LDefinition* ptrCopy() {
        return getTemp(0);
    }
};

class LWasmStore : public LInstructionHelper<0, 2, 1>
{
  public:
    LIR_HEADER(WasmStore);
    LWasmStore(const LAllocation& ptr, const LAllocation& value) {
        setOperand(0, ptr);
        setOperand(1, value);
        setTemp(0, LDefinition::BogusTemp());
    }
    MWasmStore* mir() const {
        return mir_->toWasmStore();
    }
    const LAllocation* ptr() {
        return getOperand(0);
    }
    const LDefinition* ptrCopy() {
        return getTemp(0);
    }
    const LAllocation* value() {
        return getOperand(1);
    }
};

class LAsmJSLoadHeap : public LInstructionHelper<1, 1, 0>
{
  public:

@ -384,6 +384,9 @@
    _(HasClass) \
    _(AsmSelect) \
    _(AsmSelectI64) \
    _(WasmLoad) \
    _(WasmStore) \
    _(WasmBoundsCheck) \
    _(AsmJSLoadHeap) \
    _(AsmJSStoreHeap) \
    _(AsmJSLoadFuncPtr) \

@ -628,6 +628,66 @@ AsmJSMemoryAccess(uint32_t before, wasm::MemoryAccess::OutOfBoundsBehavior throw
                              offsetWithinWholeSimdVector);
}

static wasm::MemoryAccess
WasmMemoryAccess(uint32_t before)
{
    return wasm::MemoryAccess(before,
                              wasm::MemoryAccess::Throw,
                              wasm::MemoryAccess::DontWrapOffset);
}

void
CodeGeneratorX64::visitWasmLoad(LWasmLoad* ins)
{
    const MWasmLoad* mir = ins->mir();

    Scalar::Type accessType = mir->accessType();
    MOZ_ASSERT(!Scalar::isSimdType(accessType), "SIMD NYI");
    MOZ_ASSERT(!mir->barrierBefore() && !mir->barrierAfter(), "atomics NYI");

    if (mir->offset() > INT32_MAX) {
        masm.jump(wasm::JumpTarget::OutOfBounds);
        return;
    }

    const LAllocation* ptr = ins->ptr();
    Operand srcAddr = ptr->isBogus()
                      ? Operand(HeapReg, mir->offset())
                      : Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset());

    AnyRegister out = ToAnyRegister(ins->output());

    uint32_t before = masm.size();
    load(accessType, srcAddr, out);
    masm.append(WasmMemoryAccess(before));
}

void
CodeGeneratorX64::visitWasmStore(LWasmStore* ins)
{
    const MWasmStore* mir = ins->mir();

    Scalar::Type accessType = mir->accessType();
    MOZ_ASSERT(!Scalar::isSimdType(accessType), "SIMD NYI");
    MOZ_ASSERT(!mir->barrierBefore() && !mir->barrierAfter(), "atomics NYI");

    if (mir->offset() > INT32_MAX) {
        masm.jump(wasm::JumpTarget::OutOfBounds);
        return;
    }

    const LAllocation* value = ins->value();
    const LAllocation* ptr = ins->ptr();
    Operand dstAddr = ptr->isBogus()
                      ? Operand(HeapReg, mir->offset())
                      : Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset());

    uint32_t before = masm.size();
    store(accessType, value, dstAddr);

    masm.append(WasmMemoryAccess(before));
}

void
CodeGeneratorX64::emitSimdLoad(LAsmJSLoadHeap* ins)
{

@ -683,6 +743,29 @@ CodeGeneratorX64::emitSimdLoad(LAsmJSLoadHeap* ins)
        cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
}

void
CodeGeneratorX64::load(Scalar::Type type, const Operand& srcAddr, AnyRegister out)
{
    switch (type) {
      case Scalar::Int8:      masm.movsbl(srcAddr, out.gpr()); break;
      case Scalar::Uint8:     masm.movzbl(srcAddr, out.gpr()); break;
      case Scalar::Int16:     masm.movswl(srcAddr, out.gpr()); break;
      case Scalar::Uint16:    masm.movzwl(srcAddr, out.gpr()); break;
      case Scalar::Int32:
      case Scalar::Uint32:    masm.movl(srcAddr, out.gpr()); break;
      case Scalar::Float32:   masm.loadFloat32(srcAddr, out.fpu()); break;
      case Scalar::Float64:   masm.loadDouble(srcAddr, out.fpu()); break;
      case Scalar::Float32x4:
      case Scalar::Int8x16:
      case Scalar::Int16x8:
      case Scalar::Int32x4:
        MOZ_CRASH("SIMD loads should be handled in emitSimdLoad");
      case Scalar::Uint8Clamped:
      case Scalar::MaxTypedArrayViewType:
        MOZ_CRASH("unexpected array type");
    }
}

void
CodeGeneratorX64::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
{

@ -704,23 +787,7 @@ CodeGeneratorX64::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
    DebugOnly<bool> hasBoundsCheck = maybeEmitAsmJSLoadBoundsCheck(mir, ins, &ool);

    uint32_t before = masm.size();
    switch (accessType) {
      case Scalar::Int8:      masm.movsbl(srcAddr, ToRegister(out)); break;
      case Scalar::Uint8:     masm.movzbl(srcAddr, ToRegister(out)); break;
      case Scalar::Int16:     masm.movswl(srcAddr, ToRegister(out)); break;
      case Scalar::Uint16:    masm.movzwl(srcAddr, ToRegister(out)); break;
      case Scalar::Int32:
      case Scalar::Uint32:    masm.movl(srcAddr, ToRegister(out)); break;
      case Scalar::Float32:   masm.loadFloat32(srcAddr, ToFloatRegister(out)); break;
      case Scalar::Float64:   masm.loadDouble(srcAddr, ToFloatRegister(out)); break;
      case Scalar::Float32x4:
      case Scalar::Int8x16:
      case Scalar::Int16x8:
      case Scalar::Int32x4:   MOZ_CRASH("SIMD loads should be handled in emitSimdLoad");
      case Scalar::Uint8Clamped:
      case Scalar::MaxTypedArrayViewType:
        MOZ_CRASH("unexpected array type");
    }
    load(accessType, srcAddr, ToAnyRegister(out));
    uint32_t after = masm.size();

    verifyHeapAccessDisassembly(before, after, /*isLoad=*/true, accessType, 0, srcAddr, *out->output());
@ -736,6 +803,60 @@ CodeGeneratorX64::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
        masm.append(AsmJSMemoryAccess(before, wasm::MemoryAccess::CarryOn));
}

void
CodeGeneratorX64::store(Scalar::Type type, const LAllocation* value, const Operand& dstAddr)
{
    if (value->isConstant()) {
        Imm32 cst(ToInt32(value));
        switch (type) {
          case Scalar::Int8:
          case Scalar::Uint8:   masm.movb(cst, dstAddr); break;
          case Scalar::Int16:
          case Scalar::Uint16:  masm.movw(cst, dstAddr); break;
          case Scalar::Int32:
          case Scalar::Uint32:  masm.movl(cst, dstAddr); break;
          case Scalar::Float32:
          case Scalar::Float64:
          case Scalar::Float32x4:
          case Scalar::Int8x16:
          case Scalar::Int16x8:
          case Scalar::Int32x4:
          case Scalar::Uint8Clamped:
          case Scalar::MaxTypedArrayViewType:
            MOZ_CRASH("unexpected array type");
        }
    } else {
        switch (type) {
          case Scalar::Int8:
          case Scalar::Uint8:
            masm.movb(ToRegister(value), dstAddr);
            break;
          case Scalar::Int16:
          case Scalar::Uint16:
            masm.movw(ToRegister(value), dstAddr);
            break;
          case Scalar::Int32:
          case Scalar::Uint32:
            masm.movl(ToRegister(value), dstAddr);
            break;
          case Scalar::Float32:
            masm.storeUncanonicalizedFloat32(ToFloatRegister(value), dstAddr);
            break;
          case Scalar::Float64:
            masm.storeUncanonicalizedDouble(ToFloatRegister(value), dstAddr);
            break;
          case Scalar::Float32x4:
          case Scalar::Int8x16:
          case Scalar::Int16x8:
          case Scalar::Int32x4:
            MOZ_CRASH("SIMD stores must be handled in emitSimdStore");
          case Scalar::Uint8Clamped:
          case Scalar::MaxTypedArrayViewType:
            MOZ_CRASH("unexpected array type");
        }
    }
}

void
CodeGeneratorX64::storeSimd(Scalar::Type type, unsigned numElems, FloatRegister in,
                            const Operand& dstAddr)
@ -861,54 +982,7 @@ CodeGeneratorX64::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
    DebugOnly<bool> hasBoundsCheck = maybeEmitAsmJSStoreBoundsCheck(mir, ins, &rejoin);

    uint32_t before = masm.size();
    if (value->isConstant()) {
        switch (accessType) {
          case Scalar::Int8:
          case Scalar::Uint8:   masm.movb(Imm32(ToInt32(value)), dstAddr); break;
          case Scalar::Int16:
          case Scalar::Uint16:  masm.movw(Imm32(ToInt32(value)), dstAddr); break;
          case Scalar::Int32:
          case Scalar::Uint32:  masm.movl(Imm32(ToInt32(value)), dstAddr); break;
          case Scalar::Float32:
          case Scalar::Float64:
          case Scalar::Float32x4:
          case Scalar::Int8x16:
          case Scalar::Int16x8:
          case Scalar::Int32x4:
          case Scalar::Uint8Clamped:
          case Scalar::MaxTypedArrayViewType:
            MOZ_CRASH("unexpected array type");
        }
    } else {
        switch (accessType) {
          case Scalar::Int8:
          case Scalar::Uint8:
            masm.movb(ToRegister(value), dstAddr);
            break;
          case Scalar::Int16:
          case Scalar::Uint16:
            masm.movw(ToRegister(value), dstAddr);
            break;
          case Scalar::Int32:
          case Scalar::Uint32:
            masm.movl(ToRegister(value), dstAddr);
            break;
          case Scalar::Float32:
            masm.storeUncanonicalizedFloat32(ToFloatRegister(value), dstAddr);
            break;
          case Scalar::Float64:
            masm.storeUncanonicalizedDouble(ToFloatRegister(value), dstAddr);
            break;
          case Scalar::Float32x4:
          case Scalar::Int8x16:
          case Scalar::Int16x8:
          case Scalar::Int32x4:
            MOZ_CRASH("SIMD stores must be handled in emitSimdStore");
          case Scalar::Uint8Clamped:
          case Scalar::MaxTypedArrayViewType:
            MOZ_CRASH("unexpected array type");
        }
    }
    store(accessType, value, dstAddr);
    uint32_t after = masm.size();

    verifyHeapAccessDisassembly(before, after, /*isLoad=*/false, accessType, 0, dstAddr, *value);
@ -924,20 +998,6 @@ CodeGeneratorX64::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
        masm.append(AsmJSMemoryAccess(before, wasm::MemoryAccess::CarryOn));
}

static void
MaybeAddAtomicsBoundsCheck(MacroAssemblerX64& masm, MWasmMemoryAccess* mir, Register ptr)
{
    if (!mir->needsBoundsCheck())
        return;

    // Note that we can't use the same machinery as normal asm.js loads/stores
    // since signal-handler bounds checking is not yet implemented for atomic
    // accesses.
    uint32_t cmpOffset = masm.cmp32WithPatch(ptr, Imm32(-mir->endOffset())).offset();
    masm.append(wasm::BoundsCheck(cmpOffset));
    masm.j(Assembler::Above, wasm::JumpTarget::OutOfBounds);
}

void
CodeGeneratorX64::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
{
@ -951,7 +1011,10 @@ CodeGeneratorX64::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
    Register oldval = ToRegister(ins->oldValue());
    Register newval = ToRegister(ins->newValue());

    MaybeAddAtomicsBoundsCheck(masm, mir, ptr);
    // Note that we can't use the same machinery as normal asm.js loads/stores
    // since signal-handler bounds checking is not yet implemented for atomic
    // accesses.
    maybeEmitWasmBoundsCheckBranch(mir, ptr);

    masm.compareExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
                                        srcAddr,
@ -977,7 +1040,8 @@ CodeGeneratorX64::visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins)
    BaseIndex srcAddr(HeapReg, ptr, TimesOne, mir->offset());
    Register value = ToRegister(ins->value());

    MaybeAddAtomicsBoundsCheck(masm, mir, ptr);
    // See comment in visitAsmJSCompareExchangeHeap.
    maybeEmitWasmBoundsCheckBranch(mir, ptr);

    masm.atomicExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
                                       srcAddr,
@ -1006,7 +1070,8 @@ CodeGeneratorX64::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)

    const LAllocation* value = ins->value();

    MaybeAddAtomicsBoundsCheck(masm, mir, ptr);
    // See comment in visitAsmJSCompareExchangeHeap.
    maybeEmitWasmBoundsCheckBranch(mir, ptr);

    AnyRegister output = ToAnyRegister(ins->output());
    if (value->isConstant()) {
@ -1036,7 +1101,8 @@ CodeGeneratorX64::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEff
    BaseIndex srcAddr(HeapReg, ptr, TimesOne, mir->offset());
    const LAllocation* value = ins->value();

    MaybeAddAtomicsBoundsCheck(masm, mir, ptr);
    // See comment in visitAsmJSCompareExchangeHeap.
    maybeEmitWasmBoundsCheckBranch(mir, ptr);

    if (value->isConstant())
        atomicBinopToTypedIntArray(op, accessType, Imm32(ToInt32(value)), srcAddr);

@ -27,9 +27,13 @@ class CodeGeneratorX64 : public CodeGeneratorX86Shared
                               Operand dest, MIRType slotType);
    void memoryBarrier(MemoryBarrierBits barrier);

    void load(Scalar::Type type, const Operand& srcAddr, AnyRegister out);
    void loadSimd(Scalar::Type type, unsigned numElems, const Operand& srcAddr, FloatRegister out);
    void emitSimdLoad(LAsmJSLoadHeap* ins);

    void store(Scalar::Type type, const LAllocation* value, const Operand& dstAddr);
    void storeSimd(Scalar::Type type, unsigned numElems, FloatRegister in, const Operand& dstAddr);

    void emitSimdLoad(LAsmJSLoadHeap* ins);
    void emitSimdStore(LAsmJSStoreHeap* ins);
  public:
    CodeGeneratorX64(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm);
@ -64,6 +68,8 @@ class CodeGeneratorX64 : public CodeGeneratorX86Shared
    void visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir);
    void visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic* ins);
    void visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic* ins);
    void visitWasmLoad(LWasmLoad* ins);
    void visitWasmStore(LWasmStore* ins);
    void visitAsmSelectI64(LAsmSelectI64* ins);
    void visitAsmJSCall(LAsmJSCall* ins);
    void visitAsmJSLoadHeap(LAsmJSLoadHeap* ins);

@ -155,6 +155,40 @@ LIRGeneratorX64::visitAsmJSUnsignedToFloat32(MAsmJSUnsignedToFloat32* ins)
    define(lir, ins);
}

void
LIRGeneratorX64::visitWasmStore(MWasmStore* ins)
{
    MDefinition* base = ins->base();
    MOZ_ASSERT(base->type() == MIRType::Int32);

    LAllocation value;
    switch (ins->accessType()) {
      case Scalar::Int8:
      case Scalar::Uint8:
      case Scalar::Int16:
      case Scalar::Uint16:
      case Scalar::Int32:
      case Scalar::Uint32:
        value = useRegisterOrConstantAtStart(ins->value());
        break;
      case Scalar::Float32:
      case Scalar::Float64:
      case Scalar::Float32x4:
      case Scalar::Int8x16:
      case Scalar::Int16x8:
      case Scalar::Int32x4:
        value = useRegisterAtStart(ins->value());
        break;
      case Scalar::Uint8Clamped:
      case Scalar::MaxTypedArrayViewType:
        MOZ_CRASH("unexpected array type");
    }

    LAllocation baseAlloc = useRegisterOrZeroAtStart(base);
    auto* lir = new(alloc()) LWasmStore(baseAlloc, value);
    add(lir, ins);
}

void
LIRGeneratorX64::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins)
{

@ -56,6 +56,7 @@ class LIRGeneratorX64 : public LIRGeneratorX86Shared
    void visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins);
    void visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins);
    void visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins);
    void visitWasmStore(MWasmStore* ins);
    void visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic* ins);
    void visitSubstr(MSubstr* ins);
    void visitRandom(MRandom* ins);

@ -1077,18 +1077,31 @@ class AssemblerX86Shared : public AssemblerShared
    }

    static void UpdateBoundsCheck(uint8_t* patchAt, uint32_t heapLength) {
        // An access is out-of-bounds iff
        //      ptr + offset + data-type-byte-size > heapLength
        // i.e. ptr > heapLength - data-type-byte-size - offset.
        // data-type-byte-size and offset are already included in the addend so
        // we just have to add the heap length here.
        //
        // On x64, even with signal handling being used for most bounds checks,
        // there may be atomic operations that depend on explicit checks. All
        // accesses that have been recorded are the only ones that need bound
        // checks (see also
        // CodeGeneratorX64::visitAsmJS{Load,Store,CompareExchange,Exchange,AtomicBinop}Heap)
        X86Encoding::AddInt32(patchAt, heapLength);
        // checks.
        //
        // An access is out-of-bounds iff
        //      ptr + offset + data-type-byte-size > heapLength
        // i.e. ptr + offset + data-type-byte-size - 1 >= heapLength
        // i.e. ptr >= heapLength - data-type-byte-size - offset + 1.
        //
        // before := data-type-byte-size + offset - 1
        uint32_t before = reinterpret_cast<uint32_t*>(patchAt)[-1];
        uint32_t after = before + heapLength;

        // If the computed index `before` already is out of bounds,
        // we need to make sure the bounds check will fail all the time.
        // For bounds checks, the sequence of instructions we use is:
        //      cmp(ptrReg, #before)
        //      jae(OutOfBounds)
        // so replace the cmp immediate with 0.
        if (after > heapLength)
            after = 0;

        MOZ_ASSERT_IF(after, int32_t(after) >= int32_t(before));
        reinterpret_cast<uint32_t*>(patchAt)[-1] = after;
    }
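
To make the patching arithmetic above concrete, here is a small self-contained check of the scheme, using the same illustrative values as before (a 4-byte access at constant offset 8, so endOffset = 12, against a 64 KiB heap). It models only the patched immediate, not the real patching code:

#include <cassert>
#include <cstdint>
#include <initializer_list>

int main()
{
    uint32_t heapLength = 64 * 1024;
    uint32_t endOffset = 12;
    // The assembler emitted cmp ptr, (1 - endOffset), so the stored
    // immediate is the two's-complement encoding of -(endOffset - 1).
    uint32_t before = uint32_t(1) - endOffset;   // 0xFFFFFFF5
    uint32_t after = before + heapLength;        // 65536 - 11 == 65525
    if (after > heapLength)                      // wrapped: access never fits
        after = 0;
    assert(after == 65525);
    // "jae OutOfBounds if ptr >= after" rejects exactly the pointers for
    // which ptr + endOffset > heapLength.
    for (uint32_t ptr : {0u, 65524u, 65525u, 70000u})
        assert((ptr >= after) == (uint64_t(ptr) + endOffset > heapLength));
    return 0;
}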

    void breakpoint() {

@ -454,11 +454,14 @@ CodeGeneratorX86Shared::emitAsmJSBoundsCheckBranch(const MWasmMemoryAccess* acce
    // field, so -access->endOffset() will turn into
    // (heapLength - access->endOffset()), allowing us to test whether the end
    // of the access is beyond the end of the heap.
    uint32_t cmpOffset = masm.cmp32WithPatch(ptr, Imm32(-access->endOffset())).offset();
    MOZ_ASSERT(access->endOffset() >= 1,
               "need to subtract 1 to use JAE, see also AssemblerX86Shared::UpdateBoundsCheck");

    uint32_t cmpOffset = masm.cmp32WithPatch(ptr, Imm32(1 - access->endOffset())).offset();
    if (maybeFail)
        masm.j(Assembler::Above, maybeFail);
        masm.j(Assembler::AboveOrEqual, maybeFail);
    else
        masm.j(Assembler::Above, wasm::JumpTarget::OutOfBounds);
        masm.j(Assembler::AboveOrEqual, wasm::JumpTarget::OutOfBounds);

    if (pass)
        masm.bind(pass);
@ -466,6 +469,34 @@ CodeGeneratorX86Shared::emitAsmJSBoundsCheckBranch(const MWasmMemoryAccess* acce
    masm.append(wasm::BoundsCheck(cmpOffset));
}
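
The switch from Above to AboveOrEqual in this hunk goes hand in hand with biasing the patched immediate by one (Imm32(1 - access->endOffset()) instead of Imm32(-access->endOffset())): for unsigned operands, ptr > limit holds exactly when ptr >= limit + 1, which is what the assertion message about "subtract 1 to use JAE" refers to. A trivial illustration of the equivalence:

#include <cassert>
#include <cstdint>

int main()
{
    for (uint32_t ptr = 0; ptr < 4; ptr++) {
        for (uint32_t limit = 0; limit < 4; limit++)
            assert((ptr > limit) == (ptr >= limit + 1)); // JA vs JAE with a biased limit
    }
    return 0;
}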

void
CodeGeneratorX86Shared::visitWasmBoundsCheck(LWasmBoundsCheck* ins)
{
    const MWasmBoundsCheck* mir = ins->mir();
    MOZ_ASSERT(gen->needsBoundsCheckBranch(mir));
    if (mir->offset() > INT32_MAX) {
        masm.jump(wasm::JumpTarget::OutOfBounds);
        return;
    }

    Register ptrReg = ToRegister(ins->ptr());
    maybeEmitWasmBoundsCheckBranch(mir, ptrReg);
}

void
CodeGeneratorX86Shared::maybeEmitWasmBoundsCheckBranch(const MWasmMemoryAccess* mir, Register ptr)
{
    if (!mir->needsBoundsCheck())
        return;

    MOZ_ASSERT(mir->endOffset() >= 1,
               "need to subtract 1 to use JAE, see also AssemblerX86Shared::UpdateBoundsCheck");

    uint32_t cmpOffset = masm.cmp32WithPatch(ptr, Imm32(1 - mir->endOffset())).offset();
    masm.j(Assembler::AboveOrEqual, wasm::JumpTarget::OutOfBounds);
    masm.append(wasm::BoundsCheck(cmpOffset));
}

bool
CodeGeneratorX86Shared::maybeEmitThrowingAsmJSBoundsCheck(const MWasmMemoryAccess* access,
                                                          const MInstruction* mir,

@ -94,24 +94,24 @@ class CodeGeneratorX86Shared : public CodeGeneratorShared
    };

  private:
    void
    emitAsmJSBoundsCheckBranch(const MWasmMemoryAccess* mir, const MInstruction* ins,
                               Register ptr, Label* fail);
    void emitAsmJSBoundsCheckBranch(const MWasmMemoryAccess* mir, const MInstruction* ins,
                                    Register ptr, Label* fail);

  protected:
    void maybeEmitWasmBoundsCheckBranch(const MWasmMemoryAccess* mir, Register ptr);

  public:
    // For SIMD and atomic loads and stores (which throw on out-of-bounds):
    bool
    maybeEmitThrowingAsmJSBoundsCheck(const MWasmMemoryAccess* mir, const MInstruction* ins,
                                      const LAllocation* ptr);
    bool maybeEmitThrowingAsmJSBoundsCheck(const MWasmMemoryAccess* mir, const MInstruction* ins,
                                           const LAllocation* ptr);

    // For asm.js plain and atomic loads that possibly require a bounds check:
    bool
    maybeEmitAsmJSLoadBoundsCheck(const MAsmJSLoadHeap* mir, LAsmJSLoadHeap* ins,
                                  OutOfLineLoadTypedArrayOutOfBounds** ool);
    bool maybeEmitAsmJSLoadBoundsCheck(const MAsmJSLoadHeap* mir, LAsmJSLoadHeap* ins,
                                       OutOfLineLoadTypedArrayOutOfBounds** ool);

    // For asm.js plain and atomic stores that possibly require a bounds check:
    bool
    maybeEmitAsmJSStoreBoundsCheck(const MAsmJSStoreHeap* mir, LAsmJSStoreHeap* ins, Label** rejoin);
    bool maybeEmitAsmJSStoreBoundsCheck(const MAsmJSStoreHeap* mir, LAsmJSStoreHeap* ins,
                                        Label** rejoin);

    void cleanupAfterAsmJSBoundsCheckBranch(const MWasmMemoryAccess* mir, Register ptr);

@ -272,6 +272,7 @@ class CodeGeneratorX86Shared : public CodeGeneratorShared
    virtual void visitAsmJSPassStackArg(LAsmJSPassStackArg* ins);
    virtual void visitAsmSelect(LAsmSelect* ins);
    virtual void visitAsmReinterpret(LAsmReinterpret* lir);
    virtual void visitWasmBoundsCheck(LWasmBoundsCheck* ins);
    virtual void visitMemoryBarrier(LMemoryBarrier* ins);
    virtual void visitAtomicTypedArrayElementBinop(LAtomicTypedArrayElementBinop* lir);
    virtual void visitAtomicTypedArrayElementBinopForEffect(LAtomicTypedArrayElementBinopForEffect* lir);

@ -322,6 +322,27 @@ LIRGeneratorX86Shared::visitAsmJSNeg(MAsmJSNeg* ins)
    }
}

void
LIRGeneratorX86Shared::visitWasmBoundsCheck(MWasmBoundsCheck* ins)
{
    if (!gen->needsBoundsCheckBranch(ins))
        return;

    MDefinition* index = ins->input();
    auto* lir = new(alloc()) LWasmBoundsCheck(useRegisterAtStart(index));
    add(lir, ins);
}

void
LIRGeneratorX86Shared::visitWasmLoad(MWasmLoad* ins)
{
    MDefinition* base = ins->base();
    MOZ_ASSERT(base->type() == MIRType::Int32);

    auto* lir = new(alloc()) LWasmLoad(useRegisterOrZeroAtStart(base));
    define(lir, ins);
}

void
LIRGeneratorX86Shared::lowerUDiv(MDiv* div)
{

@ -47,6 +47,8 @@ class LIRGeneratorX86Shared : public LIRGeneratorShared
    void lowerForBitAndAndBranch(LBitAndAndBranch* baab, MInstruction* mir,
                                 MDefinition* lhs, MDefinition* rhs);
    void visitAsmJSNeg(MAsmJSNeg* ins);
    void visitWasmBoundsCheck(MWasmBoundsCheck* ins);
    void visitWasmLoad(MWasmLoad* ins);
    void visitAsmSelect(MAsmSelect* ins);
    void lowerMulI(MMul* mul, MDefinition* lhs, MDefinition* rhs);
    void lowerDivI(MDiv* div);

@ -42,17 +42,6 @@ SetInt32(void* where, int32_t value)
    reinterpret_cast<int32_t*>(where)[-1] = value;
}

inline void
AddInt32(void* where, int32_t value)
{
#ifdef DEBUG
    uint32_t x = reinterpret_cast<uint32_t*>(where)[-1];
    uint32_t y = x + uint32_t(value);
    MOZ_ASSERT(value >= 0 ? (int32_t(y) >= int32_t(x)) : (int32_t(y) < int32_t(x)));
#endif
    reinterpret_cast<uint32_t*>(where)[-1] += uint32_t(value);
}

inline void
SetRel32(void* from, void* to)
{

@ -394,6 +394,57 @@ CodeGeneratorX86::loadSimd(Scalar::Type type, unsigned numElems, const Operand&
    }
}

void
CodeGeneratorX86::visitWasmLoad(LWasmLoad* ins)
{
    const MWasmLoad* mir = ins->mir();

    Scalar::Type accessType = mir->accessType();
    MOZ_ASSERT(!Scalar::isSimdType(accessType), "SIMD NYI");
    MOZ_ASSERT(!mir->barrierBefore() && !mir->barrierAfter(), "atomics NYI");

    if (mir->offset() > INT32_MAX) {
        // This is unreachable because of the bounds check.
        masm.breakpoint();
        return;
    }

    const LAllocation* ptr = ins->ptr();
    Operand srcAddr = ptr->isBogus()
                      ? Operand(PatchedAbsoluteAddress(mir->offset()))
                      : Operand(ToRegister(ptr), mir->offset());

    load(accessType, srcAddr, ins->output());

    masm.append(wasm::MemoryAccess(masm.size()));
}

void
CodeGeneratorX86::visitWasmStore(LWasmStore* ins)
{
    const MWasmStore* mir = ins->mir();

    Scalar::Type accessType = mir->accessType();
    MOZ_ASSERT(!Scalar::isSimdType(accessType), "SIMD NYI");
    MOZ_ASSERT(!mir->barrierBefore() && !mir->barrierAfter(), "atomics NYI");

    if (mir->offset() > INT32_MAX) {
        // This is unreachable because of the bounds check.
        masm.breakpoint();
        return;
    }

    const LAllocation* value = ins->value();
    const LAllocation* ptr = ins->ptr();
    Operand dstAddr = ptr->isBogus()
                      ? Operand(PatchedAbsoluteAddress(mir->offset()))
                      : Operand(ToRegister(ptr), mir->offset());

    store(accessType, value, dstAddr);

    masm.append(wasm::MemoryAccess(masm.size()));
}
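
A sketch of the effective-address selection shared by the two functions above. This is my reading of the two Operand forms, an assumption rather than something the patch states: a "bogus" pointer allocation means the base was a known constant already folded into mir->offset(), so the access compiles to an absolute address, while a live pointer compiles to register-plus-displacement; in both forms the recorded wasm::MemoryAccess lets later patching fold in the heap base:

#include <cstdint>

// Illustrative only: models where the final x86 effective address points,
// assuming patching adds the heap base to the displacement in both forms.
uint8_t* effectiveAddress(uint8_t* heapBase, bool baseIsBogus,
                          uint32_t baseReg, uint32_t offset)
{
    return baseIsBogus
           ? heapBase + offset            // Operand(PatchedAbsoluteAddress(offset))
           : heapBase + baseReg + offset; // Operand(ToRegister(ptr), offset)
}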

void
CodeGeneratorX86::emitSimdLoad(LAsmJSLoadHeap* ins)
{
@ -664,6 +715,23 @@ CodeGeneratorX86::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
    masm.append(wasm::MemoryAccess(after));
}

// Perform bounds checking on the access if necessary; if it fails,
// jump to out-of-line code that throws. If the bounds check passes,
// set up the heap address in addrTemp.

void
CodeGeneratorX86::asmJSAtomicComputeAddress(Register addrTemp, Register ptrReg,
                                            const MWasmMemoryAccess* mir)
{
    maybeEmitWasmBoundsCheckBranch(mir, ptrReg);

    // Add in the actual heap pointer explicitly, to avoid opening up
    // the abstraction that is atomicBinopToTypedIntArray at this time.
    masm.movl(ptrReg, addrTemp);
    masm.addlWithPatch(Imm32(mir->offset()), addrTemp);
    masm.append(wasm::MemoryAccess(masm.size()));
}

void
CodeGeneratorX86::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
{
@ -674,8 +742,7 @@ CodeGeneratorX86::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
    Register newval = ToRegister(ins->newValue());
    Register addrTemp = ToRegister(ins->addrTemp());

    asmJSAtomicComputeAddress(addrTemp, ptrReg, mir->needsBoundsCheck(), mir->offset(),
                              mir->endOffset());
    asmJSAtomicComputeAddress(addrTemp, ptrReg, mir);

    Address memAddr(addrTemp, mir->offset());
    masm.compareExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
@ -686,27 +753,6 @@ CodeGeneratorX86::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
                                        ToAnyRegister(ins->output()));
}

// Perform bounds checking on the access if necessary; if it fails,
// jump to out-of-line code that throws. If the bounds check passes,
// set up the heap address in addrTemp.

void
CodeGeneratorX86::asmJSAtomicComputeAddress(Register addrTemp, Register ptrReg, bool boundsCheck,
                                            uint32_t offset, uint32_t endOffset)
{
    if (boundsCheck) {
        uint32_t cmpOffset = masm.cmp32WithPatch(ptrReg, Imm32(-endOffset)).offset();
        masm.j(Assembler::Above, wasm::JumpTarget::OutOfBounds);
        masm.append(wasm::BoundsCheck(cmpOffset));
    }

    // Add in the actual heap pointer explicitly, to avoid opening up
    // the abstraction that is atomicBinopToTypedIntArray at this time.
    masm.movl(ptrReg, addrTemp);
    masm.addlWithPatch(Imm32(offset), addrTemp);
    masm.append(wasm::MemoryAccess(masm.size()));
}

void
CodeGeneratorX86::visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins)
{
@ -716,8 +762,7 @@ CodeGeneratorX86::visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins)
    Register value = ToRegister(ins->value());
    Register addrTemp = ToRegister(ins->addrTemp());

    asmJSAtomicComputeAddress(addrTemp, ptrReg, mir->needsBoundsCheck(), mir->offset(),
                              mir->endOffset());
    asmJSAtomicComputeAddress(addrTemp, ptrReg, mir);

    Address memAddr(addrTemp, mir->offset());
    masm.atomicExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
@ -738,8 +783,7 @@ CodeGeneratorX86::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)
    const LAllocation* value = ins->value();
    AtomicOp op = mir->operation();

    asmJSAtomicComputeAddress(addrTemp, ptrReg, mir->needsBoundsCheck(), mir->offset(),
                              mir->endOffset());
    asmJSAtomicComputeAddress(addrTemp, ptrReg, mir);

    Address memAddr(addrTemp, mir->offset());
    if (value->isConstant()) {
@ -771,8 +815,7 @@ CodeGeneratorX86::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEff

    MOZ_ASSERT(!mir->hasUses());

    asmJSAtomicComputeAddress(addrTemp, ptrReg, mir->needsBoundsCheck(), mir->offset(),
                              mir->endOffset());
    asmJSAtomicComputeAddress(addrTemp, ptrReg, mir);

    Address memAddr(addrTemp, mir->offset());
    if (value->isConstant())

@ -58,6 +58,8 @@ class CodeGeneratorX86 : public CodeGeneratorX86Shared
    void visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic* ins);
    void visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic* ins);
    void visitAsmJSCall(LAsmJSCall* ins);
    void visitWasmLoad(LWasmLoad* ins);
    void visitWasmStore(LWasmStore* ins);
    void visitAsmJSLoadHeap(LAsmJSLoadHeap* ins);
    void visitAsmJSStoreHeap(LAsmJSStoreHeap* ins);
    void visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins);
@ -74,8 +76,8 @@ class CodeGeneratorX86 : public CodeGeneratorX86Shared
    void visitOutOfLineTruncateFloat32(OutOfLineTruncateFloat32* ool);

  private:
    void asmJSAtomicComputeAddress(Register addrTemp, Register ptrReg, bool boundsCheck,
                                   uint32_t offset, uint32_t endOffset);
    void asmJSAtomicComputeAddress(Register addrTemp, Register ptrReg,
                                   const MWasmMemoryAccess* access);
};

typedef CodeGeneratorX86 CodeGeneratorSpecific;

@ -203,6 +203,40 @@ LIRGeneratorX86::visitAsmJSUnsignedToFloat32(MAsmJSUnsignedToFloat32* ins)
    define(lir, ins);
}

void
LIRGeneratorX86::visitWasmStore(MWasmStore* ins)
{
    MDefinition* base = ins->base();
    MOZ_ASSERT(base->type() == MIRType::Int32);

    LAllocation baseAlloc = useRegisterOrZeroAtStart(base);

    LAllocation valueAlloc;
    switch (ins->accessType()) {
      case Scalar::Int8: case Scalar::Uint8:
        // See comment for LIRGeneratorX86::useByteOpRegister.
        valueAlloc = useFixed(ins->value(), eax);
        break;
      case Scalar::Int16: case Scalar::Uint16:
      case Scalar::Int32: case Scalar::Uint32:
      case Scalar::Float32: case Scalar::Float64:
      case Scalar::Float32x4:
      case Scalar::Int8x16:
      case Scalar::Int16x8:
      case Scalar::Int32x4:
        // For now, don't allow constant values. The immediate operand affects
        // instruction layout which affects patching.
        valueAlloc = useRegisterAtStart(ins->value());
        break;
      case Scalar::Uint8Clamped:
      case Scalar::MaxTypedArrayViewType:
        MOZ_CRASH("unexpected array type");
    }

    auto* lir = new(alloc()) LWasmStore(baseAlloc, valueAlloc);
    add(lir, ins);
}

void
LIRGeneratorX86::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins)
{

@ -60,6 +60,7 @@ class LIRGeneratorX86 : public LIRGeneratorX86Shared
    void visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins);
    void visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins);
    void visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins);
    void visitWasmStore(MWasmStore* ins);
    void visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic* ins);
    void visitSubstr(MSubstr* ins);
    void visitRandom(MRandom* ins);

@ -462,7 +462,7 @@ ArrayBufferObject::createForWasm(JSContext* cx, uint32_t numBytes, bool signalsF
#endif
    }

    auto buffer = ArrayBufferObject::create(cx, numBytes);
    auto* buffer = ArrayBufferObject::create(cx, numBytes);
    if (!buffer)
        return nullptr;

@ -633,6 +633,10 @@ ArrayBufferObject::create(JSContext* cx, uint32_t nbytes, BufferContents content
            size_t nAllocated = nbytes;
            if (contents.kind() == MAPPED)
                nAllocated = JS_ROUNDUP(nbytes, js::gc::SystemPageSize());
#ifdef ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB
            else if (contents.kind() == WASM_MAPPED)
                nAllocated = wasm::MappedSize;
#endif
            cx->zone()->updateMallocCounter(nAllocated);
        }
    } else {

@ -1358,6 +1358,28 @@ BaseShape::traceShapeTable(JSTracer* trc)
    table().trace(trc);
}

#ifdef DEBUG
bool
BaseShape::canSkipMarkingShapeTable(Shape* lastShape)
{
    // Check that every shape in the shape table will be marked by marking
    // |lastShape|.

    if (!hasTable())
        return true;

    uint32_t count = 0;
    for (Shape::Range<NoGC> r(lastShape); !r.empty(); r.popFront()) {
        Shape* shape = &r.front();
        ShapeTable::Entry& entry = table().search<MaybeAdding::NotAdding>(shape->propid());
        if (entry.isLive())
            count++;
    }

    return count == table().entryCount();
}
#endif

#ifdef JSGC_HASH_TABLE_CHECKS

void

@ -129,6 +129,7 @@ enum class MaybeAdding { Adding = true, NotAdding = false };
class ShapeTable {
  public:
    friend class NativeObject;
    friend class BaseShape;
    static const uint32_t MIN_ENTRIES = 11;

    class Entry {
@ -145,6 +146,7 @@ class ShapeTable {
      public:
        bool isFree() const { return shape_ == nullptr; }
        bool isRemoved() const { return shape_ == SHAPE_REMOVED; }
        bool isLive() const { return !isFree() && !isRemoved(); }
        bool hadCollision() const { return uintptr_t(shape_) & SHAPE_COLLISION; }

        void setFree() { shape_ = nullptr; }
@ -454,6 +456,10 @@ class BaseShape : public gc::TenuredCell
    void traceChildren(JSTracer* trc);
    void traceChildrenSkipShapeTable(JSTracer* trc);

#ifdef DEBUG
    bool canSkipMarkingShapeTable(Shape* lastShape);
#endif

  private:
    static void staticAsserts() {
        JS_STATIC_ASSERT(offsetof(BaseShape, clasp_) == offsetof(js::shadow::BaseShape, clasp_));

@ -908,6 +908,13 @@ nsStyleSet::GetContext(nsStyleContext* aParentContext,
                       relevantLinkVisited);

  if (!result) {
    // |aVisitedRuleNode| may have a ref-count of zero since we have yet
    // to create the style context that will hold an owning reference to it.
    // As a result, we need to make sure it stays alive until that point
    // in case something in the first call to NS_NewStyleContext triggers a
    // GC sweep of rule nodes.
    RefPtr<nsRuleNode> kungFuDeathGrip{aVisitedRuleNode};

    result = NS_NewStyleContext(aParentContext, aPseudoTag, aPseudoType,
                                aRuleNode,
                                aFlags & eSkipParentDisplayBasedStyleFixup);

@ -21,4 +21,8 @@ ac_add_options --with-branding=mobile/android/branding/nightly

ac_add_options --disable-stdcxx-compat

# Don't autoclobber l10n, as this can lead to missing binaries and broken builds
# Bug 1283438
mk_add_options AUTOCLOBBER=

. "$topsrcdir/mobile/android/config/mozconfigs/common.override"

@ -22,4 +22,8 @@ ac_add_options --with-branding=mobile/android/branding/beta

ac_add_options --disable-stdcxx-compat

# Don't autoclobber l10n, as this can lead to missing binaries and broken builds
# Bug 1283438
mk_add_options AUTOCLOBBER=

. "$topsrcdir/mobile/android/config/mozconfigs/common.override"

@ -20,4 +20,8 @@ ac_add_options --with-branding=mobile/android/branding/nightly

ac_add_options --disable-stdcxx-compat

# Don't autoclobber l10n, as this can lead to missing binaries and broken builds
# Bug 1283438
mk_add_options AUTOCLOBBER=

. "$topsrcdir/mobile/android/config/mozconfigs/common.override"

@ -21,4 +21,8 @@ ac_add_options --with-branding=mobile/android/branding/beta

ac_add_options --disable-stdcxx-compat

# Don't autoclobber l10n, as this can lead to missing binaries and broken builds
# Bug 1283438
mk_add_options AUTOCLOBBER=

. "$topsrcdir/mobile/android/config/mozconfigs/common.override"

@ -150,14 +150,24 @@ LoadInfo::LoadInfo(nsIPrincipal* aLoadingPrincipal,
  // do not look into the CSP if already true:
  // a CSP saying that SRI isn't needed should not
  // overrule GetVerifySignedContent
  nsCOMPtr<nsIContentSecurityPolicy> csp;
  if (aLoadingPrincipal) {
    nsCOMPtr<nsIContentSecurityPolicy> csp;
    aLoadingPrincipal->GetCsp(getter_AddRefs(csp));
    uint32_t externalType =
      nsContentUtils::InternalContentPolicyTypeToExternal(aContentPolicyType);
    // csp could be null if loading principal is system principal
    if (csp) {
      uint32_t loadType =
        nsContentUtils::InternalContentPolicyTypeToExternal(aContentPolicyType);
      csp->RequireSRIForType(loadType, &mEnforceSRI);
      csp->RequireSRIForType(externalType, &mEnforceSRI);
    }
    // if CSP is delivered via a meta tag, it's speculatively available
    // as 'preloadCSP'. If we are preloading a script or style, we have
    // to apply that speculative 'preloadCSP' for such loads.
    if (!mEnforceSRI && nsContentUtils::IsPreloadType(aContentPolicyType)) {
      nsCOMPtr<nsIContentSecurityPolicy> preloadCSP;
      aLoadingPrincipal->GetPreloadCsp(getter_AddRefs(preloadCSP));
      if (preloadCSP) {
        preloadCSP->RequireSRIForType(externalType, &mEnforceSRI);
      }
    }
  }
}
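
Condensed, the control flow above decides SRI enforcement in two stages: the principal's regular CSP is consulted first, and the speculative meta-tag 'preloadCSP' is consulted only for preload loads that the regular CSP did not already flag. A hypothetical condensation follows; CSP here is a stand-in type, not the real nsIContentSecurityPolicy (whose RequireSRIForType returns its answer through an out-parameter):

#include <cstdint>

// Stand-in for nsIContentSecurityPolicy, illustration only.
struct CSP {
    bool require;
    bool requireSRIForType(uint32_t) const { return require; }
};

bool enforceSRI(const CSP* csp, const CSP* preloadCSP,
                uint32_t externalType, bool isPreloadType)
{
    bool enforce = csp && csp->requireSRIForType(externalType);
    if (!enforce && isPreloadType && preloadCSP)
        enforce = preloadCSP->requireSRIForType(externalType);
    return enforce;
}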

@ -33,7 +33,10 @@ typedef unsigned long nsSecurityFlags;
interface nsILoadInfo : nsISupports
{
  /**
   * No special security flags:
   * *** DEPRECATED ***
   * No LoadInfo created within Gecko should contain this security flag.
   * Please use any of the five security flags defined underneath.
   * We only keep this security flag to provide backwards compatibility.
   */
  const unsigned long SEC_NORMAL = 0;

@ -11,4 +11,4 @@ task:
    - 'mkdir .\build\src'
    - 'hg share c:\builds\hg-shared\mozilla-central .\build\src'
    - 'hg pull -u -R .\build\src --rev %GECKO_HEAD_REV% %GECKO_HEAD_REPOSITORY%'
    - 'c:\mozilla-build\python\python.exe .\build\src\testing\mozharness\scripts\fx_desktop_build.py --config builds\taskcluster_firefox_{{build_name}}_{{build_type}}.py --branch {{project}} --skip-buildbot-actions --work-dir %cd:X:=x:%\build'
    - 'c:\mozilla-build\python\python.exe .\build\src\testing\mozharness\scripts\fx_desktop_build.py --config builds\taskcluster_firefox_{{build_name}}_{{build_type}}.py --branch {{project}} --skip-buildbot-actions --work-dir %cd:Z:=z:%\build'

@ -12,6 +12,8 @@ XPCOMUtils.defineLazyModuleGetter(this, "PlacesUtils",
                                  "resource://gre/modules/PlacesUtils.jsm");
XPCOMUtils.defineLazyModuleGetter(this, "Downloads",
                                  "resource://gre/modules/Downloads.jsm");
XPCOMUtils.defineLazyModuleGetter(this, "ContextualIdentityService",
                                  "resource:///modules/ContextualIdentityService.jsm");

this.EXPORTED_SYMBOLS = ["ForgetAboutSite"];

@ -47,6 +49,14 @@ const Cu = Components.utils;
this.ForgetAboutSite = {
  removeDataFromDomain: function CRH_removeDataFromDomain(aDomain)
  {
    // Get all userContextId from the ContextualIdentityService and create
    // all originAttributes.
    let oaList = [ {} ]; // init the list with the default originAttributes.

    for (let identity of ContextualIdentityService.getIdentities()) {
      oaList.push({ userContextId: identity.userContextId });
    }

    PlacesUtils.history.removePagesFromHost(aDomain, true);

    // Cache
@ -74,10 +84,13 @@ this.ForgetAboutSite = {
    // Cookies
    let cm = Cc["@mozilla.org/cookiemanager;1"].
             getService(Ci.nsICookieManager2);
    let enumerator = cm.getCookiesFromHost(aDomain, {});
    while (enumerator.hasMoreElements()) {
      let cookie = enumerator.getNext().QueryInterface(Ci.nsICookie);
      cm.remove(cookie.host, cookie.name, cookie.path, false, cookie.originAttributes);
    let enumerator;
    for (let originAttributes of oaList) {
      enumerator = cm.getCookiesFromHost(aDomain, originAttributes);
      while (enumerator.hasMoreElements()) {
        let cookie = enumerator.getNext().QueryInterface(Ci.nsICookie);
        cm.remove(cookie.host, cookie.name, cookie.path, false, cookie.originAttributes);
      }
    }

    // EME
@ -164,10 +177,14 @@ this.ForgetAboutSite = {
                                       caUtils);
    let httpURI = caUtils.makeURI("http://" + aDomain);
    let httpsURI = caUtils.makeURI("https://" + aDomain);
    let httpPrincipal = Services.scriptSecurityManager.createCodebasePrincipal(httpURI, {});
    let httpsPrincipal = Services.scriptSecurityManager.createCodebasePrincipal(httpsURI, {});
    qms.clearStoragesForPrincipal(httpPrincipal);
    qms.clearStoragesForPrincipal(httpsPrincipal);
    for (let originAttributes of oaList) {
      let httpPrincipal = Services.scriptSecurityManager
                                  .createCodebasePrincipal(httpURI, originAttributes);
      let httpsPrincipal = Services.scriptSecurityManager
                                   .createCodebasePrincipal(httpsURI, originAttributes);
      qms.clearStoragesForPrincipal(httpPrincipal);
      qms.clearStoragesForPrincipal(httpsPrincipal);
    }

    function onContentPrefsRemovalFinished() {
      // Everybody else (including extensions)

@ -1,6 +1,7 @@
[DEFAULT]
head = head_forgetaboutsite.js ../../../../dom/push/test/xpcshell/head.js
tail =
firefox-appdir = browser
skip-if = toolkit == 'android' || toolkit == 'gonk'
support-files =
  !/dom/push/test/xpcshell/head.js