Merge mozilla-central to autoland. a=merge CLOSED TREE
Commit: b5430ee4da
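The patch below removes the nsISHTransaction layer from session history: nsSHistory keeps nsISHEntry objects directly in an mEntries array, nsISHistory.getEntryAtIndex() drops its aModifyIndex boolean, the per-entry persist flag moves onto nsISHEntry, and nsDocShell's *TransIndex members and HistoryTransactionRemoved() become *EntryIndex and HistoryEntryRemoved(). As a minimal illustrative sketch of the call-site change (not part of the patch; shistory and index stand for any nsISHistory pointer and entry index already in scope):

nsCOMPtr<nsISHEntry> entry;
// Old signature: the extra boolean could also move the current index.
//   shistory->GetEntryAtIndex(index, false, getter_AddRefs(entry));
// New signature: fetching an entry never changes the current index.
shistory->GetEntryAtIndex(index, getter_AddRefs(entry));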
@@ -19,7 +19,7 @@ function get_remote_history(browser) {
};
for (let i = 0; i < sessionHistory.count; i++) {
let entry = sessionHistory.legacySHistory.getEntryAtIndex(i, false);
let entry = sessionHistory.legacySHistory.getEntryAtIndex(i);
result.entries.push({
uri: entry.URI.spec,
title: entry.title,

@@ -1001,6 +1001,8 @@ BrowserGlue.prototype = {
_beforeUIStartup: function BG__beforeUIStartup() {
SessionStartup.init();
PdfJs.earlyInit();
// check if we're in safe mode
if (Services.appinfo.inSafeMode) {
Services.ww.openWindow(null, "chrome://browser/content/safeMode.xul",

@@ -30,8 +30,8 @@ add_task(async function test() {
await promiseTabState(tab, state);
await ContentTask.spawn(tab.linkedBrowser, null, function() {
function compareEntries(i, j, history) {
let e1 = history.getEntryAtIndex(i, false);
let e2 = history.getEntryAtIndex(j, false);
let e1 = history.getEntryAtIndex(i);
let e2 = history.getEntryAtIndex(j);
ok(e1.sharesDocumentWith(e2),
`${i} should share doc with ${j}`);

@@ -24,7 +24,7 @@ function test() {
promiseTabState(tab, tabState).then(() => {
let sessionHistory = browser.sessionHistory;
let entry = sessionHistory.legacySHistory.getEntryAtIndex(0, false);
let entry = sessionHistory.legacySHistory.getEntryAtIndex(0);
whenChildCount(entry, 1, function() {
whenChildCount(entry, 2, function() {

@@ -24,13 +24,13 @@ function test() {
promiseTabState(tab, tabState).then(() => {
let sessionHistory = browser.sessionHistory;
let entry = sessionHistory.legacySHistory.getEntryAtIndex(0, false);
let entry = sessionHistory.legacySHistory.getEntryAtIndex(0);
whenChildCount(entry, 1, function() {
whenChildCount(entry, 2, function() {
promiseBrowserLoaded(browser).then(() => {
let newSessionHistory = browser.sessionHistory;
let newEntry = newSessionHistory.legacySHistory.getEntryAtIndex(0, false);
let newEntry = newSessionHistory.legacySHistory.getEntryAtIndex(0);
whenChildCount(newEntry, 0, function() {
// Make sure that we reset the state.

@@ -7,7 +7,7 @@ add_task(async function duplicateTab() {
await ContentTask.spawn(tab.linkedBrowser, null, function() {
let docshell = content.window.docShell
.QueryInterface(Ci.nsIWebNavigation);
let shEntry = docshell.sessionHistory.legacySHistory.getEntryAtIndex(0, false);
let shEntry = docshell.sessionHistory.legacySHistory.getEntryAtIndex(0);
is(shEntry.docshellID.toString(), docshell.historyID.toString());
});

@@ -17,7 +17,7 @@ add_task(async function duplicateTab() {
await ContentTask.spawn(tab2.linkedBrowser, null, function() {
let docshell = content.window.docShell
.QueryInterface(Ci.nsIWebNavigation);
let shEntry = docshell.sessionHistory.legacySHistory.getEntryAtIndex(0, false);
let shEntry = docshell.sessionHistory.legacySHistory.getEntryAtIndex(0);
is(shEntry.docshellID.toString(), docshell.historyID.toString());
});

@@ -36,7 +36,7 @@ add_task(async function contentToChromeNavigate() {
.QueryInterface(Ci.nsIWebNavigation);
let sh = docshell.sessionHistory;
is(sh.count, 1);
is(sh.legacySHistory.getEntryAtIndex(0, false).docshellID.toString(), docshell.historyID.toString());
is(sh.legacySHistory.getEntryAtIndex(0).docshellID.toString(), docshell.historyID.toString());
});
// Force the browser to navigate to the chrome process.

@@ -54,8 +54,8 @@ add_task(async function contentToChromeNavigate() {
let sh = docShell.QueryInterface(Ci.nsIWebNavigation).sessionHistory;
is(sh.count, 2);
is(sh.legacySHistory.getEntryAtIndex(0, false).docshellID.toString(), docShell.historyID.toString());
is(sh.legacySHistory.getEntryAtIndex(1, false).docshellID.toString(), docShell.historyID.toString());
is(sh.legacySHistory.getEntryAtIndex(0).docshellID.toString(), docShell.historyID.toString());
is(sh.legacySHistory.getEntryAtIndex(1).docshellID.toString(), docShell.historyID.toString());
BrowserTestUtils.removeTab(tab);
});

@@ -30,7 +30,7 @@ add_task(async function check_history_not_persisted() {
.getInterface(Ci.nsISHistory);
is(sessionHistory.count, 1, "Should be a single history entry");
is(sessionHistory.getEntryAtIndex(0, false).URI.spec, "about:blank", "Should be the right URL");
is(sessionHistory.getEntryAtIndex(0).URI.spec, "about:blank", "Should be the right URL");
});
// Load a new URL into the tab, it should replace the about:blank history entry

@@ -40,7 +40,7 @@ add_task(async function check_history_not_persisted() {
let sessionHistory = docShell.QueryInterface(Ci.nsIInterfaceRequestor)
.getInterface(Ci.nsISHistory);
is(sessionHistory.count, 1, "Should be a single history entry");
is(sessionHistory.getEntryAtIndex(0, false).URI.spec, "about:robots", "Should be the right URL");
is(sessionHistory.getEntryAtIndex(0).URI.spec, "about:robots", "Should be the right URL");
});
// Cleanup.

@@ -73,7 +73,7 @@ add_task(async function check_history_default_persisted() {
.getInterface(Ci.nsISHistory);
is(sessionHistory.count, 1, "Should be a single history entry");
is(sessionHistory.getEntryAtIndex(0, false).URI.spec, "about:blank", "Should be the right URL");
is(sessionHistory.getEntryAtIndex(0).URI.spec, "about:blank", "Should be the right URL");
});
// Load a new URL into the tab, it should replace the about:blank history entry

@@ -83,8 +83,8 @@ add_task(async function check_history_default_persisted() {
let sessionHistory = docShell.QueryInterface(Ci.nsIInterfaceRequestor)
.getInterface(Ci.nsISHistory);
is(sessionHistory.count, 2, "Should be two history entries");
is(sessionHistory.getEntryAtIndex(0, false).URI.spec, "about:blank", "Should be the right URL");
is(sessionHistory.getEntryAtIndex(1, false).URI.spec, "about:robots", "Should be the right URL");
is(sessionHistory.getEntryAtIndex(0).URI.spec, "about:blank", "Should be the right URL");
is(sessionHistory.getEntryAtIndex(1).URI.spec, "about:robots", "Should be the right URL");
});
// Cleanup.

@@ -107,6 +107,10 @@ var PdfJs = {
Services.ppmm.sharedData.set("pdfjs.enabled", this.checkEnabled());
},
earlyInit() {
Services.ppmm.sharedData.set("pdfjs.enabled", this.checkEnabled());
},
initPrefs: function initPrefs() {
if (this._initialized) {
return;

@@ -217,6 +217,8 @@ def build_one_stage(cc, cxx, asm, ld, ar, ranlib, libtool,
"-DLLVM_TOOL_LIBCXX_BUILD=%s" % ("ON" if build_libcxx else "OFF"),
"-DLIBCXX_LIBCPPABI_VERSION=\"\"",
]
if is_linux():
cmake_args += ["-DLLVM_BINUTILS_INCDIR=%s/include" % gcc_dir]
if is_windows():
cmake_args.insert(-1, "-DLLVM_EXPORT_SYMBOLS_FOR_PLUGINS=ON")
cmake_args.insert(-1, "-DLLVM_USE_CRT_RELEASE=MT")

@@ -322,8 +322,8 @@ nsDocShell::nsDocShell()
, mMarginWidth(-1)
, mMarginHeight(-1)
, mItemType(typeContent)
, mPreviousTransIndex(-1)
, mLoadedTransIndex(-1)
, mPreviousEntryIndex(-1)
, mLoadedEntryIndex(-1)
, mChildOffset(0)
, mSandboxFlags(0)
, mBusyFlags(BUSY_FLAGS_NONE)

@@ -2286,16 +2286,16 @@ nsDocShell::SetUseErrorPages(bool aUseErrorPages)
}
NS_IMETHODIMP
nsDocShell::GetPreviousTransIndex(int32_t* aPreviousTransIndex)
nsDocShell::GetPreviousEntryIndex(int32_t* aPreviousEntryIndex)
{
*aPreviousTransIndex = mPreviousTransIndex;
*aPreviousEntryIndex = mPreviousEntryIndex;
return NS_OK;
}
NS_IMETHODIMP
nsDocShell::GetLoadedTransIndex(int32_t* aLoadedTransIndex)
nsDocShell::GetLoadedEntryIndex(int32_t* aLoadedEntryIndex)
{
*aLoadedTransIndex = mLoadedTransIndex;
*aLoadedEntryIndex = mLoadedEntryIndex;
return NS_OK;
}

@@ -2307,8 +2307,8 @@ nsDocShell::HistoryPurged(int32_t aNumEntries)
// eviction. We need to adjust by the number of entries that we
// just purged from history, so that we look at the right session history
// entries during eviction.
mPreviousTransIndex = std::max(-1, mPreviousTransIndex - aNumEntries);
mLoadedTransIndex = std::max(0, mLoadedTransIndex - aNumEntries);
mPreviousEntryIndex = std::max(-1, mPreviousEntryIndex - aNumEntries);
mLoadedEntryIndex = std::max(0, mLoadedEntryIndex - aNumEntries);
nsTObserverArray<nsDocLoader*>::ForwardIterator iter(mChildList);
while (iter.HasMore()) {

@@ -2322,29 +2322,29 @@ nsDocShell::HistoryPurged(int32_t aNumEntries)
}
nsresult
nsDocShell::HistoryTransactionRemoved(int32_t aIndex)
nsDocShell::HistoryEntryRemoved(int32_t aIndex)
{
// These indices are used for fastback cache eviction, to determine
// which session history entries are candidates for content viewer
// eviction. We need to adjust by the number of entries that we
// just purged from history, so that we look at the right session history
// entries during eviction.
if (aIndex == mPreviousTransIndex) {
mPreviousTransIndex = -1;
} else if (aIndex < mPreviousTransIndex) {
--mPreviousTransIndex;
if (aIndex == mPreviousEntryIndex) {
mPreviousEntryIndex = -1;
} else if (aIndex < mPreviousEntryIndex) {
--mPreviousEntryIndex;
}
if (mLoadedTransIndex == aIndex) {
mLoadedTransIndex = 0;
} else if (aIndex < mLoadedTransIndex) {
--mLoadedTransIndex;
if (mLoadedEntryIndex == aIndex) {
mLoadedEntryIndex = 0;
} else if (aIndex < mLoadedEntryIndex) {
--mLoadedEntryIndex;
}
nsTObserverArray<nsDocLoader*>::ForwardIterator iter(mChildList);
while (iter.HasMore()) {
nsCOMPtr<nsIDocShell> shell = do_QueryObject(iter.GetNext());
if (shell) {
static_cast<nsDocShell*>(shell.get())->HistoryTransactionRemoved(aIndex);
static_cast<nsDocShell*>(shell.get())->HistoryEntryRemoved(aIndex);
}
}
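The renamed HistoryEntryRemoved() above keeps mPreviousEntryIndex and mLoadedEntryIndex pointing at the same logical entries after one entry is deleted: a tracked index equal to the removed index is reset, a tracked index greater than the removed index is decremented, and smaller indices are untouched. A small self-contained model of that rule (plain C++, not Gecko code; the reset value is -1 for the previous index and 0 for the loaded index, as in the hunk above):

#include <cassert>

// Mirrors the adjustment in nsDocShell::HistoryEntryRemoved().
int AdjustAfterRemoval(int tracked, int removed, int resetValue) {
  if (removed == tracked) {
    return resetValue;   // the tracked entry itself was removed
  }
  if (removed < tracked) {
    return tracked - 1;  // entries after the removal shift left by one
  }
  return tracked;        // removals behind the tracked entry change nothing
}

int main() {
  assert(AdjustAfterRemoval(3, 3, -1) == -1);  // previous-entry index resets to -1
  assert(AdjustAfterRemoval(3, 1, -1) == 2);   // shifted down
  assert(AdjustAfterRemoval(3, 5, -1) == 3);   // unaffected
  return 0;
}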
@@ -3824,7 +3824,7 @@ nsDocShell::AddChildSHEntryInternal(nsISHEntry* aCloneRef,
}
rv = mSessionHistory->LegacySHistory()->GetEntryAtIndex(
index, false, getter_AddRefs(currentHE));
index, getter_AddRefs(currentHE));
NS_ENSURE_TRUE(currentHE, NS_ERROR_FAILURE);
nsCOMPtr<nsISHEntry> currentEntry(do_QueryInterface(currentHE));

@@ -3866,7 +3866,7 @@ nsDocShell::AddChildSHEntryToParent(nsISHEntry* aNewEntry, int32_t aChildOffset,
// current index by 1
RefPtr<ChildSHistory> rootSH = GetRootSessionHistory();
if (rootSH) {
mPreviousTransIndex = rootSH->Index();
mPreviousEntryIndex = rootSH->Index();
}
nsresult rv;

@@ -3877,10 +3877,10 @@ nsDocShell::AddChildSHEntryToParent(nsISHEntry* aNewEntry, int32_t aChildOffset,
}
if (rootSH) {
mLoadedTransIndex = rootSH->Index();
mLoadedEntryIndex = rootSH->Index();
#ifdef DEBUG_PAGE_CACHE
printf("Previous index: %d, Loaded index: %d\n\n", mPreviousTransIndex,
mLoadedTransIndex);
printf("Previous index: %d, Loaded index: %d\n\n", mPreviousEntryIndex,
mLoadedEntryIndex);
#endif
}

@@ -8120,12 +8120,12 @@ nsDocShell::RestoreFromHistory()
mURIResultedInDocument = true;
RefPtr<ChildSHistory> rootSH = GetRootSessionHistory();
if (rootSH) {
mPreviousTransIndex = rootSH->Index();
mPreviousEntryIndex = rootSH->Index();
rootSH->LegacySHistory()->UpdateIndex();
mLoadedTransIndex = rootSH->Index();
mLoadedEntryIndex = rootSH->Index();
#ifdef DEBUG_PAGE_CACHE
printf("Previous index: %d, Loaded index: %d\n\n", mPreviousTransIndex,
mLoadedTransIndex);
printf("Previous index: %d, Loaded index: %d\n\n", mPreviousEntryIndex,
mLoadedEntryIndex);
#endif
}

@@ -8652,7 +8652,7 @@ nsDocShell::CreateContentViewer(const nsACString& aContentType,
idx = mSessionHistory->Index();
}
mSessionHistory->LegacySHistory()->
GetEntryAtIndex(idx, false, getter_AddRefs(mLSHE));
GetEntryAtIndex(idx, getter_AddRefs(mLSHE));
}
mLoadType = LOAD_ERROR_PAGE;

@@ -10076,7 +10076,7 @@ nsDocShell::InternalLoad(nsIURI* aURI,
int32_t index = mSessionHistory->Index();
nsCOMPtr<nsISHEntry> shEntry;
mSessionHistory->LegacySHistory()->GetEntryAtIndex(
index, false, getter_AddRefs(shEntry));
index, getter_AddRefs(shEntry));
NS_ENSURE_TRUE(shEntry, NS_ERROR_FAILURE);
shEntry->SetTitle(mTitle);
}

@@ -11573,7 +11573,7 @@ nsDocShell::OnNewURI(nsIURI* aURI, nsIChannel* aChannel,
}
nsCOMPtr<nsISHEntry> currentSH;
mSessionHistory->LegacySHistory()->GetEntryAtIndex(
index, false, getter_AddRefs(currentSH));
index, getter_AddRefs(currentSH));
if (currentSH != mLSHE) {
mSessionHistory->LegacySHistory()->ReplaceEntry(index, mLSHE);
}

@@ -11607,12 +11607,12 @@ nsDocShell::OnNewURI(nsIURI* aURI, nsIChannel* aChannel,
if (rootSH &&
((mLoadType & (LOAD_CMD_HISTORY | LOAD_CMD_RELOAD)) ||
mLoadType == LOAD_NORMAL_REPLACE)) {
mPreviousTransIndex = rootSH->Index();
mPreviousEntryIndex = rootSH->Index();
rootSH->LegacySHistory()->UpdateIndex();
mLoadedTransIndex = rootSH->Index();
mLoadedEntryIndex = rootSH->Index();
#ifdef DEBUG_PAGE_CACHE
printf("Previous index: %d, Loaded index: %d\n\n",
mPreviousTransIndex, mLoadedTransIndex);
mPreviousEntryIndex, mLoadedEntryIndex);
#endif
}

@@ -12254,14 +12254,14 @@ nsDocShell::AddToSessionHistory(nsIURI* aURI, nsIChannel* aChannel,
if (addToSHistory) {
// Add to session history
mPreviousTransIndex = mSessionHistory->Index();
mPreviousEntryIndex = mSessionHistory->Index();
bool shouldPersist = ShouldAddToSessionHistory(aURI, aChannel);
rv = mSessionHistory->LegacySHistory()->AddEntry(entry, shouldPersist);
mLoadedTransIndex = mSessionHistory->Index();
mLoadedEntryIndex = mSessionHistory->Index();
#ifdef DEBUG_PAGE_CACHE
printf("Previous index: %d, Loaded index: %d\n\n",
mPreviousTransIndex, mLoadedTransIndex);
mPreviousEntryIndex, mLoadedEntryIndex);
#endif
}
} else {

@@ -254,7 +254,7 @@ public:
LOCATION_CHANGE_SAME_DOCUMENT);
}
nsresult HistoryTransactionRemoved(int32_t aIndex);
nsresult HistoryEntryRemoved(int32_t aIndex);
// Notify Scroll observers when an async panning/zooming transform
// has started being applied

@@ -1031,11 +1031,11 @@ private: // data members
// Create() is called, the type is not expected to change.
int32_t mItemType;
// Index into the SHTransaction list, indicating the previous and current
// transaction at the time that this DocShell begins to load. Consequently
// Index into the nsISHEntry array, indicating the previous and current
// entry at the time that this DocShell begins to load. Consequently
// root docshell's indices can differ from child docshells'.
int32_t mPreviousTransIndex;
int32_t mLoadedTransIndex;
int32_t mPreviousEntryIndex;
int32_t mLoadedEntryIndex;
// Offset in the parent's child list.
// -1 if the docshell is added dynamically to the parent shell.

@@ -539,12 +539,12 @@ interface nsIDocShell : nsIDocShellTreeItem
readonly attribute nsIChannel failedChannel;
/**
* Keeps track of the previous SHTransaction index and the current
* SHTransaction index at the time that the doc shell begins to load.
* Keeps track of the previous nsISHEntry index and the current
* nsISHEntry index at the time that the doc shell begins to load.
* Used for ContentViewer eviction.
*/
readonly attribute long previousTransIndex;
readonly attribute long loadedTransIndex;
readonly attribute long previousEntryIndex;
readonly attribute long loadedEntryIndex;
/**
* Notification that entries have been removed from the beginning of a

@@ -9,7 +9,6 @@ XPIDL_SOURCES += [
'nsISHEntry.idl',
'nsISHistory.idl',
'nsISHistoryListener.idl',
'nsISHTransaction.idl',
]
XPIDL_MODULE = 'shistory'

@@ -28,7 +27,6 @@ UNIFIED_SOURCES += [
'nsSHEntry.cpp',
'nsSHEntryShared.cpp',
'nsSHistory.cpp',
'nsSHTransaction.cpp',
'ParentSHistory.cpp',
]

@@ -31,7 +31,7 @@ class nsSHEntryShared;
[ptr] native nsDocShellEditorDataPtr(nsDocShellEditorData);
[ptr] native nsSHEntryShared(nsSHEntryShared);
[scriptable, uuid(0dad26b8-a259-42c7-93f1-2fa7fc076e45)]
[builtinclass, scriptable, uuid(0dad26b8-a259-42c7-93f1-2fa7fc076e45)]
interface nsISHEntry : nsISupports
{
/**

@@ -400,6 +400,13 @@ interface nsISHEntry : nsISupports
* @throw if nothing was replaced.
*/
void ReplaceChild(in nsISHEntry aNewChild);
/**
* When an entry is serving is within nsISHistory's array of entries, this
* property specifies if it should persist. If not it will be replaced by
* new additions to the list.
*/
[infallible] attribute boolean persist;
};
%{ C++

@@ -1,24 +0,0 @@
/* -*- Mode: IDL; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "nsISupports.idl"
interface nsISHEntry;
[scriptable, uuid(2EDF705F-D252-4971-9F09-71DD0F760DC6)]
interface nsISHTransaction : nsISupports
{
/**
* The nsISHEntry for the current transaction.
*/
attribute nsISHEntry sHEntry;
/**
* Specifies if this transaction should persist. If not it will be replaced
* by new additions to the list.
*/
attribute boolean persist;
};

@@ -9,7 +9,6 @@ interface nsIBFCacheEntry;
interface nsIDocShell;
interface nsISHEntry;
interface nsISHistoryListener;
interface nsISHTransaction;
interface nsIURI;
%{C++

@@ -49,10 +48,9 @@ interface nsISHistory: nsISupports
readonly attribute long count;
/**
* A readonly property of the interface that returns
* the index of the current document in session history.
* The index of the current document in session history.
*/
readonly attribute long index;
attribute long index;
/**
* A readonly property of the interface that returns

@@ -70,21 +68,13 @@ interface nsISHistory: nsISupports
attribute long maxLength;
/**
* Called to obtain handle to the history entry at a
* given index.
* Get the history entry at a given index. Returns non-null on success.
*
* @param index The index value whose entry is requested.
* The oldest entry is located at index == 0.
* @param modifyIndex A boolean flag that indicates if the current
* index of session history should be modified
* to the parameter index.
*
* @return <code>NS_OK</code> history entry for
* the index is obtained successfully.
* <code>NS_ERROR_FAILURE</code> Error in obtaining
* history entry for the given index.
* @return The found entry; never null.
*/
nsISHEntry getEntryAtIndex(in long aIndex, in boolean aModifyIndex);
nsISHEntry getEntryAtIndex(in long aIndex);
/**
* Called to purge older documents from history.

@@ -158,11 +148,6 @@
*/
void addEntry(in nsISHEntry aEntry, in boolean aPersist);
/**
* Get the transaction at a particular index.
*/
nsISHTransaction GetTransactionAtIndex(in int32_t aIndex);
/**
* Sets the toplevel docshell object to which this SHistory object belongs to.
*/
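With aModifyIndex gone from getEntryAtIndex() and index now a writable attribute on nsISHistory, the old "fetch and reposition in one call" pattern would presumably be split into two explicit steps. A hedged sketch of that migration (illustrative only; the patch does not show such a caller, and shistory and i are assumed to be in scope):

nsCOMPtr<nsISHEntry> entry;
// Before: shistory->GetEntryAtIndex(i, /* aModifyIndex = */ true, getter_AddRefs(entry));
shistory->GetEntryAtIndex(i, getter_AddRefs(entry));
shistory->SetIndex(i);  // new setter added elsewhere in this patch; it notifies OnIndexChanged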
@@ -38,6 +38,7 @@ nsSHEntry::nsSHEntry()
, mIsSrcdocEntry(false)
, mScrollRestorationIsManual(false)
, mLoadedInThisProcess(false)
, mPersist(true)
{
}

@@ -1001,3 +1002,20 @@ nsSHEntry::SetAsHistoryLoad()
mLoadType = LOAD_HISTORY;
return NS_OK;
}
NS_IMETHODIMP
nsSHEntry::GetPersist(bool* aPersist)
{
NS_ENSURE_ARG_POINTER(aPersist);
*aPersist = mPersist;
return NS_OK;
}
NS_IMETHODIMP
nsSHEntry::SetPersist(bool aPersist)
{
mPersist = aPersist;
return NS_OK;
}

@@ -61,6 +61,7 @@ private:
bool mIsSrcdocEntry;
bool mScrollRestorationIsManual;
bool mLoadedInThisProcess;
bool mPersist;
};
#endif /* nsSHEntry_h */

@@ -1,57 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "nsSHTransaction.h"
#include "nsISHEntry.h"
nsSHTransaction::nsSHTransaction()
: mPersist(true)
{
}
nsSHTransaction::~nsSHTransaction()
{
}
NS_IMPL_ADDREF(nsSHTransaction)
NS_IMPL_RELEASE(nsSHTransaction)
NS_INTERFACE_MAP_BEGIN(nsSHTransaction)
NS_INTERFACE_MAP_ENTRY_AMBIGUOUS(nsISupports, nsISHTransaction)
NS_INTERFACE_MAP_ENTRY(nsISHTransaction)
NS_INTERFACE_MAP_END
NS_IMETHODIMP
nsSHTransaction::GetSHEntry(nsISHEntry** aResult)
{
NS_ENSURE_ARG_POINTER(aResult);
*aResult = mSHEntry;
NS_IF_ADDREF(*aResult);
return NS_OK;
}
NS_IMETHODIMP
nsSHTransaction::SetSHEntry(nsISHEntry* aSHEntry)
{
mSHEntry = aSHEntry;
return NS_OK;
}
NS_IMETHODIMP
nsSHTransaction::SetPersist(bool aPersist)
{
mPersist = aPersist;
return NS_OK;
}
NS_IMETHODIMP
nsSHTransaction::GetPersist(bool* aPersist)
{
NS_ENSURE_ARG_POINTER(aPersist);
*aPersist = mPersist;
return NS_OK;
}

@@ -1,31 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef nsSHTransaction_h
#define nsSHTransaction_h
#include "nsCOMPtr.h"
#include "nsISHTransaction.h"
class nsISHEntry;
class nsSHTransaction : public nsISHTransaction
{
public:
NS_DECL_ISUPPORTS
NS_DECL_NSISHTRANSACTION
nsSHTransaction();
protected:
virtual ~nsSHTransaction();
protected:
nsCOMPtr<nsISHEntry> mSHEntry;
bool mPersist;
};
#endif /* nsSHTransaction_h */

@@ -19,7 +19,6 @@
#include "nsIObserverService.h"
#include "nsISHEntry.h"
#include "nsISHistoryListener.h"
#include "nsSHTransaction.h"
#include "nsIURI.h"
#include "nsNetUtil.h"
#include "nsTArray.h"

@@ -185,32 +184,24 @@ nsSHistoryObserver::Observe(nsISupports* aSubject, const char* aTopic,
namespace {
already_AddRefed<nsIContentViewer>
GetContentViewerForTransaction(nsISHTransaction* aTrans)
GetContentViewerForEntry(nsISHEntry* aEntry)
{
nsCOMPtr<nsISHEntry> entry;
aTrans->GetSHEntry(getter_AddRefs(entry));
if (!entry) {
return nullptr;
}
nsCOMPtr<nsISHEntry> ownerEntry;
nsCOMPtr<nsIContentViewer> viewer;
entry->GetAnyContentViewer(getter_AddRefs(ownerEntry),
getter_AddRefs(viewer));
aEntry->GetAnyContentViewer(getter_AddRefs(ownerEntry),
getter_AddRefs(viewer));
return viewer.forget();
}
} // namespace
void
nsSHistory::EvictContentViewerForTransaction(nsISHTransaction* aTrans)
nsSHistory::EvictContentViewerForEntry(nsISHEntry* aEntry)
{
nsCOMPtr<nsISHEntry> entry;
aTrans->GetSHEntry(getter_AddRefs(entry));
nsCOMPtr<nsIContentViewer> viewer;
nsCOMPtr<nsISHEntry> ownerEntry;
entry->GetAnyContentViewer(getter_AddRefs(ownerEntry),
getter_AddRefs(viewer));
aEntry->GetAnyContentViewer(getter_AddRefs(ownerEntry),
getter_AddRefs(viewer));
if (viewer) {
NS_ASSERTION(ownerEntry, "Content viewer exists but its SHEntry is null");

@@ -228,9 +219,9 @@ nsSHistory::EvictContentViewerForTransaction(nsISHTransaction* aTrans)
// When dropping bfcache, we have to remove associated dynamic entries as well.
int32_t index = -1;
GetIndexOfEntry(entry, &index);
GetIndexOfEntry(aEntry, &index);
if (index != -1) {
RemoveDynEntries(index, entry);
RemoveDynEntries(index, aEntry);
}
}

@@ -616,6 +607,12 @@ nsSHistory::AddEntry(nsISHEntry* aSHEntry, bool aPersist)
return NS_ERROR_FAILURE;
}
nsCOMPtr<nsISHEntry> currentTxn;
if (mIndex >= 0) {
nsresult rv = GetEntryAtIndex(mIndex, getter_AddRefs(currentTxn));
NS_ENSURE_SUCCESS(rv, rv);
}
aSHEntry->SetSHistory(this);
// If we have a root docshell, update the docshell id of the root shentry to

@@ -625,39 +622,23 @@ nsSHistory::AddEntry(nsISHEntry* aSHEntry, bool aPersist)
aSHEntry->SetDocshellID(&docshellID);
}
nsCOMPtr<nsISHTransaction> currentTxn;
GetTransactionAtIndex(mIndex, getter_AddRefs(currentTxn));
bool currentPersist = true;
if (currentTxn) {
currentTxn->GetPersist(&currentPersist);
}
int32_t currentIndex = mIndex;
if (!currentPersist) {
NOTIFY_LISTENERS(OnHistoryReplaceEntry, (currentIndex));
NS_ENSURE_SUCCESS(currentTxn->SetSHEntry(aSHEntry), NS_ERROR_FAILURE);
currentTxn->SetPersist(aPersist);
if (currentTxn && !currentTxn->GetPersist()) {
NOTIFY_LISTENERS(OnHistoryReplaceEntry, (mIndex));
aSHEntry->SetPersist(aPersist);
mEntries[mIndex] = aSHEntry;
return NS_OK;
}
nsCOMPtr<nsIURI> uri;
aSHEntry->GetURI(getter_AddRefs(uri));
NOTIFY_LISTENERS(OnHistoryNewEntry, (uri, currentIndex));
NOTIFY_LISTENERS(OnHistoryNewEntry, (uri, mIndex));
// Note that a listener may have changed mIndex. So use mIndex instead of
// currentIndex.
nsCOMPtr<nsISHTransaction> txn = new nsSHTransaction();
txn->SetPersist(aPersist);
txn->SetSHEntry(aSHEntry);
// Remove all transactions after the current one, add the new one, and set
// the new one as the current one.
// Remove all entries after the current one, add the new one, and set the new
// one as the current one.
MOZ_ASSERT(mIndex >= -1);
mTransactions.TruncateLength(mIndex + 1);
mTransactions.AppendElement(txn);
aSHEntry->SetPersist(aPersist);
mEntries.TruncateLength(mIndex + 1);
mEntries.AppendElement(aSHEntry);
mIndex++;
NOTIFY_LISTENERS(OnLengthChanged, (Length()));
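After the rewrite above, AddEntry() has two paths: if the current entry was added with persist = false it is replaced in place, otherwise all forward entries are dropped and the new entry is appended. A self-contained model of that decision, using a plain std::vector in place of mEntries (illustrative only, not Gecko code):

#include <cassert>
#include <string>
#include <vector>

struct Entry {
  std::string uri;
  bool persist = true;
};

struct History {
  std::vector<Entry> entries;
  int index = -1;  // -1 means "no current entry"

  // Mirrors nsSHistory::AddEntry(): replace a non-persistent current entry,
  // otherwise truncate the forward history and append.
  void Add(Entry entry, bool persist) {
    entry.persist = persist;
    if (index >= 0 && !entries[index].persist) {
      entries[index] = entry;    // replace in place; index is unchanged
      return;
    }
    entries.resize(index + 1);   // drop any entries after the current one
    entries.push_back(entry);
    ++index;
  }
};

int main() {
  History h;
  h.Add({"about:blank"}, /* persist = */ false);
  h.Add({"about:robots"}, /* persist = */ true);  // replaces the about:blank entry
  assert(h.entries.size() == 1 && h.index == 0);
  h.Add({"https://example.org/"}, true);          // appended normally
  assert(h.entries.size() == 2 && h.index == 1);
  return 0;
}

The same behaviour is what the check_history_not_persisted and check_history_default_persisted tests earlier in this patch assert: a non-persisted about:blank entry is replaced rather than kept when about:robots loads, while a persisted one produces a second history entry.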
@@ -680,7 +661,6 @@ nsSHistory::GetCount(int32_t* aResult)
return NS_OK;
}
/* Get index of the history list */
NS_IMETHODIMP
nsSHistory::GetIndex(int32_t* aResult)
{

@@ -689,6 +669,19 @@ nsSHistory::GetIndex(int32_t* aResult)
return NS_OK;
}
NS_IMETHODIMP
nsSHistory::SetIndex(int32_t aIndex)
{
if (aIndex < 0 || aIndex >= Length()) {
return NS_ERROR_FAILURE;
}
mIndex = aIndex;
NOTIFY_LISTENERS(OnIndexChanged, (mIndex))
return NS_OK;
}
/* Get the requestedIndex */
NS_IMETHODIMP
nsSHistory::GetRequestedIndex(int32_t* aResult)

@@ -698,33 +691,8 @@ nsSHistory::GetRequestedIndex(int32_t* aResult)
return NS_OK;
}
/* Get the entry at a given index */
NS_IMETHODIMP
nsSHistory::GetEntryAtIndex(int32_t aIndex, bool aModifyIndex,
nsISHEntry** aResult)
{
nsresult rv;
nsCOMPtr<nsISHTransaction> txn;
/* GetTransactionAtIndex ensures aResult is valid and validates aIndex */
rv = GetTransactionAtIndex(aIndex, getter_AddRefs(txn));
if (NS_SUCCEEDED(rv) && txn) {
// Get the Entry from the transaction
rv = txn->GetSHEntry(aResult);
if (NS_SUCCEEDED(rv) && (*aResult)) {
// Set mIndex to the requested index, if asked to do so..
if (aModifyIndex) {
mIndex = aIndex;
NOTIFY_LISTENERS(OnIndexChanged, (mIndex))
}
}
}
return rv;
}
/* Get the transaction at a given index */
NS_IMETHODIMP
nsSHistory::GetTransactionAtIndex(int32_t aIndex, nsISHTransaction** aResult)
nsSHistory::GetEntryAtIndex(int32_t aIndex, nsISHEntry** aResult)
{
NS_ENSURE_ARG_POINTER(aResult);

@@ -732,7 +700,7 @@ nsSHistory::GetTransactionAtIndex(int32_t aIndex, nsISHTransaction** aResult)
return NS_ERROR_FAILURE;
}
*aResult = mTransactions[aIndex];
*aResult = mEntries[aIndex];
NS_ADDREF(*aResult);
return NS_OK;
}

@@ -746,13 +714,7 @@ nsSHistory::GetIndexOfEntry(nsISHEntry* aSHEntry, int32_t* aResult)
*aResult = -1;
for (int32_t i = 0; i < Length(); i++) {
nsCOMPtr<nsISHEntry> entry;
nsresult rv = mTransactions[i]->GetSHEntry(getter_AddRefs(entry));
if (NS_FAILED(rv) || !entry) {
return NS_ERROR_FAILURE;
}
if (aSHEntry == entry) {
if (aSHEntry == mEntries[i]) {
*aResult = i;
return NS_OK;
}

@@ -766,13 +728,7 @@ nsresult
nsSHistory::PrintHistory()
{
for (int32_t i = 0; i < Length(); i++) {
nsCOMPtr<nsISHTransaction> txn = mTransactions[i];
nsCOMPtr<nsISHEntry> entry;
nsresult rv = txn->GetSHEntry(getter_AddRefs(entry));
if (NS_FAILED(rv) && !entry) {
return NS_ERROR_FAILURE;
}
nsCOMPtr<nsISHEntry> entry = mEntries[i];
nsCOMPtr<nsILayoutHistoryState> layoutHistoryState;
nsCOMPtr<nsIURI> uri;
nsString title;

@@ -787,7 +743,7 @@ nsSHistory::PrintHistory()
uri->GetSpec(url);
}
printf("**** SH Transaction #%d, Entry = %x\n", i, entry.get());
printf("**** SH Entry #%d: %x\n", i, entry.get());
printf("\t\t URL = %s\n", url.get());
printf("\t\t Title = %s\n", NS_LossyConvertUTF16toASCII(title).get());

@@ -850,7 +806,7 @@ nsSHistory::PurgeHistory(int32_t aNumEntries)
}
// Remove the first `aNumEntries` entries.
mTransactions.RemoveElementsAt(0, aNumEntries);
mEntries.RemoveElementsAt(0, aNumEntries);
// Adjust the indices, but don't let them go below -1.
mIndex -= aNumEntries;

@@ -902,30 +858,28 @@
nsSHistory::ReplaceEntry(int32_t aIndex, nsISHEntry* aReplaceEntry)
{
NS_ENSURE_ARG(aReplaceEntry);
nsresult rv;
nsCOMPtr<nsISHTransaction> currentTxn;
rv = GetTransactionAtIndex(aIndex, getter_AddRefs(currentTxn));
if (currentTxn) {
nsCOMPtr<nsISHistory> shistoryOfEntry;
aReplaceEntry->GetSHistory(getter_AddRefs(shistoryOfEntry));
if (shistoryOfEntry && shistoryOfEntry != this) {
NS_WARNING("The entry has been associated to another nsISHistory instance. "
"Try nsISHEntry.clone() and nsISHEntry.abandonBFCacheEntry() "
"first if you're copying an entry from another nsISHistory.");
return NS_ERROR_FAILURE;
}
aReplaceEntry->SetSHistory(this);
NOTIFY_LISTENERS(OnHistoryReplaceEntry, (aIndex));
// Set the replacement entry in the transaction
rv = currentTxn->SetSHEntry(aReplaceEntry);
rv = currentTxn->SetPersist(true);
if (aIndex < 0 || aIndex >= Length()) {
return NS_ERROR_FAILURE;
}
return rv;
nsCOMPtr<nsISHistory> shistoryOfEntry;
aReplaceEntry->GetSHistory(getter_AddRefs(shistoryOfEntry));
if (shistoryOfEntry && shistoryOfEntry != this) {
NS_WARNING("The entry has been associated to another nsISHistory instance. "
"Try nsISHEntry.clone() and nsISHEntry.abandonBFCacheEntry() "
"first if you're copying an entry from another nsISHistory.");
return NS_ERROR_FAILURE;
}
aReplaceEntry->SetSHistory(this);
NOTIFY_LISTENERS(OnHistoryReplaceEntry, (aIndex));
aReplaceEntry->SetPersist(true);
mEntries[aIndex] = aReplaceEntry;
return NS_OK;
}
NS_IMETHODIMP

@@ -953,7 +907,7 @@ nsSHistory::EvictAllContentViewers()
// XXXbz we don't actually do a good job of evicting things as we should, so
// we might have viewers quite far from mIndex. So just evict everything.
for (int32_t i = 0; i < Length(); i++) {
EvictContentViewerForTransaction(mTransactions[i]);
EvictContentViewerForEntry(mEntries[i]);
}
return NS_OK;

@@ -1021,11 +975,11 @@ nsSHistory::EvictOutOfRangeWindowContentViewers(int32_t aIndex)
//
// to ensure that this SHistory object isn't responsible for more than
// VIEWER_WINDOW content viewers. But our job is complicated by the
// fact that two transactions which are related by either hash navigations or
// fact that two entries which are related by either hash navigations or
// history.pushState will have the same content viewer.
//
// To illustrate the issue, suppose VIEWER_WINDOW = 3 and we have four
// linked transactions in our history. Suppose we then add a new content
// linked entries in our history. Suppose we then add a new content
// viewer and call into this function. So the history looks like:
//
// A A A A B

@@ -1066,8 +1020,7 @@ nsSHistory::EvictOutOfRangeWindowContentViewers(int32_t aIndex)
// if it appears outside this range.
nsCOMArray<nsIContentViewer> safeViewers;
for (int32_t i = startSafeIndex; i <= endSafeIndex; i++) {
nsCOMPtr<nsIContentViewer> viewer =
GetContentViewerForTransaction(mTransactions[i]);
nsCOMPtr<nsIContentViewer> viewer = GetContentViewerForEntry(mEntries[i]);
safeViewers.AppendObject(viewer);
}

@@ -1075,35 +1028,32 @@ nsSHistory::EvictOutOfRangeWindowContentViewers(int32_t aIndex)
// (It's important that the condition checks Length(), rather than a cached
// copy of Length(), because the length might change between iterations.)
for (int32_t i = 0; i < Length(); i++) {
nsCOMPtr<nsISHTransaction> trans = mTransactions[i];
nsCOMPtr<nsIContentViewer> viewer = GetContentViewerForTransaction(trans);
nsCOMPtr<nsISHEntry> entry = mEntries[i];
nsCOMPtr<nsIContentViewer> viewer = GetContentViewerForEntry(entry);
if (safeViewers.IndexOf(viewer) == -1) {
EvictContentViewerForTransaction(trans);
EvictContentViewerForEntry(entry);
}
}
}
namespace {
class TransactionAndDistance
class EntryAndDistance
{
public:
TransactionAndDistance(nsSHistory* aSHistory, nsISHTransaction* aTrans, uint32_t aDist)
EntryAndDistance(nsSHistory* aSHistory, nsISHEntry* aEntry, uint32_t aDist)
: mSHistory(aSHistory)
, mTransaction(aTrans)
, mEntry(aEntry)
, mLastTouched(0)
, mDistance(aDist)
{
mViewer = GetContentViewerForTransaction(aTrans);
NS_ASSERTION(mViewer, "Transaction should have a content viewer");
mViewer = GetContentViewerForEntry(aEntry);
NS_ASSERTION(mViewer, "Entry should have a content viewer");
nsCOMPtr<nsISHEntry> shentry;
mTransaction->GetSHEntry(getter_AddRefs(shentry));
shentry->GetLastTouched(&mLastTouched);
mEntry->GetLastTouched(&mLastTouched);
}
bool operator<(const TransactionAndDistance& aOther) const
bool operator<(const EntryAndDistance& aOther) const
{
// Compare distances first, and fall back to last-accessed times.
if (aOther.mDistance != this->mDistance) {

@@ -1113,17 +1063,17 @@ public:
return this->mLastTouched < aOther.mLastTouched;
}
bool operator==(const TransactionAndDistance& aOther) const
bool operator==(const EntryAndDistance& aOther) const
{
// This is a little silly; we need == so the default comaprator can be
// instantiated, but this function is never actually called when we sort
// the list of TransactionAndDistance objects.
// the list of EntryAndDistance objects.
return aOther.mDistance == this->mDistance &&
aOther.mLastTouched == this->mLastTouched;
}
RefPtr<nsSHistory> mSHistory;
nsCOMPtr<nsISHTransaction> mTransaction;
nsCOMPtr<nsISHEntry> mEntry;
nsCOMPtr<nsIContentViewer> mViewer;
uint32_t mLastTouched;
int32_t mDistance;

@@ -1135,18 +1085,18 @@ public:
void
nsSHistory::GloballyEvictContentViewers()
{
// First, collect from each SHistory object the transactions which have a
// cached content viewer. Associate with each transaction its distance from
// its SHistory's current index.
// First, collect from each SHistory object the entries which have a cached
// content viewer. Associate with each entry its distance from its SHistory's
// current index.
nsTArray<TransactionAndDistance> transactions;
nsTArray<EntryAndDistance> entries;
for (auto shist : gSHistoryList) {
// Maintain a list of the transactions which have viewers and belong to
// Maintain a list of the entries which have viewers and belong to
// this particular shist object. We'll add this list to the global list,
// |transactions|, eventually.
nsTArray<TransactionAndDistance> shTransactions;
// |entries|, eventually.
nsTArray<EntryAndDistance> shEntries;
// Content viewers are likely to exist only within shist->mIndex -/+
// VIEWER_WINDOW, so only search within that range.

@@ -1164,18 +1114,18 @@ nsSHistory::GloballyEvictContentViewers()
int32_t startIndex, endIndex;
shist->WindowIndices(shist->mIndex, &startIndex, &endIndex);
for (int32_t i = startIndex; i <= endIndex; i++) {
nsCOMPtr<nsISHTransaction> trans = shist->mTransactions[i];
nsCOMPtr<nsISHEntry> entry = shist->mEntries[i];
nsCOMPtr<nsIContentViewer> contentViewer =
GetContentViewerForTransaction(trans);
GetContentViewerForEntry(entry);
if (contentViewer) {
// Because one content viewer might belong to multiple SHEntries, we
// have to search through shTransactions to see if we already know
// have to search through shEntries to see if we already know
// about this content viewer. If we find the viewer, update its
// distance from the SHistory's index and continue.
bool found = false;
for (uint32_t j = 0; j < shTransactions.Length(); j++) {
TransactionAndDistance& container = shTransactions[j];
for (uint32_t j = 0; j < shEntries.Length(); j++) {
EntryAndDistance& container = shEntries[j];
if (container.mViewer == contentViewer) {
container.mDistance = std::min(container.mDistance,
DeprecatedAbs(i - shist->mIndex));

@@ -1184,44 +1134,43 @@ nsSHistory::GloballyEvictContentViewers()
}
}
// If we didn't find a TransactionAndDistance for this content viewer,
// make a new one.
// If we didn't find a EntryAndDistance for this content viewer, make a
// new one.
if (!found) {
TransactionAndDistance container(shist, trans,
DeprecatedAbs(i - shist->mIndex));
shTransactions.AppendElement(container);
EntryAndDistance container(shist, entry,
DeprecatedAbs(i - shist->mIndex));
shEntries.AppendElement(container);
}
}
}
// We've found all the transactions belonging to shist which have viewers.
// Add those transactions to our global list and move on.
transactions.AppendElements(shTransactions);
// We've found all the entries belonging to shist which have viewers.
// Add those entries to our global list and move on.
entries.AppendElements(shEntries);
}
// We now have collected all cached content viewers. First check that we
// have enough that we actually need to evict some.
if ((int32_t)transactions.Length() <= sHistoryMaxTotalViewers) {
if ((int32_t)entries.Length() <= sHistoryMaxTotalViewers) {
return;
}
// If we need to evict, sort our list of transactions and evict the largest
// If we need to evict, sort our list of entries and evict the largest
// ones. (We could of course get better algorithmic complexity here by using
// a heap or something more clever. But sHistoryMaxTotalViewers isn't large,
// so let's not worry about it.)
transactions.Sort();
entries.Sort();
for (int32_t i = transactions.Length() - 1; i >= sHistoryMaxTotalViewers;
for (int32_t i = entries.Length() - 1; i >= sHistoryMaxTotalViewers;
--i) {
(transactions[i].mSHistory)->
EvictContentViewerForTransaction(transactions[i].mTransaction);
(entries[i].mSHistory)->EvictContentViewerForEntry(entries[i].mEntry);
}
}
nsresult
nsSHistory::FindTransactionForBFCache(nsIBFCacheEntry* aEntry,
nsISHTransaction** aResult,
int32_t* aResultIndex)
nsSHistory::FindEntryForBFCache(nsIBFCacheEntry* aBFEntry,
nsISHEntry** aResult,
int32_t* aResultIndex)
{
*aResult = nullptr;
*aResultIndex = -1;
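GloballyEvictContentViewers(), shown above, ranks every cached viewer by its entry's distance from that history's current index, breaks ties with the entry's last-touched time, sorts, and evicts everything beyond sHistoryMaxTotalViewers. A self-contained model of that ordering and cut-off (illustrative only, not Gecko code; the limit is a plain parameter here):

#include <algorithm>
#include <cstdint>
#include <vector>

struct EntryAndDistance {
  int distance;          // |entry index - current index| for its history
  uint32_t lastTouched;  // tie-breaker within the same distance

  // Mirrors EntryAndDistance::operator<: compare distances first, then
  // fall back to last-accessed times.
  bool operator<(const EntryAndDistance& aOther) const {
    if (aOther.distance != distance) {
      return distance < aOther.distance;
    }
    return lastTouched < aOther.lastTouched;
  }
};

// Sort best-to-worst and report how many viewers fall past the allowed maximum.
size_t CountEvictions(std::vector<EntryAndDistance> entries, int maxTotalViewers) {
  if (static_cast<int>(entries.size()) <= maxTotalViewers) {
    return 0;  // nothing to evict
  }
  std::sort(entries.begin(), entries.end());
  return entries.size() - static_cast<size_t>(maxTotalViewers);
}

int main() {
  std::vector<EntryAndDistance> viewers{{0, 10}, {2, 5}, {1, 7}, {3, 1}};
  return CountEvictions(viewers, 3) == 1 ? 0 : 1;  // exactly one viewer past the limit
}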
@@ -1230,13 +1179,11 @@ nsSHistory::FindTransactionForBFCache(nsIBFCacheEntry* aEntry,
WindowIndices(mIndex, &startIndex, &endIndex);
for (int32_t i = startIndex; i <= endIndex; ++i) {
nsCOMPtr<nsISHTransaction> trans = mTransactions[i];
nsCOMPtr<nsISHEntry> entry;
trans->GetSHEntry(getter_AddRefs(entry));
nsCOMPtr<nsISHEntry> shEntry = mEntries[i];
// Does entry have the same BFCacheEntry as the argument to this method?
if (entry->HasBFCacheEntry(aEntry)) {
trans.forget(aResult);
// Does shEntry have the same BFCacheEntry as the argument to this method?
if (shEntry->HasBFCacheEntry(aBFEntry)) {
shEntry.forget(aResult);
*aResultIndex = i;
return NS_OK;
}

@@ -1245,28 +1192,28 @@ nsSHistory::FindTransactionForBFCache(nsIBFCacheEntry* aEntry,
}
nsresult
nsSHistory::EvictExpiredContentViewerForEntry(nsIBFCacheEntry* aEntry)
nsSHistory::EvictExpiredContentViewerForEntry(nsIBFCacheEntry* aBFEntry)
{
int32_t index;
nsCOMPtr<nsISHTransaction> trans;
FindTransactionForBFCache(aEntry, getter_AddRefs(trans), &index);
nsCOMPtr<nsISHEntry> shEntry;
FindEntryForBFCache(aBFEntry, getter_AddRefs(shEntry), &index);
if (index == mIndex) {
NS_WARNING("How did the current SHEntry expire?");
return NS_OK;
}
if (trans) {
EvictContentViewerForTransaction(trans);
if (shEntry) {
EvictContentViewerForEntry(shEntry);
}
return NS_OK;
}
NS_IMETHODIMP
nsSHistory::AddToExpirationTracker(nsIBFCacheEntry* aEntry)
nsSHistory::AddToExpirationTracker(nsIBFCacheEntry* aBFEntry)
{
RefPtr<nsSHEntryShared> entry = static_cast<nsSHEntryShared*>(aEntry);
RefPtr<nsSHEntryShared> entry = static_cast<nsSHEntryShared*>(aBFEntry);
if (!mHistoryTracker || !entry) {
return NS_ERROR_FAILURE;
}

@@ -1276,9 +1223,9 @@ nsSHistory::AddToExpirationTracker(nsIBFCacheEntry* aEntry)
}
NS_IMETHODIMP
nsSHistory::RemoveFromExpirationTracker(nsIBFCacheEntry* aEntry)
nsSHistory::RemoveFromExpirationTracker(nsIBFCacheEntry* aBFEntry)
{
RefPtr<nsSHEntryShared> entry = static_cast<nsSHEntryShared*>(aEntry);
RefPtr<nsSHEntryShared> entry = static_cast<nsSHEntryShared*>(aBFEntry);
MOZ_ASSERT(mHistoryTracker && !mHistoryTracker->IsEmpty());
if (!mHistoryTracker || !entry) {
return NS_ERROR_FAILURE;

@@ -1357,7 +1304,7 @@ RemoveChildEntries(nsISHistory* aHistory, int32_t aIndex,
nsTArray<nsID>& aEntryIDs)
{
nsCOMPtr<nsISHEntry> root;
aHistory->GetEntryAtIndex(aIndex, false, getter_AddRefs(root));
aHistory->GetEntryAtIndex(aIndex, getter_AddRefs(root));
return root ? RemoveFromSessionHistoryEntry(root, aEntryIDs) : false;
}

@@ -1406,25 +1353,25 @@ nsSHistory::RemoveDuplicate(int32_t aIndex, bool aKeepNext)
nsresult rv;
nsCOMPtr<nsISHEntry> root1, root2;
rv = GetEntryAtIndex(aIndex, false, getter_AddRefs(root1));
rv = GetEntryAtIndex(aIndex, getter_AddRefs(root1));
NS_ENSURE_SUCCESS(rv, false);
rv = GetEntryAtIndex(compareIndex, false, getter_AddRefs(root2));
rv = GetEntryAtIndex(compareIndex, getter_AddRefs(root2));
NS_ENSURE_SUCCESS(rv, false);
if (IsSameTree(root1, root2)) {
mTransactions.RemoveElementAt(aIndex);
mEntries.RemoveElementAt(aIndex);
if (mRootDocShell) {
static_cast<nsDocShell*>(mRootDocShell)->HistoryTransactionRemoved(aIndex);
static_cast<nsDocShell*>(mRootDocShell)->HistoryEntryRemoved(aIndex);
}
// Adjust our indices to reflect the removed transaction
// Adjust our indices to reflect the removed entry.
if (mIndex > aIndex) {
mIndex = mIndex - 1;
NOTIFY_LISTENERS(OnIndexChanged, (mIndex));
}
// NB: If the transaction we are removing is the transaction currently
// NB: If the entry we are removing is the entry currently
// being navigated to (mRequestedIndex) then we adjust the index
// only if we're not keeping the next entry (because if we are keeping
// the next entry (because the current is a duplicate of the next), then

@@ -1475,7 +1422,7 @@ nsSHistory::RemoveDynEntries(int32_t aIndex, nsISHEntry* aEntry)
// Remove dynamic entries which are at the index and belongs to the container.
nsCOMPtr<nsISHEntry> entry(aEntry);
if (!entry) {
GetEntryAtIndex(aIndex, false, getter_AddRefs(entry));
GetEntryAtIndex(aIndex, getter_AddRefs(entry));
}
if (entry) {

@@ -1488,15 +1435,13 @@ nsSHistory::RemoveDynEntries(int32_t aIndex, nsISHEntry* aEntry)
}
void
nsSHistory::RemoveDynEntriesForBFCacheEntry(nsIBFCacheEntry* aEntry)
nsSHistory::RemoveDynEntriesForBFCacheEntry(nsIBFCacheEntry* aBFEntry)
{
int32_t index;
nsCOMPtr<nsISHTransaction> trans;
FindTransactionForBFCache(aEntry, getter_AddRefs(trans), &index);
if (trans) {
nsCOMPtr<nsISHEntry> entry;
trans->GetSHEntry(getter_AddRefs(entry));
RemoveDynEntries(index, entry);
nsCOMPtr<nsISHEntry> shEntry;
FindEntryForBFCache(aBFEntry, getter_AddRefs(shEntry), &index);
if (shEntry) {
RemoveDynEntries(index, shEntry);
}
}

@@ -1520,7 +1465,7 @@ nsSHistory::GetCurrentURI(nsIURI** aResultURI)
nsresult rv;
nsCOMPtr<nsISHEntry> currentEntry;
rv = GetEntryAtIndex(mIndex, false, getter_AddRefs(currentEntry));
rv = GetEntryAtIndex(mIndex, getter_AddRefs(currentEntry));
if (NS_FAILED(rv) && !currentEntry) {
return rv;
}

@@ -1567,8 +1512,8 @@ nsSHistory::LoadEntry(int32_t aIndex, long aLoadType, uint32_t aHistCmd)
// Keep note of requested history index in mRequestedIndex.
mRequestedIndex = aIndex;
GetEntryAtIndex(mIndex, false, getter_AddRefs(prevEntry));
GetEntryAtIndex(mRequestedIndex, false, getter_AddRefs(nextEntry));
GetEntryAtIndex(mIndex, getter_AddRefs(prevEntry));
GetEntryAtIndex(mRequestedIndex, getter_AddRefs(nextEntry));
if (!nextEntry || !prevEntry) {
mRequestedIndex = -1;
return NS_ERROR_FAILURE;

@@ -22,7 +22,6 @@ class nsIDocShell;
class nsDocShell;
class nsSHistoryObserver;
class nsISHEntry;
class nsISHTransaction;
class nsSHistory final : public mozilla::LinkedListElement<nsSHistory>,
public nsISHistory,

@@ -144,16 +143,16 @@ private:
nsresult PrintHistory();
#endif
// Find the transaction for a given bfcache entry. It only looks up between
// Find the history entry for a given bfcache entry. It only looks up between
// the range where alive viewers may exist (i.e nsISHistory::VIEWER_WINDOW).
nsresult FindTransactionForBFCache(nsIBFCacheEntry* aEntry,
nsISHTransaction** aResult,
int32_t* aResultIndex);
nsresult FindEntryForBFCache(nsIBFCacheEntry* aBFEntry,
nsISHEntry** aResult,
int32_t* aResultIndex);
// Evict content viewers in this window which don't lie in the "safe" range
// around aIndex.
void EvictOutOfRangeWindowContentViewers(int32_t aIndex);
void EvictContentViewerForTransaction(nsISHTransaction* aTrans);
void EvictContentViewerForEntry(nsISHEntry* aEntry);
static void GloballyEvictContentViewers();
static void GloballyEvictAllContentViewers();

@@ -164,7 +163,7 @@ private:
nsresult LoadNextPossibleEntry(int32_t aNewIndex, long aLoadType,
uint32_t aHistCmd);
// aIndex is the index of the transaction which may be removed.
// aIndex is the index of the entry which may be removed.
// If aKeepNext is true, aIndex is compared to aIndex + 1,
// otherwise comparison is done to aIndex - 1.
bool RemoveDuplicate(int32_t aIndex, bool aKeepNext);

@@ -172,15 +171,15 @@ private:
// Track all bfcache entries and evict on expiration.
mozilla::UniquePtr<HistoryTracker> mHistoryTracker;
nsTArray<nsCOMPtr<nsISHTransaction>> mTransactions;
nsTArray<nsCOMPtr<nsISHEntry>> mEntries; // entries are never null
int32_t mIndex; // -1 means "no index"
int32_t mRequestedIndex; // -1 means "no requested index"
void WindowIndices(int32_t aIndex, int32_t* aOutStartIndex,
int32_t* aOutEndIndex);
// Length of mTransactions.
int32_t Length() { return int32_t(mTransactions.Length()); }
// Length of mEntries.
int32_t Length() { return int32_t(mEntries.Length()); }
// Session History listeners
nsAutoTObserverArray<nsWeakPtr, 2> mListeners;

@@ -23,7 +23,7 @@ add_task(async function test() {
.QueryInterface(Ci.nsIWebNavigation)
.sessionHistory;
is(shistory.legacySHistory.getEntryAtIndex(shistory.index, false).title,
is(shistory.legacySHistory.getEntryAtIndex(shistory.index).title,
oldTitle, 'SHEntry title after pushstate.');
});
});

@@ -59,7 +59,7 @@
var history = gBrowser.webNavigation.sessionHistory;
if (history.count == gExpected.length) {
for (var i=0; i<history.count; i++) {
var shEntry = history.legacySHistory.getEntryAtIndex(i,false).
var shEntry = history.legacySHistory.getEntryAtIndex(i).
QueryInterface(Ci.nsISHEntry);
is(!!shEntry.contentViewer, gExpected[i], "content viewer "+i+", test "+gTestCount);
}

@@ -71,9 +71,9 @@
if (j == i)
continue;
let shentry1 = history.legacySHistory.getEntryAtIndex(i, false)
let shentry1 = history.legacySHistory.getEntryAtIndex(i)
.QueryInterface(Ci.nsISHEntry);
let shentry2 = history.legacySHistory.getEntryAtIndex(j, false)
let shentry2 = history.legacySHistory.getEntryAtIndex(j)
.QueryInterface(Ci.nsISHEntry);
ok(!shentry1.sharesDocumentWith(shentry2),
'Test ' + gTestCount + ': shentry[' + i + "] shouldn't " +

@@ -49,8 +49,8 @@
let shistory = docShell.QueryInterface(SpecialPowers.Ci.nsIWebNavigation)
.sessionHistory;
// Now staticFrame has frame0 -> frame1 -> frame2.
opener.is(docShell.previousTransIndex, 3, 'docShell.previousTransIndex');
opener.is(docShell.loadedTransIndex, 2, 'docShell.loadedTransIndex');
opener.is(docShell.previousEntryIndex, 3, 'docShell.previousEntryIndex');
opener.is(docShell.loadedEntryIndex, 2, 'docShell.loadedEntryIndex');
opener.is(shistory.index, 2, 'shistory.index');
opener.is(history.length, 4, 'history.length');
opener.is(document.getElementById('staticFrame').contentWindow.location.href, BASE_URL + 'frame2.html', 'staticFrame location');

@@ -109,8 +109,8 @@
.sessionHistory;
// staticFrame: frame0 -> frame1 -> frame2 -> iframe_static
// innerStaticFrame: frame0 -> frame1
opener.is(docShell.previousTransIndex, 5, 'docShell.previousTransIndex');
opener.is(docShell.loadedTransIndex, 4, 'docShell.loadedTransIndex');
opener.is(docShell.previousEntryIndex, 5, 'docShell.previousEntryIndex');
opener.is(docShell.loadedEntryIndex, 4, 'docShell.loadedEntryIndex');
opener.is(shistory.index, 4, 'shistory.index');
opener.is(history.length, 6, 'history.length');
let staticFrame = document.getElementById('staticFrame');

@@ -12,7 +12,7 @@
isOK = true;
}
document.body.textContent = isOK ? "PASSED" : "FAILED";
opener.ok(isOK, "Duplicate session history transactions should have been removed!");
opener.ok(isOK, "Duplicate session history entries should have been removed!");
opener.nextTest();
window.close();
}

@@ -49,7 +49,7 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=1375833
ok(newFrameDocShellId, "sanity check for docshell ID");
is(newFrameDocShellId, frameDocShellId, "check docshell ID remains after reload");
let entry = shistory.legacySHistory.getEntryAtIndex(shistory.index, false);
let entry = shistory.legacySHistory.getEntryAtIndex(shistory.index);
let frameEntry = entry.GetChildAt(0);
is(String(frameEntry.docshellID), frameDocShellId, "check newly added shentry uses the same docshell ID");

@@ -26,7 +26,7 @@ var testFiles =
"file_bug508537_1.html", // Dynamic frames and forward-back
"file_document_write_1.html", // Session history + document.write
//"file_static_and_dynamic_1.html",// Static and dynamic frames and forward-back
"file_bug534178.html", // Session history transaction clean-up.
"file_bug534178.html", // Session history entry clean-up.
"file_fragment_handling_during_load.html",
"file_nested_frames.html",
"file_shiftReload_and_pushState.html",

@@ -76,7 +76,7 @@ function* runTest() {
.sessionHistory;
// Get the title of the inner popup's current SHEntry
var sheTitle = sh.legacySHistory.getEntryAtIndex(sh.index, false).title;
var sheTitle = sh.legacySHistory.getEntryAtIndex(sh.index).title;
is(sheTitle, "Changed", "SHEntry's title should change when we change.");
popup.close();

@@ -109,7 +109,7 @@ function dumpSHistory(theWindow)
dump(" requestedIndex: " + sh.legacySHistory.requestedIndex + "\n");
for (let i = 0; i < sh.count; i++) {
let shentry = sh.legacySHistory.getEntryAtIndex(i, false);
let shentry = sh.legacySHistory.getEntryAtIndex(i);
dump(" " + i + ": " + shentry.URI.spec + '\n');
for (let j = 0; j < shentry.childCount; j++) {
let child = shentry.GetChildAt(j);

@@ -61,6 +61,7 @@ public:
: DOMEventTargetHelper(aGlobal)
, mPlaybackRate(1.0)
, mAnimationIndex(sNextAnimationIndex++)
, mCachedChildIndex(-1)
, mPendingState(PendingState::NotPending)
, mFinishedAtLastComposeStyle(false)
, mIsRelevant(false)

@@ -405,6 +406,8 @@ public:
*/
virtual void MaybeQueueCancelEvent(const StickyTimeDuration& aActiveTime) {};
int32_t& CachedChildIndexRef() { return mCachedChildIndex; }
protected:
void SilentlySetCurrentTime(const TimeDuration& aNewCurrentTime);
void CancelNoUpdate();

@@ -579,6 +582,10 @@ protected:
// possible for two different objects to have the same index.
uint64_t mAnimationIndex;
// While ordering Animation objects for event dispatch, the index of the
// target node in its parent may be cached in mCachedChildIndex.
int32_t mCachedChildIndex;
// Indicates if the animation is in the pending state (and what state it is
// waiting to enter when it finished pending). We use this rather than
// checking if this animation is tracked by a PendingAnimationTracker because

@@ -247,6 +247,10 @@ private:
return;
}
for (auto& pending : mPendingEvents) {
pending.mAnimation->CachedChildIndexRef() = -1;
}
// FIXME: Replace with mPendingEvents.StableSort when bug 1147091 is
// fixed.
std::stable_sort(mPendingEvents.begin(), mPendingEvents.end(),

@@ -259,8 +259,7 @@ MarkDocShell(nsIDocShellTreeItem* aNode, bool aCleanupJS)
|
||||
int32_t historyCount = history->Count();
|
||||
for (int32_t i = 0; i < historyCount; ++i) {
|
||||
nsCOMPtr<nsISHEntry> shEntry;
|
||||
history->LegacySHistory()->GetEntryAtIndex(
|
||||
i, false, getter_AddRefs(shEntry));
|
||||
history->LegacySHistory()->GetEntryAtIndex(i, getter_AddRefs(shEntry));
|
||||
|
||||
MarkSHEntry(shEntry, aCleanupJS);
|
||||
}
|
||||
|
@ -2654,9 +2654,12 @@ nsContentUtils::GetCommonFlattenedTreeAncestorForStyle(Element* aElement1,
|
||||
|
||||
/* static */
|
||||
bool
|
||||
nsContentUtils::PositionIsBefore(nsINode* aNode1, nsINode* aNode2)
|
||||
nsContentUtils::PositionIsBefore(nsINode* aNode1, nsINode* aNode2,
|
||||
int32_t* aNode1Index,
|
||||
int32_t* aNode2Index)
|
||||
{
|
||||
return (aNode2->CompareDocumentPosition(*aNode1) &
|
||||
// Note, CompareDocumentPosition takes the latter params in different order.
|
||||
return (aNode2->CompareDocumentPosition(*aNode1, aNode2Index, aNode1Index) &
|
||||
(Node_Binding::DOCUMENT_POSITION_PRECEDING |
|
||||
Node_Binding::DOCUMENT_POSITION_DISCONNECTED)) ==
|
||||
Node_Binding::DOCUMENT_POSITION_PRECEDING;
|
||||
|
@ -434,8 +434,14 @@ public:
|
||||
/**
|
||||
* Returns true if aNode1 is before aNode2 in the same connected
|
||||
* tree.
|
||||
* aNode1Index and aNode2Index are in/out arguments. If non-null, and value is
|
||||
* not -1, that value is used instead of calling slow ComputeIndexOf on the
|
||||
* parent node. If value is -1, the value will be set to the return value of
|
||||
* ComputeIndexOf.
|
||||
*/
|
||||
static bool PositionIsBefore(nsINode* aNode1, nsINode* aNode2);
|
||||
static bool PositionIsBefore(nsINode* aNode1, nsINode* aNode2,
|
||||
int32_t* aNode1Index = nullptr,
|
||||
int32_t* aNode2Index = nullptr);
|
||||
|
||||
/**
|
||||
* Utility routine to compare two "points", where a point is a
|
||||
|
@ -752,7 +752,9 @@ nsINode::LookupPrefix(const nsAString& aNamespaceURI, nsAString& aPrefix)
|
||||
}
|
||||
|
||||
uint16_t
|
||||
nsINode::CompareDocumentPosition(nsINode& aOtherNode) const
|
||||
nsINode::CompareDocumentPosition(nsINode& aOtherNode,
|
||||
int32_t* aThisIndex,
|
||||
int32_t* aOtherIndex) const
|
||||
{
|
||||
if (this == &aOtherNode) {
|
||||
return 0;
|
||||
@ -852,9 +854,38 @@ nsINode::CompareDocumentPosition(nsINode& aOtherNode) const
|
||||
// child1 or child2 can be an attribute here. This will work fine since
|
||||
// ComputeIndexOf will return -1 for the attribute making the
|
||||
// attribute be considered before any child.
|
||||
return parent->ComputeIndexOf(child1) < parent->ComputeIndexOf(child2) ?
|
||||
int32_t child1Index;
|
||||
bool cachedChild1Index = false;
|
||||
if (&aOtherNode == child1 && aOtherIndex) {
|
||||
cachedChild1Index = true;
|
||||
child1Index = *aOtherIndex != -1 ?
|
||||
*aOtherIndex : parent->ComputeIndexOf(child1);
|
||||
} else {
|
||||
child1Index = parent->ComputeIndexOf(child1);
|
||||
}
|
||||
|
||||
int32_t child2Index;
|
||||
bool cachedChild2Index = false;
|
||||
if (this == child2 && aThisIndex) {
|
||||
cachedChild2Index = true;
|
||||
child2Index = *aThisIndex != -1 ?
|
||||
*aThisIndex : parent->ComputeIndexOf(child2);
|
||||
} else {
|
||||
child2Index = parent->ComputeIndexOf(child2);
|
||||
}
|
||||
|
||||
uint16_t retVal = child1Index < child2Index ?
|
||||
Node_Binding::DOCUMENT_POSITION_PRECEDING :
|
||||
Node_Binding::DOCUMENT_POSITION_FOLLOWING;
|
||||
|
||||
if (cachedChild1Index) {
|
||||
*aOtherIndex = child1Index;
|
||||
}
|
||||
if (cachedChild2Index) {
|
||||
*aThisIndex = child2Index;
|
||||
}
|
||||
|
||||
return retVal;
|
||||
}
|
||||
parent = child1;
|
||||
}
|
||||
@ -1460,12 +1491,68 @@ nsINode::GetPreviousSibling() const
|
||||
return mPreviousOrLastSibling;
|
||||
}
|
||||
|
||||
// CACHE_POINTER_SHIFT indicates how many steps to downshift the |this| pointer.
|
||||
// It should be small enough to not cause collisions between adjecent objects,
|
||||
// and large enough to make sure that all indexes are used.
|
||||
#define CACHE_POINTER_SHIFT 6
|
||||
#define CACHE_NUM_SLOTS 128
|
||||
#define CACHE_CHILD_LIMIT 10
|
||||
|
||||
#define CACHE_GET_INDEX(_parent) \
|
||||
((NS_PTR_TO_INT32(_parent) >> CACHE_POINTER_SHIFT) & \
|
||||
(CACHE_NUM_SLOTS - 1))
|
||||
|
||||
struct IndexCacheSlot
|
||||
{
|
||||
const nsINode* mParent;
|
||||
const nsINode* mChild;
|
||||
int32_t mChildIndex;
|
||||
};
|
||||
|
||||
static IndexCacheSlot sIndexCache[CACHE_NUM_SLOTS];
|
||||
|
||||
static inline void
|
||||
AddChildAndIndexToCache(const nsINode* aParent, const nsINode* aChild,
|
||||
int32_t aChildIndex)
|
||||
{
|
||||
uint32_t index = CACHE_GET_INDEX(aParent);
|
||||
sIndexCache[index].mParent = aParent;
|
||||
sIndexCache[index].mChild = aChild;
|
||||
sIndexCache[index].mChildIndex = aChildIndex;
|
||||
}
|
||||
|
||||
static inline void
|
||||
GetChildAndIndexFromCache(const nsINode* aParent,
|
||||
const nsINode** aChild,
|
||||
int32_t* aChildIndex)
|
||||
{
|
||||
uint32_t index = CACHE_GET_INDEX(aParent);
|
||||
if (sIndexCache[index].mParent == aParent) {
|
||||
*aChild = sIndexCache[index].mChild;
|
||||
*aChildIndex = sIndexCache[index].mChildIndex;
|
||||
} else {
|
||||
*aChild = nullptr;
|
||||
*aChildIndex = -1;
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
RemoveFromCache(const nsINode* aParent)
|
||||
{
|
||||
uint32_t index = CACHE_GET_INDEX(aParent);
|
||||
if (sIndexCache[index].mParent == aParent) {
|
||||
sIndexCache[index] = { nullptr, nullptr, -1 };
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
nsINode::AppendChildToChildList(nsIContent* aKid)
|
||||
{
|
||||
MOZ_ASSERT(aKid);
|
||||
MOZ_ASSERT(!aKid->mNextSibling);
|
||||
|
||||
RemoveFromCache(this);
|
||||
|
||||
if (mFirstChild) {
|
||||
nsIContent* lastChild = GetLastChild();
|
||||
lastChild->mNextSibling = aKid;
|
||||
@ -1485,6 +1572,8 @@ nsINode::InsertChildToChildList(nsIContent* aKid, nsIContent* aNextSibling)
|
||||
MOZ_ASSERT(aKid);
|
||||
MOZ_ASSERT(aNextSibling);
|
||||
|
||||
RemoveFromCache(this);
|
||||
|
||||
nsIContent* previousSibling = aNextSibling->mPreviousOrLastSibling;
|
||||
aNextSibling->mPreviousOrLastSibling = aKid;
|
||||
aKid->mPreviousOrLastSibling = previousSibling;
|
||||
@ -1506,6 +1595,8 @@ nsINode::DisconnectChild(nsIContent* aKid)
|
||||
MOZ_ASSERT(aKid);
|
||||
MOZ_ASSERT(GetChildCount() > 0);
|
||||
|
||||
RemoveFromCache(this);
|
||||
|
||||
nsIContent* previousSibling = aKid->GetPreviousSibling();
|
||||
nsCOMPtr<nsIContent> ref = aKid;
|
||||
|
||||
@ -1557,11 +1648,48 @@ nsINode::ComputeIndexOf(const nsINode* aChild) const
|
||||
return GetChildCount() - 1;
|
||||
}
|
||||
|
||||
if (mChildCount >= CACHE_CHILD_LIMIT) {
|
||||
const nsINode* child;
|
||||
int32_t childIndex;
|
||||
GetChildAndIndexFromCache(this, &child, &childIndex);
|
||||
if (child) {
|
||||
if (child == aChild) {
|
||||
return childIndex;
|
||||
}
|
||||
|
||||
int32_t nextIndex = childIndex;
|
||||
int32_t prevIndex = childIndex;
|
||||
nsINode* prev = child->GetPreviousSibling();
|
||||
nsINode* next = child->GetNextSibling();
|
||||
do {
|
||||
if (next) {
|
||||
++nextIndex;
|
||||
if (next == aChild) {
|
||||
AddChildAndIndexToCache(this, aChild, nextIndex);
|
||||
return nextIndex;
|
||||
}
|
||||
next = next->GetNextSibling();
|
||||
}
|
||||
if (prev) {
|
||||
--prevIndex;
|
||||
if (prev == aChild) {
|
||||
AddChildAndIndexToCache(this, aChild, prevIndex);
|
||||
return prevIndex;
|
||||
}
|
||||
prev = prev->GetPreviousSibling();
|
||||
}
|
||||
} while (prev || next);
|
||||
}
|
||||
}
|
||||
|
||||
int32_t index = 0;
|
||||
nsINode* current = mFirstChild;
|
||||
while (current) {
|
||||
MOZ_ASSERT(current->GetParentNode() == this);
|
||||
if (current == aChild) {
|
||||
if (mChildCount >= CACHE_CHILD_LIMIT) {
|
||||
AddChildAndIndexToCache(this, current, index);
|
||||
}
|
||||
return index;
|
||||
}
|
||||
current = current->GetNextSibling();
|
||||
|
@ -1769,7 +1769,11 @@ public:
|
||||
{
|
||||
return HasChildren();
|
||||
}
|
||||
uint16_t CompareDocumentPosition(nsINode& aOther) const;
|
||||
|
||||
// See nsContentUtils::PositionIsBefore for aThisIndex and aOtherIndex usage.
|
||||
uint16_t CompareDocumentPosition(nsINode& aOther,
|
||||
int32_t* aThisIndex = nullptr,
|
||||
int32_t* aOtherIndex = nullptr) const;
|
||||
void GetNodeValue(nsAString& aNodeValue)
|
||||
{
|
||||
GetNodeValueInternal(aNodeValue);
|
||||
|
@ -43,7 +43,7 @@ function getPopupURL() {
|
||||
.QueryInterface(Ci.nsIWebNavigation)
|
||||
.sessionHistory;
|
||||
|
||||
return sh.legacySHistory.getEntryAtIndex(sh.index, false).URI.spec;
|
||||
return sh.legacySHistory.getEntryAtIndex(sh.index).URI.spec;
|
||||
}
|
||||
|
||||
var wyciwygURL;
|
||||
|
@ -3482,16 +3482,8 @@ PreprocessHelper::ProcessCurrentStream()
|
||||
|
||||
MOZ_ASSERT(mCurrentBytecodeFileDesc);
|
||||
|
||||
JS::BuildIdCharVector buildId;
|
||||
bool ok = GetBuildId(&buildId);
|
||||
if (NS_WARN_IF(!ok)) {
|
||||
ContinueWithStatus(NS_ERROR_FAILURE);
|
||||
return;
|
||||
}
|
||||
|
||||
RefPtr<JS::WasmModule> module =
|
||||
JS::DeserializeWasmModule(mCurrentBytecodeFileDesc,
|
||||
std::move(buildId),
|
||||
nullptr,
|
||||
0);
|
||||
if (NS_WARN_IF(!module)) {
|
||||
|
@ -175,7 +175,7 @@ function getSHTitle(sh, offset)
|
||||
offset = 0;
|
||||
|
||||
// False instructs the SHistory not to modify its current index.
|
||||
return sh.legacySHistory.getEntryAtIndex(sh.index + offset, false).title;
|
||||
return sh.legacySHistory.getEntryAtIndex(sh.index + offset).title;
|
||||
}
|
||||
|
||||
// Tests that win's location ends with str
|
||||
|
@ -139,4 +139,14 @@
|
||||
|
||||
#define NOT_OBJECT_KIND_DESCRIPTOR 0
|
||||
|
||||
#define TYPEDARRAY_KIND_INT8 0
|
||||
#define TYPEDARRAY_KIND_UINT8 1
|
||||
#define TYPEDARRAY_KIND_INT16 2
|
||||
#define TYPEDARRAY_KIND_UINT16 3
|
||||
#define TYPEDARRAY_KIND_INT32 4
|
||||
#define TYPEDARRAY_KIND_UINT32 5
|
||||
#define TYPEDARRAY_KIND_FLOAT32 6
|
||||
#define TYPEDARRAY_KIND_FLOAT64 7
|
||||
#define TYPEDARRAY_KIND_UINT8CLAMPED 8
|
||||
|
||||
#endif
|
||||
|
@ -3751,11 +3751,13 @@ static const bool js_isUriUnescaped[] = {
|
||||
#undef ____
|
||||
|
||||
static inline bool
|
||||
TransferBufferToString(StringBuffer& sb, MutableHandleValue rval)
|
||||
TransferBufferToString(StringBuffer& sb, JSString* str, MutableHandleValue rval)
|
||||
{
|
||||
JSString* str = sb.finishString();
|
||||
if (!str)
|
||||
return false;
|
||||
if (!sb.empty()) {
|
||||
str = sb.finishString();
|
||||
if (!str)
|
||||
return false;
|
||||
}
|
||||
rval.setString(str);
|
||||
return true;
|
||||
}
|
||||
@ -3776,9 +3778,8 @@ template <typename CharT>
|
||||
static MOZ_NEVER_INLINE EncodeResult
|
||||
Encode(StringBuffer& sb, const CharT* chars, size_t length, const bool* unescapedSet)
|
||||
{
|
||||
Latin1Char hexBuf[4];
|
||||
Latin1Char hexBuf[3];
|
||||
hexBuf[0] = '%';
|
||||
hexBuf[3] = 0;
|
||||
|
||||
auto appendEncoded = [&sb, &hexBuf](Latin1Char c) {
|
||||
static const char HexDigits[] = "0123456789ABCDEF"; /* NB: uppercase */
|
||||
@ -3788,12 +3789,28 @@ Encode(StringBuffer& sb, const CharT* chars, size_t length, const bool* unescape
|
||||
return sb.append(hexBuf, 3);
|
||||
};
|
||||
|
||||
auto appendRange = [&sb, chars, length](size_t start, size_t end) {
|
||||
MOZ_ASSERT(start <= end);
|
||||
|
||||
if (start < end) {
|
||||
if (start == 0) {
|
||||
if (!sb.reserve(length))
|
||||
return false;
|
||||
}
|
||||
return sb.append(chars + start, chars + end);
|
||||
}
|
||||
return true;
|
||||
};
|
||||
|
||||
size_t startAppend = 0;
|
||||
for (size_t k = 0; k < length; k++) {
|
||||
CharT c = chars[k];
|
||||
if (c < 128 && (js_isUriUnescaped[c] || (unescapedSet && unescapedSet[c]))) {
|
||||
if (!sb.append(Latin1Char(c)))
|
||||
return Encode_Failure;
|
||||
continue;
|
||||
} else {
|
||||
if (!appendRange(startAppend, k))
|
||||
return Encode_Failure;
|
||||
|
||||
if (mozilla::IsSame<CharT, Latin1Char>::value) {
|
||||
if (c < 0x80) {
|
||||
if (!appendEncoded(c))
|
||||
@ -3828,9 +3845,16 @@ Encode(StringBuffer& sb, const CharT* chars, size_t length, const bool* unescape
|
||||
return Encode_Failure;
|
||||
}
|
||||
}
|
||||
|
||||
startAppend = k + 1;
|
||||
}
|
||||
}
|
||||
|
||||
if (startAppend > 0) {
|
||||
if (!appendRange(startAppend, length))
|
||||
return Encode_Failure;
|
||||
}
|
||||
|
||||
return Encode_Success;
|
||||
}
|
||||
|
||||
@ -3844,8 +3868,6 @@ Encode(JSContext* cx, HandleLinearString str, const bool* unescapedSet, MutableH
|
||||
}
|
||||
|
||||
StringBuffer sb(cx);
|
||||
if (!sb.reserve(length))
|
||||
return false;
|
||||
|
||||
EncodeResult res;
|
||||
if (str->hasLatin1Chars()) {
|
||||
@ -3865,7 +3887,7 @@ Encode(JSContext* cx, HandleLinearString str, const bool* unescapedSet, MutableH
|
||||
}
|
||||
|
||||
MOZ_ASSERT(res == Encode_Success);
|
||||
return TransferBufferToString(sb, rval);
|
||||
return TransferBufferToString(sb, str, rval);
|
||||
}
|
||||
|
||||
enum DecodeResult { Decode_Failure, Decode_BadUri, Decode_Success };
|
||||
@ -3874,6 +3896,15 @@ template <typename CharT>
|
||||
static DecodeResult
|
||||
Decode(StringBuffer& sb, const CharT* chars, size_t length, const bool* reservedSet)
|
||||
{
|
||||
auto appendRange = [&sb, chars](size_t start, size_t end) {
|
||||
MOZ_ASSERT(start <= end);
|
||||
|
||||
if (start < end)
|
||||
return sb.append(chars + start, chars + end);
|
||||
return true;
|
||||
};
|
||||
|
||||
size_t startAppend = 0;
|
||||
for (size_t k = 0; k < length; k++) {
|
||||
CharT c = chars[k];
|
||||
if (c == '%') {
|
||||
@ -3887,14 +3918,14 @@ Decode(StringBuffer& sb, const CharT* chars, size_t length, const bool* reserved
|
||||
uint32_t B = JS7_UNHEX(chars[k+1]) * 16 + JS7_UNHEX(chars[k+2]);
|
||||
k += 2;
|
||||
if (B < 128) {
|
||||
c = CharT(B);
|
||||
if (reservedSet && reservedSet[c]) {
|
||||
if (!sb.append(chars + start, k - start + 1))
|
||||
return Decode_Failure;
|
||||
} else {
|
||||
if (!sb.append(c))
|
||||
return Decode_Failure;
|
||||
}
|
||||
Latin1Char ch = Latin1Char(B);
|
||||
if (reservedSet && reservedSet[ch])
|
||||
continue;
|
||||
|
||||
if (!appendRange(startAppend, start))
|
||||
return Decode_Failure;
|
||||
if (!sb.append(ch))
|
||||
return Decode_Failure;
|
||||
} else {
|
||||
int n = 1;
|
||||
while (B & (0x80 >> n))
|
||||
@ -3924,6 +3955,9 @@ Decode(StringBuffer& sb, const CharT* chars, size_t length, const bool* reserved
|
||||
octets[j] = char(B);
|
||||
}
|
||||
|
||||
if (!appendRange(startAppend, start))
|
||||
return Decode_Failure;
|
||||
|
||||
uint32_t v = JS::Utf8ToOneUcs4Char(octets, n);
|
||||
MOZ_ASSERT(v >= 128);
|
||||
if (v >= unicode::NonBMPMin) {
|
||||
@ -3939,12 +3973,16 @@ Decode(StringBuffer& sb, const CharT* chars, size_t length, const bool* reserved
|
||||
return Decode_Failure;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if (!sb.append(c))
|
||||
return Decode_Failure;
|
||||
|
||||
startAppend = k + 1;
|
||||
}
|
||||
}
|
||||
|
||||
if (startAppend > 0) {
|
||||
if (!appendRange(startAppend, length))
|
||||
return Decode_Failure;
|
||||
}
|
||||
|
||||
return Decode_Success;
|
||||
}
|
||||
|
||||
@ -3977,7 +4015,7 @@ Decode(JSContext* cx, HandleLinearString str, const bool* reservedSet, MutableHa
|
||||
}
|
||||
|
||||
MOZ_ASSERT(res == Decode_Success);
|
||||
return TransferBufferToString(sb, rval);
|
||||
return TransferBufferToString(sb, str, rval);
|
||||
}
|
||||
|
||||
static bool
|
||||
@ -4024,18 +4062,21 @@ str_encodeURI_Component(JSContext* cx, unsigned argc, Value* vp)
|
||||
return Encode(cx, str, nullptr, args.rval());
|
||||
}
|
||||
|
||||
bool
|
||||
js::EncodeURI(JSContext* cx, StringBuffer& sb, const char* chars, size_t length)
|
||||
JSString*
|
||||
js::EncodeURI(JSContext* cx, const char* chars, size_t length)
|
||||
{
|
||||
StringBuffer sb(cx);
|
||||
EncodeResult result = Encode(sb, reinterpret_cast<const Latin1Char*>(chars), length,
|
||||
js_isUriReservedPlusPound);
|
||||
if (result == EncodeResult::Encode_Failure)
|
||||
return false;
|
||||
return nullptr;
|
||||
if (result == EncodeResult::Encode_BadUri) {
|
||||
JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_BAD_URI);
|
||||
return false;
|
||||
return nullptr;
|
||||
}
|
||||
return true;
|
||||
if (sb.empty())
|
||||
return NewStringCopyN<CanGC>(cx, chars, length);
|
||||
return sb.finishString();
|
||||
}
|
||||
|
||||
static bool
|
||||
|
@ -1191,22 +1191,40 @@ function TypedArraySort(comparefn) {
|
||||
return obj;
|
||||
|
||||
if (comparefn === undefined) {
|
||||
if (IsUint8TypedArray(obj)) {
|
||||
var kind = GetTypedArrayKind(obj);
|
||||
switch (kind) {
|
||||
case TYPEDARRAY_KIND_UINT8:
|
||||
case TYPEDARRAY_KIND_UINT8CLAMPED:
|
||||
return CountingSort(obj, len, false /* signed */, TypedArrayCompareInt);
|
||||
} else if (IsInt8TypedArray(obj)) {
|
||||
case TYPEDARRAY_KIND_INT8:
|
||||
return CountingSort(obj, len, true /* signed */, TypedArrayCompareInt);
|
||||
} else if (IsUint16TypedArray(obj)) {
|
||||
return RadixSort(obj, len, buffer, 2 /* nbytes */, false /* signed */, false /* floating */, TypedArrayCompareInt);
|
||||
} else if (IsInt16TypedArray(obj)) {
|
||||
return RadixSort(obj, len, buffer, 2 /* nbytes */, true /* signed */, false /* floating */, TypedArrayCompareInt);
|
||||
} else if (IsUint32TypedArray(obj)) {
|
||||
return RadixSort(obj, len, buffer, 4 /* nbytes */, false /* signed */, false /* floating */, TypedArrayCompareInt);
|
||||
} else if (IsInt32TypedArray(obj)) {
|
||||
return RadixSort(obj, len, buffer, 4 /* nbytes */, true /* signed */, false /* floating */, TypedArrayCompareInt);
|
||||
} else if (IsFloat32TypedArray(obj)) {
|
||||
return RadixSort(obj, len, buffer, 4 /* nbytes */, true /* signed */, true /* floating */, TypedArrayCompare);
|
||||
case TYPEDARRAY_KIND_UINT16:
|
||||
return RadixSort(obj, len, buffer,
|
||||
2 /* nbytes */, false /* signed */, false /* floating */,
|
||||
TypedArrayCompareInt);
|
||||
case TYPEDARRAY_KIND_INT16:
|
||||
return RadixSort(obj, len, buffer,
|
||||
2 /* nbytes */, true /* signed */, false /* floating */,
|
||||
TypedArrayCompareInt);
|
||||
case TYPEDARRAY_KIND_UINT32:
|
||||
return RadixSort(obj, len, buffer,
|
||||
4 /* nbytes */, false /* signed */, false /* floating */,
|
||||
TypedArrayCompareInt);
|
||||
case TYPEDARRAY_KIND_INT32:
|
||||
return RadixSort(obj, len, buffer,
|
||||
4 /* nbytes */, true /* signed */, false /* floating */,
|
||||
TypedArrayCompareInt);
|
||||
case TYPEDARRAY_KIND_FLOAT32:
|
||||
return RadixSort(obj, len, buffer,
|
||||
4 /* nbytes */, true /* signed */, true /* floating */,
|
||||
TypedArrayCompare);
|
||||
case TYPEDARRAY_KIND_FLOAT64:
|
||||
default:
|
||||
// Include |default| to ensure Ion marks this call as the
|
||||
// last instruction in the if-statement.
|
||||
assert(kind === TYPEDARRAY_KIND_FLOAT64, "unexpected typed array kind");
|
||||
return QuickSort(obj, len, TypedArrayCompare);
|
||||
}
|
||||
return QuickSort(obj, len, TypedArrayCompare);
|
||||
}
|
||||
|
||||
// To satisfy step 2 from TypedArray SortCompare described in 22.2.3.26
|
||||
|
@ -24,7 +24,7 @@ GetBuildId(JS::BuildIdCharVector* buildId)
|
||||
static JSScript*
|
||||
FreezeThaw(JSContext* cx, JS::HandleScript script)
|
||||
{
|
||||
JS::SetBuildIdOp(cx, GetBuildId);
|
||||
JS::SetProcessBuildIdOp(::GetBuildId);
|
||||
|
||||
// freeze
|
||||
JS::TranscodeBuffer buffer;
|
||||
|
@ -7626,9 +7626,9 @@ JS::FinishIncrementalEncoding(JSContext* cx, JS::HandleScript script, TranscodeB
|
||||
}
|
||||
|
||||
JS_PUBLIC_API(void)
|
||||
JS::SetBuildIdOp(JSContext* cx, JS::BuildIdOp buildIdOp)
|
||||
JS::SetProcessBuildIdOp(JS::BuildIdOp buildIdOp)
|
||||
{
|
||||
cx->runtime()->buildIdOp = buildIdOp;
|
||||
GetBuildId = buildIdOp;
|
||||
}
|
||||
|
||||
JS_PUBLIC_API(void)
|
||||
@ -7654,10 +7654,9 @@ JS::GetWasmModule(HandleObject obj)
|
||||
}
|
||||
|
||||
JS_PUBLIC_API(RefPtr<JS::WasmModule>)
|
||||
JS::DeserializeWasmModule(PRFileDesc* bytecode, JS::BuildIdCharVector&& buildId,
|
||||
UniqueChars filename, unsigned line)
|
||||
JS::DeserializeWasmModule(PRFileDesc* bytecode, UniqueChars filename, unsigned line)
|
||||
{
|
||||
return wasm::DeserializeModule(bytecode, std::move(buildId), std::move(filename), line);
|
||||
return wasm::DeserializeModule(bytecode, std::move(filename), line);
|
||||
}
|
||||
|
||||
JS_PUBLIC_API(void)
|
||||
|
@ -4654,22 +4654,22 @@ SetAsmJSCacheOps(JSContext* cx, const AsmJSCacheOps* callbacks);
|
||||
* engine, it is critical that the buildId shall change for each new build of
|
||||
* the JS engine.
|
||||
*/
|
||||
|
||||
typedef js::Vector<char, 0, js::SystemAllocPolicy> BuildIdCharVector;
|
||||
|
||||
typedef bool
|
||||
(* BuildIdOp)(BuildIdCharVector* buildId);
|
||||
|
||||
extern JS_PUBLIC_API(void)
|
||||
SetBuildIdOp(JSContext* cx, BuildIdOp buildIdOp);
|
||||
SetProcessBuildIdOp(BuildIdOp buildIdOp);
|
||||
|
||||
/**
|
||||
* The WasmModule interface allows the embedding to hold a reference to the
|
||||
* underying C++ implementation of a JS WebAssembly.Module object for purposes
|
||||
* of efficient postMessage() and (de)serialization from a random thread.
|
||||
*
|
||||
* For postMessage() sharing:
|
||||
*
|
||||
* - GetWasmModule() is called when making a structured clone of payload
|
||||
* In particular, this allows postMessage() of a WebAssembly.Module:
|
||||
* GetWasmModule() is called when making a structured clone of a payload
|
||||
* containing a WebAssembly.Module object. The structured clone buffer holds a
|
||||
* refcount of the JS::WasmModule until createObject() is called in the target
|
||||
* agent's JSContext. The new WebAssembly.Module object continues to hold the
|
||||
@ -4678,22 +4678,6 @@ SetBuildIdOp(JSContext* cx, BuildIdOp buildIdOp);
|
||||
* methods of the C++ module) must be thread-safe.
|
||||
*/
|
||||
|
||||
class WasmModuleListener
|
||||
{
|
||||
protected:
|
||||
virtual ~WasmModuleListener() {}
|
||||
|
||||
public:
|
||||
// These method signatures are chosen to exactly match nsISupports so that a
|
||||
// plain nsISupports-implementing class can trivially implement this
|
||||
// interface too. We can't simply #include "nsISupports.h" so we use MFBT
|
||||
// equivalents for all the platform-dependent types.
|
||||
virtual MozExternalRefCountType MOZ_XPCOM_ABI AddRef() = 0;
|
||||
virtual MozExternalRefCountType MOZ_XPCOM_ABI Release() = 0;
|
||||
|
||||
virtual void onCompilationComplete() = 0;
|
||||
};
|
||||
|
||||
struct WasmModule : js::AtomicRefCounted<WasmModule>
|
||||
{
|
||||
virtual ~WasmModule() {}
|
||||
@ -4706,9 +4690,13 @@ IsWasmModuleObject(HandleObject obj);
|
||||
extern JS_PUBLIC_API(RefPtr<WasmModule>)
|
||||
GetWasmModule(HandleObject obj);
|
||||
|
||||
/**
|
||||
* This function will be removed when bug 1487479 expunges the last remaining
|
||||
* bits of wasm IDB support.
|
||||
*/
|
||||
|
||||
extern JS_PUBLIC_API(RefPtr<WasmModule>)
|
||||
DeserializeWasmModule(PRFileDesc* bytecode, BuildIdCharVector&& buildId,
|
||||
JS::UniqueChars filename, unsigned line);
|
||||
DeserializeWasmModule(PRFileDesc* bytecode, JS::UniqueChars filename, unsigned line);
|
||||
|
||||
/**
|
||||
* Convenience class for imitating a JS level for-of loop. Typical usage:
|
||||
|
@ -3634,7 +3634,6 @@ WorkerMain(WorkerInput* input)
|
||||
JS_SetContextPrivate(cx, sc);
|
||||
JS_SetGrayGCRootsTracer(cx, TraceGrayRoots, nullptr);
|
||||
SetWorkerContextOptions(cx);
|
||||
JS::SetBuildIdOp(cx, ShellBuildId);
|
||||
|
||||
JS_SetFutexCanWait(cx);
|
||||
JS::SetWarningReporter(cx, WarningReporter);
|
||||
@ -9884,6 +9883,8 @@ main(int argc, char** argv, char** envp)
|
||||
if (!InitSharedObjectMailbox())
|
||||
return 1;
|
||||
|
||||
JS::SetProcessBuildIdOp(ShellBuildId);
|
||||
|
||||
// The fake CPU count must be set before initializing the Runtime,
|
||||
// which spins up the thread pool.
|
||||
int32_t cpuCount = op.getIntOption("cpu-count"); // What we're really setting
|
||||
@ -9924,7 +9925,6 @@ main(int argc, char** argv, char** envp)
|
||||
JS_SetDestroyCompartmentCallback(cx, DestroyShellCompartmentPrivate);
|
||||
|
||||
JS_AddInterruptCallback(cx, ShellInterruptCallback);
|
||||
JS::SetBuildIdOp(cx, ShellBuildId);
|
||||
JS::SetAsmJSCacheOps(cx, &asmJSCacheOps);
|
||||
|
||||
bufferStreamState =
|
||||
|
@ -219,8 +219,8 @@ FileEscapedString(FILE* fp, const char* chars, size_t length, uint32_t quote)
|
||||
return res;
|
||||
}
|
||||
|
||||
bool
|
||||
EncodeURI(JSContext* cx, StringBuffer& sb, const char* chars, size_t length);
|
||||
JSString*
|
||||
EncodeURI(JSContext* cx, const char* chars, size_t length);
|
||||
|
||||
} // namespace js
|
||||
|
||||
|
@ -226,7 +226,6 @@ struct JSContext : public JS::RootingContext,
|
||||
bool permanentAtomsPopulated() { return runtime_->permanentAtomsPopulated(); }
|
||||
const js::FrozenAtomSet& permanentAtoms() { return *runtime_->permanentAtoms(); }
|
||||
js::WellKnownSymbols& wellKnownSymbols() { return *runtime_->wellKnownSymbols; }
|
||||
JS::BuildIdOp buildIdOp() { return runtime_->buildIdOp; }
|
||||
const JS::AsmJSCacheOps& asmJSCacheOps() { return runtime_->asmJSCacheOps; }
|
||||
js::PropertyName* emptyString() { return runtime_->emptyString; }
|
||||
js::FreeOp* defaultFreeOp() { return runtime_->defaultFreeOp(); }
|
||||
|
@ -66,6 +66,7 @@ using JS::DoubleNaNValue;
|
||||
/* static */ MOZ_THREAD_LOCAL(JSContext*) js::TlsContext;
|
||||
/* static */ Atomic<size_t> JSRuntime::liveRuntimesCount;
|
||||
Atomic<JS::LargeAllocationFailureCallback> js::OnLargeAllocationFailure;
|
||||
Atomic<JS::BuildIdOp> js::GetBuildId;
|
||||
|
||||
namespace js {
|
||||
bool gCanUseExtraThreads = true;
|
||||
@ -119,7 +120,6 @@ JSRuntime::JSRuntime(JSRuntime* parentRuntime)
|
||||
readPrincipals(nullptr),
|
||||
warningReporter(nullptr),
|
||||
geckoProfiler_(thisFromCtor()),
|
||||
buildIdOp(nullptr),
|
||||
trustedPrincipals_(nullptr),
|
||||
wrapObjectCallbacks(&DefaultWrapObjectCallbacks),
|
||||
preserveWrapperCallback(nullptr),
|
||||
|
@ -400,8 +400,6 @@ struct JSRuntime : public js::MallocProvider<JSRuntime>
|
||||
void finishRoots();
|
||||
|
||||
public:
|
||||
js::UnprotectedData<JS::BuildIdOp> buildIdOp;
|
||||
|
||||
/* AsmJSCache callbacks are runtime-wide. */
|
||||
js::UnprotectedData<JS::AsmJSCacheOps> asmJSCacheOps;
|
||||
|
||||
@ -1193,6 +1191,9 @@ extern const JSSecurityCallbacks NullSecurityCallbacks;
|
||||
// and may be null. See comment in jsapi.h.
|
||||
extern mozilla::Atomic<JS::LargeAllocationFailureCallback> OnLargeAllocationFailure;
|
||||
|
||||
// This callback is set by JS::SetBuildIdOp and may be null. See comment in jsapi.h.
|
||||
extern mozilla::Atomic<JS::BuildIdOp> GetBuildId;
|
||||
|
||||
} /* namespace js */
|
||||
|
||||
#endif /* vm_Runtime_h */
|
||||
|
@ -1019,63 +1019,38 @@ intrinsic_SharedArrayBuffersMemorySame(JSContext* cx, unsigned argc, Value* vp)
|
||||
}
|
||||
|
||||
static bool
|
||||
intrinsic_IsSpecificTypedArray(JSContext* cx, unsigned argc, Value* vp, Scalar::Type type)
|
||||
intrinsic_GetTypedArrayKind(JSContext* cx, unsigned argc, Value* vp)
|
||||
{
|
||||
CallArgs args = CallArgsFromVp(argc, vp);
|
||||
MOZ_ASSERT(args.length() == 1);
|
||||
MOZ_ASSERT(args[0].isObject());
|
||||
|
||||
static_assert(TYPEDARRAY_KIND_INT8 == Scalar::Type::Int8,
|
||||
"TYPEDARRAY_KIND_INT8 doesn't match the scalar type");
|
||||
static_assert(TYPEDARRAY_KIND_UINT8 == Scalar::Type::Uint8,
|
||||
"TYPEDARRAY_KIND_UINT8 doesn't match the scalar type");
|
||||
static_assert(TYPEDARRAY_KIND_INT16 == Scalar::Type::Int16,
|
||||
"TYPEDARRAY_KIND_INT16 doesn't match the scalar type");
|
||||
static_assert(TYPEDARRAY_KIND_UINT16 == Scalar::Type::Uint16,
|
||||
"TYPEDARRAY_KIND_UINT16 doesn't match the scalar type");
|
||||
static_assert(TYPEDARRAY_KIND_INT32 == Scalar::Type::Int32,
|
||||
"TYPEDARRAY_KIND_INT32 doesn't match the scalar type");
|
||||
static_assert(TYPEDARRAY_KIND_UINT32 == Scalar::Type::Uint32,
|
||||
"TYPEDARRAY_KIND_UINT32 doesn't match the scalar type");
|
||||
static_assert(TYPEDARRAY_KIND_FLOAT32 == Scalar::Type::Float32,
|
||||
"TYPEDARRAY_KIND_FLOAT32 doesn't match the scalar type");
|
||||
static_assert(TYPEDARRAY_KIND_FLOAT64 == Scalar::Type::Float64,
|
||||
"TYPEDARRAY_KIND_FLOAT64 doesn't match the scalar type");
|
||||
static_assert(TYPEDARRAY_KIND_UINT8CLAMPED == Scalar::Type::Uint8Clamped,
|
||||
"TYPEDARRAY_KIND_UINT8CLAMPED doesn't match the scalar type");
|
||||
|
||||
JSObject* obj = &args[0].toObject();
|
||||
Scalar::Type type = JS_GetArrayBufferViewType(obj);
|
||||
|
||||
bool isArray = JS_GetArrayBufferViewType(obj) == type;
|
||||
|
||||
args.rval().setBoolean(isArray);
|
||||
args.rval().setInt32(static_cast<int32_t>(type));
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool
|
||||
intrinsic_IsUint8TypedArray(JSContext* cx, unsigned argc, Value* vp)
|
||||
{
|
||||
return intrinsic_IsSpecificTypedArray(cx, argc, vp, Scalar::Uint8) ||
|
||||
intrinsic_IsSpecificTypedArray(cx, argc, vp, Scalar::Uint8Clamped);
|
||||
}
|
||||
|
||||
static bool
|
||||
intrinsic_IsInt8TypedArray(JSContext* cx, unsigned argc, Value* vp)
|
||||
{
|
||||
return intrinsic_IsSpecificTypedArray(cx, argc, vp, Scalar::Int8);
|
||||
}
|
||||
|
||||
static bool
|
||||
intrinsic_IsUint16TypedArray(JSContext* cx, unsigned argc, Value* vp)
|
||||
{
|
||||
return intrinsic_IsSpecificTypedArray(cx, argc, vp, Scalar::Uint16);
|
||||
}
|
||||
|
||||
static bool
|
||||
intrinsic_IsInt16TypedArray(JSContext* cx, unsigned argc, Value* vp)
|
||||
{
|
||||
return intrinsic_IsSpecificTypedArray(cx, argc, vp, Scalar::Int16);
|
||||
}
|
||||
|
||||
static bool
|
||||
intrinsic_IsUint32TypedArray(JSContext* cx, unsigned argc, Value* vp)
|
||||
{
|
||||
return intrinsic_IsSpecificTypedArray(cx, argc, vp, Scalar::Uint32);
|
||||
}
|
||||
|
||||
static bool
|
||||
intrinsic_IsInt32TypedArray(JSContext* cx, unsigned argc, Value* vp)
|
||||
{
|
||||
return intrinsic_IsSpecificTypedArray(cx, argc, vp, Scalar::Int32);
|
||||
}
|
||||
|
||||
static bool
|
||||
intrinsic_IsFloat32TypedArray(JSContext* cx, unsigned argc, Value* vp)
|
||||
{
|
||||
return intrinsic_IsSpecificTypedArray(cx, argc, vp, Scalar::Float32);
|
||||
}
|
||||
|
||||
static bool
|
||||
intrinsic_IsPossiblyWrappedTypedArray(JSContext* cx, unsigned argc, Value* vp)
|
||||
{
|
||||
@ -2545,13 +2520,7 @@ static const JSFunctionSpec intrinsic_functions[] = {
|
||||
JS_FN("SharedArrayBuffersMemorySame",
|
||||
intrinsic_SharedArrayBuffersMemorySame, 2,0),
|
||||
|
||||
JS_FN("IsUint8TypedArray", intrinsic_IsUint8TypedArray, 1,0),
|
||||
JS_FN("IsInt8TypedArray", intrinsic_IsInt8TypedArray, 1,0),
|
||||
JS_FN("IsUint16TypedArray", intrinsic_IsUint16TypedArray, 1,0),
|
||||
JS_FN("IsInt16TypedArray", intrinsic_IsInt16TypedArray, 1,0),
|
||||
JS_FN("IsUint32TypedArray", intrinsic_IsUint32TypedArray, 1,0),
|
||||
JS_FN("IsInt32TypedArray", intrinsic_IsInt32TypedArray, 1,0),
|
||||
JS_FN("IsFloat32TypedArray", intrinsic_IsFloat32TypedArray, 1,0),
|
||||
JS_FN("GetTypedArrayKind", intrinsic_GetTypedArrayKind, 1,0),
|
||||
JS_INLINABLE_FN("IsTypedArray",
|
||||
intrinsic_IsInstanceOfBuiltin<TypedArrayObject>, 1,0,
|
||||
IntrinsicIsTypedArray),
|
||||
|
@ -86,8 +86,8 @@ static XDRResult
|
||||
VersionCheck(XDRState<mode>* xdr)
|
||||
{
|
||||
JS::BuildIdCharVector buildId;
|
||||
MOZ_ASSERT(xdr->cx()->buildIdOp());
|
||||
if (!xdr->cx()->buildIdOp()(&buildId)) {
|
||||
MOZ_ASSERT(GetBuildId);
|
||||
if (!GetBuildId(&buildId)) {
|
||||
ReportOutOfMemory(xdr->cx());
|
||||
return xdr->fail(JS::TranscodeResult_Throw);
|
||||
}
|
||||
|
@ -2079,7 +2079,7 @@ class MOZ_STACK_CLASS JS_HAZ_ROOTED ModuleValidator
|
||||
env_.memoryUsage = MemoryUsage::None;
|
||||
return true;
|
||||
}
|
||||
SharedModule finish() {
|
||||
SharedModule finish(UniqueLinkData* linkData) {
|
||||
MOZ_ASSERT(env_.funcTypes.empty());
|
||||
if (!env_.funcTypes.resize(funcImportMap_.count() + funcDefs_.length()))
|
||||
return nullptr;
|
||||
@ -2122,8 +2122,8 @@ class MOZ_STACK_CLASS JS_HAZ_ROOTED ModuleValidator
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
MutableCompileArgs args = cx_->new_<CompileArgs>();
|
||||
if (!args || !args->initFromContext(cx_, std::move(scriptedCaller)))
|
||||
MutableCompileArgs args = cx_->new_<CompileArgs>(cx_, std::move(scriptedCaller));
|
||||
if (!args)
|
||||
return nullptr;
|
||||
|
||||
uint32_t codeSectionSize = 0;
|
||||
@ -2155,7 +2155,7 @@ class MOZ_STACK_CLASS JS_HAZ_ROOTED ModuleValidator
|
||||
if (!mg.finishFuncDefs())
|
||||
return nullptr;
|
||||
|
||||
return mg.finishModule(*bytes);
|
||||
return mg.finishModule(*bytes, linkData);
|
||||
}
|
||||
};
|
||||
|
||||
@ -5685,7 +5685,8 @@ CheckModuleEnd(ModuleValidator &m)
|
||||
}
|
||||
|
||||
static SharedModule
|
||||
CheckModule(JSContext* cx, AsmJSParser& parser, ParseNode* stmtList, unsigned* time)
|
||||
CheckModule(JSContext* cx, AsmJSParser& parser, ParseNode* stmtList, UniqueLinkData* linkData,
|
||||
unsigned* time)
|
||||
{
|
||||
int64_t before = PRMJ_Now();
|
||||
|
||||
@ -5726,7 +5727,7 @@ CheckModule(JSContext* cx, AsmJSParser& parser, ParseNode* stmtList, unsigned* t
|
||||
if (!CheckModuleEnd(m))
|
||||
return nullptr;
|
||||
|
||||
SharedModule module = m.finish();
|
||||
SharedModule module = m.finish(linkData);
|
||||
if (!module)
|
||||
return nullptr;
|
||||
|
||||
@ -6326,6 +6327,72 @@ AsmJSMetadata::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
|
||||
|
||||
namespace {
|
||||
|
||||
// Assumptions captures ambient state that must be the same when compiling and
|
||||
// deserializing a module for the compiled code to be valid. If it's not, then
|
||||
// the module must be recompiled from scratch.
|
||||
|
||||
struct Assumptions
|
||||
{
|
||||
uint32_t cpuId;
|
||||
JS::BuildIdCharVector buildId;
|
||||
|
||||
Assumptions();
|
||||
bool init();
|
||||
|
||||
bool operator==(const Assumptions& rhs) const;
|
||||
bool operator!=(const Assumptions& rhs) const { return !(*this == rhs); }
|
||||
|
||||
size_t serializedSize() const;
|
||||
uint8_t* serialize(uint8_t* cursor) const;
|
||||
const uint8_t* deserialize(const uint8_t* cursor, size_t remain);
|
||||
};
|
||||
|
||||
Assumptions::Assumptions()
|
||||
: cpuId(ObservedCPUFeatures()),
|
||||
buildId()
|
||||
{}
|
||||
|
||||
bool
|
||||
Assumptions::init()
|
||||
{
|
||||
return GetBuildId && GetBuildId(&buildId);
|
||||
}
|
||||
|
||||
bool
|
||||
Assumptions::operator==(const Assumptions& rhs) const
|
||||
{
|
||||
return cpuId == rhs.cpuId &&
|
||||
buildId.length() == rhs.buildId.length() &&
|
||||
ArrayEqual(buildId.begin(), rhs.buildId.begin(), buildId.length());
|
||||
}
|
||||
|
||||
size_t
|
||||
Assumptions::serializedSize() const
|
||||
{
|
||||
return sizeof(uint32_t) +
|
||||
SerializedPodVectorSize(buildId);
|
||||
}
|
||||
|
||||
uint8_t*
|
||||
Assumptions::serialize(uint8_t* cursor) const
|
||||
{
|
||||
// The format of serialized Assumptions must never change in a way that
|
||||
// would cause old cache files written with by an old build-id to match the
|
||||
// assumptions of a different build-id.
|
||||
|
||||
cursor = WriteScalar<uint32_t>(cursor, cpuId);
|
||||
cursor = SerializePodVector(cursor, buildId);
|
||||
return cursor;
|
||||
}
|
||||
|
||||
const uint8_t*
|
||||
Assumptions::deserialize(const uint8_t* cursor, size_t remain)
|
||||
{
|
||||
(cursor = ReadScalarChecked<uint32_t>(cursor, &remain, &cpuId)) &&
|
||||
(cursor = DeserializePodVectorChecked(cursor, &remain, &buildId));
|
||||
return cursor;
|
||||
}
|
||||
|
||||
class ModuleChars
|
||||
{
|
||||
protected:
|
||||
@ -6517,7 +6584,7 @@ struct ScopedCacheEntryOpenedForRead
|
||||
} // unnamed namespace
|
||||
|
||||
static JS::AsmJSCacheResult
|
||||
StoreAsmJSModuleInCache(AsmJSParser& parser, Module& module, JSContext* cx)
|
||||
StoreAsmJSModuleInCache(AsmJSParser& parser, Module& module, const LinkData& linkData, JSContext* cx)
|
||||
{
|
||||
ModuleCharsForStore moduleChars;
|
||||
if (!moduleChars.init(parser))
|
||||
@ -6525,11 +6592,16 @@ StoreAsmJSModuleInCache(AsmJSParser& parser, Module& module, JSContext* cx)
|
||||
|
||||
MOZ_RELEASE_ASSERT(module.bytecode().length() == 0);
|
||||
|
||||
size_t compiledSize = module.compiledSerializedSize();
|
||||
MOZ_RELEASE_ASSERT(compiledSize <= UINT32_MAX);
|
||||
size_t moduleSize = module.serializedSize(linkData);
|
||||
MOZ_RELEASE_ASSERT(moduleSize <= UINT32_MAX);
|
||||
|
||||
size_t serializedSize = sizeof(uint32_t) +
|
||||
compiledSize +
|
||||
Assumptions assumptions;
|
||||
if (!assumptions.init())
|
||||
return JS::AsmJSCache_InternalError;
|
||||
|
||||
size_t serializedSize = assumptions.serializedSize() +
|
||||
sizeof(uint32_t) +
|
||||
moduleSize +
|
||||
moduleChars.serializedSize();
|
||||
|
||||
JS::OpenAsmJSCacheEntryForWriteOp open = cx->asmJSCacheOps().openEntryForWrite;
|
||||
@ -6547,14 +6619,12 @@ StoreAsmJSModuleInCache(AsmJSParser& parser, Module& module, JSContext* cx)
|
||||
|
||||
uint8_t* cursor = entry.memory;
|
||||
|
||||
// Everything serialized before the Module must not change incompatibly
|
||||
// between any two builds (regardless of platform, architecture, ...).
|
||||
// (The Module::assumptionsMatch() guard everything in the Module and
|
||||
// afterwards.)
|
||||
cursor = WriteScalar<uint32_t>(cursor, compiledSize);
|
||||
cursor = assumptions.serialize(cursor);
|
||||
|
||||
module.compiledSerialize(cursor, compiledSize);
|
||||
cursor += compiledSize;
|
||||
cursor = WriteScalar<uint32_t>(cursor, moduleSize);
|
||||
|
||||
module.serialize(linkData, cursor, moduleSize);
|
||||
cursor += moduleSize;
|
||||
|
||||
cursor = moduleChars.serialize(cursor);
|
||||
|
||||
@ -6582,32 +6652,32 @@ LookupAsmJSModuleInCache(JSContext* cx, AsmJSParser& parser, bool* loadedFromCac
|
||||
if (!open(cx->global(), begin, limit, &entry.serializedSize, &entry.memory, &entry.handle))
|
||||
return true;
|
||||
|
||||
size_t remain = entry.serializedSize;
|
||||
const uint8_t* cursor = entry.memory;
|
||||
|
||||
uint32_t compiledSize;
|
||||
cursor = ReadScalarChecked<uint32_t>(cursor, &remain, &compiledSize);
|
||||
Assumptions deserializedAssumptions;
|
||||
cursor = deserializedAssumptions.deserialize(cursor, entry.serializedSize);
|
||||
if (!cursor)
|
||||
return true;
|
||||
|
||||
Assumptions assumptions;
|
||||
if (!assumptions.initBuildIdFromContext(cx))
|
||||
return false;
|
||||
Assumptions currentAssumptions;
|
||||
if (!currentAssumptions.init() || currentAssumptions != deserializedAssumptions)
|
||||
return true;
|
||||
|
||||
if (!Module::assumptionsMatch(assumptions, cursor, remain))
|
||||
uint32_t moduleSize;
|
||||
cursor = ReadScalar<uint32_t>(cursor, &moduleSize);
|
||||
if (!cursor)
|
||||
return true;
|
||||
|
||||
MutableAsmJSMetadata asmJSMetadata = cx->new_<AsmJSMetadata>();
|
||||
if (!asmJSMetadata)
|
||||
return false;
|
||||
|
||||
*module = Module::deserialize(/* bytecodeBegin = */ nullptr, /* bytecodeSize = */ 0,
|
||||
cursor, compiledSize, asmJSMetadata.get());
|
||||
*module = Module::deserialize(cursor, moduleSize, asmJSMetadata.get());
|
||||
if (!*module) {
|
||||
ReportOutOfMemory(cx);
|
||||
return false;
|
||||
}
|
||||
cursor += compiledSize;
|
||||
cursor += moduleSize;
|
||||
|
||||
// Due to the hash comparison made by openEntryForRead, this should succeed
|
||||
// with high probability.
|
||||
@ -6775,8 +6845,9 @@ js::CompileAsmJS(JSContext* cx, AsmJSParser& parser, ParseNode* stmtList, bool*
|
||||
if (!loadedFromCache) {
|
||||
// "Checking" parses, validates and compiles, producing a fully compiled
|
||||
// WasmModuleObject as result.
|
||||
UniqueLinkData linkData;
|
||||
unsigned time;
|
||||
module = CheckModule(cx, parser, stmtList, &time);
|
||||
module = CheckModule(cx, parser, stmtList, &linkData, &time);
|
||||
if (!module)
|
||||
return NoExceptionPending(cx);
|
||||
|
||||
@ -6784,7 +6855,7 @@ js::CompileAsmJS(JSContext* cx, AsmJSParser& parser, ParseNode* stmtList, bool*
|
||||
// AsmJSModule must be stored before static linking since static linking
|
||||
// specializes the AsmJSModule to the current process's address space
|
||||
// and therefore must be executed after a cache hit.
|
||||
JS::AsmJSCacheResult cacheResult = StoreAsmJSModuleInCache(parser, *module, cx);
|
||||
JS::AsmJSCacheResult cacheResult = StoreAsmJSModuleInCache(parser, *module, *linkData, cx);
|
||||
|
||||
// Build the string message to display in the developer console.
|
||||
message = BuildConsoleMessage(time, cacheResult);
|
||||
|
@ -40,6 +40,73 @@ using mozilla::BinarySearch;
|
||||
using mozilla::MakeEnumeratedRange;
|
||||
using mozilla::PodAssign;
|
||||
|
||||
size_t
|
||||
LinkData::SymbolicLinkArray::serializedSize() const
|
||||
{
|
||||
size_t size = 0;
|
||||
for (const Uint32Vector& offsets : *this)
|
||||
size += SerializedPodVectorSize(offsets);
|
||||
return size;
|
||||
}
|
||||
|
||||
uint8_t*
|
||||
LinkData::SymbolicLinkArray::serialize(uint8_t* cursor) const
|
||||
{
|
||||
for (const Uint32Vector& offsets : *this)
|
||||
cursor = SerializePodVector(cursor, offsets);
|
||||
return cursor;
|
||||
}
|
||||
|
||||
const uint8_t*
|
||||
LinkData::SymbolicLinkArray::deserialize(const uint8_t* cursor)
|
||||
{
|
||||
for (Uint32Vector& offsets : *this) {
|
||||
cursor = DeserializePodVector(cursor, &offsets);
|
||||
if (!cursor)
|
||||
return nullptr;
|
||||
}
|
||||
return cursor;
|
||||
}
|
||||
|
||||
size_t
|
||||
LinkData::SymbolicLinkArray::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
|
||||
{
|
||||
size_t size = 0;
|
||||
for (const Uint32Vector& offsets : *this)
|
||||
size += offsets.sizeOfExcludingThis(mallocSizeOf);
|
||||
return size;
|
||||
}
|
||||
|
||||
size_t
|
||||
LinkData::serializedSize() const
|
||||
{
|
||||
return sizeof(pod()) +
|
||||
SerializedPodVectorSize(internalLinks) +
|
||||
symbolicLinks.serializedSize();
|
||||
}
|
||||
|
||||
uint8_t*
|
||||
LinkData::serialize(uint8_t* cursor) const
|
||||
{
|
||||
MOZ_ASSERT(tier == Tier::Serialized);
|
||||
|
||||
cursor = WriteBytes(cursor, &pod(), sizeof(pod()));
|
||||
cursor = SerializePodVector(cursor, internalLinks);
|
||||
cursor = symbolicLinks.serialize(cursor);
|
||||
return cursor;
|
||||
}
|
||||
|
||||
const uint8_t*
|
||||
LinkData::deserialize(const uint8_t* cursor)
|
||||
{
|
||||
MOZ_ASSERT(tier == Tier::Serialized);
|
||||
|
||||
(cursor = ReadBytes(cursor, &pod(), sizeof(pod()))) &&
|
||||
(cursor = DeserializePodVector(cursor, &internalLinks)) &&
|
||||
(cursor = symbolicLinks.deserialize(cursor));
|
||||
return cursor;
|
||||
}
|
||||
|
||||
CodeSegment::~CodeSegment()
|
||||
{
|
||||
if (unregisterOnDestroy_)
|
||||
@ -134,9 +201,9 @@ FreeCode::operator()(uint8_t* bytes)
|
||||
}
|
||||
|
||||
static bool
|
||||
StaticallyLink(const ModuleSegment& ms, const LinkDataTier& linkData)
|
||||
StaticallyLink(const ModuleSegment& ms, const LinkData& linkData)
|
||||
{
|
||||
for (LinkDataTier::InternalLink link : linkData.internalLinks) {
|
||||
for (LinkData::InternalLink link : linkData.internalLinks) {
|
||||
CodeLabel label;
|
||||
label.patchAt()->bind(link.patchAtOffset);
|
||||
label.target()->bind(link.targetOffset);
|
||||
@ -167,9 +234,9 @@ StaticallyLink(const ModuleSegment& ms, const LinkDataTier& linkData)
|
||||
}
|
||||
|
||||
static void
|
||||
StaticallyUnlink(uint8_t* base, const LinkDataTier& linkData)
|
||||
StaticallyUnlink(uint8_t* base, const LinkData& linkData)
|
||||
{
|
||||
for (LinkDataTier::InternalLink link : linkData.internalLinks) {
|
||||
for (LinkData::InternalLink link : linkData.internalLinks) {
|
||||
CodeLabel label;
|
||||
label.patchAt()->bind(link.patchAtOffset);
|
||||
label.target()->bind(-size_t(base)); // to reset immediate to null
|
||||
@ -275,7 +342,7 @@ SendCodeRangesToProfiler(const ModuleSegment& ms, const Bytes& bytecode, const M
|
||||
ModuleSegment::ModuleSegment(Tier tier,
|
||||
UniqueCodeBytes codeBytes,
|
||||
uint32_t codeLength,
|
||||
const LinkDataTier& linkData)
|
||||
const LinkData& linkData)
|
||||
: CodeSegment(std::move(codeBytes), codeLength, CodeSegment::Kind::Module),
|
||||
tier_(tier),
|
||||
trapCode_(base() + linkData.trapOffset)
|
||||
@ -283,7 +350,7 @@ ModuleSegment::ModuleSegment(Tier tier,
|
||||
}
|
||||
|
||||
/* static */ UniqueModuleSegment
|
||||
ModuleSegment::create(Tier tier, MacroAssembler& masm, const LinkDataTier& linkData)
|
||||
ModuleSegment::create(Tier tier, MacroAssembler& masm, const LinkData& linkData)
|
||||
{
|
||||
uint32_t codeLength = masm.bytesNeeded();
|
||||
|
||||
@ -298,7 +365,7 @@ ModuleSegment::create(Tier tier, MacroAssembler& masm, const LinkDataTier& linkD
|
||||
}
|
||||
|
||||
/* static */ UniqueModuleSegment
|
||||
ModuleSegment::create(Tier tier, const Bytes& unlinkedBytes, const LinkDataTier& linkData)
|
||||
ModuleSegment::create(Tier tier, const Bytes& unlinkedBytes, const LinkData& linkData)
|
||||
{
|
||||
uint32_t codeLength = unlinkedBytes.length();
|
||||
|
||||
@ -314,7 +381,7 @@ ModuleSegment::create(Tier tier, const Bytes& unlinkedBytes, const LinkDataTier&
|
||||
bool
|
||||
ModuleSegment::initialize(const CodeTier& codeTier,
|
||||
const ShareableBytes& bytecode,
|
||||
const LinkDataTier& linkData,
|
||||
const LinkData& linkData,
|
||||
const Metadata& metadata,
|
||||
const MetadataTier& metadataTier)
|
||||
{
|
||||
@ -347,7 +414,7 @@ ModuleSegment::addSizeOfMisc(mozilla::MallocSizeOf mallocSizeOf, size_t* code, s
|
||||
}
|
||||
|
||||
uint8_t*
|
||||
ModuleSegment::serialize(uint8_t* cursor, const LinkDataTier& linkData) const
|
||||
ModuleSegment::serialize(uint8_t* cursor, const LinkData& linkData) const
|
||||
{
|
||||
MOZ_ASSERT(tier() == Tier::Serialized);
|
||||
|
||||
@ -359,7 +426,7 @@ ModuleSegment::serialize(uint8_t* cursor, const LinkDataTier& linkData) const
|
||||
}
|
||||
|
||||
/* static */ const uint8_t*
|
||||
ModuleSegment::deserialize(const uint8_t* cursor, const LinkDataTier& linkData,
|
||||
ModuleSegment::deserialize(const uint8_t* cursor, const LinkData& linkData,
|
||||
UniqueModuleSegment* segment)
|
||||
{
|
||||
uint32_t length;
|
||||
@ -981,7 +1048,7 @@ CodeTier::serializedSize() const
|
||||
}
|
||||
|
||||
uint8_t*
|
||||
CodeTier::serialize(uint8_t* cursor, const LinkDataTier& linkData) const
|
||||
CodeTier::serialize(uint8_t* cursor, const LinkData& linkData) const
|
||||
{
|
||||
cursor = metadata_->serialize(cursor);
|
||||
cursor = segment_->serialize(cursor, linkData);
|
||||
@ -989,7 +1056,7 @@ CodeTier::serialize(uint8_t* cursor, const LinkDataTier& linkData) const
|
||||
}
|
||||
|
||||
/* static */ const uint8_t*
|
||||
CodeTier::deserialize(const uint8_t* cursor, const LinkDataTier& linkData,
|
||||
CodeTier::deserialize(const uint8_t* cursor, const LinkData& linkData,
|
||||
UniqueCodeTier* codeTier)
|
||||
{
|
||||
auto metadata = js::MakeUnique<MetadataTier>(Tier::Serialized);
|
||||
@ -1084,7 +1151,7 @@ Code::Code(UniqueCodeTier tier1, const Metadata& metadata, JumpTables&& maybeJum
|
||||
{}
|
||||
|
||||
bool
|
||||
Code::initialize(const ShareableBytes& bytecode, const LinkDataTier& linkData)
|
||||
Code::initialize(const ShareableBytes& bytecode, const LinkData& linkData)
|
||||
{
|
||||
MOZ_ASSERT(!initialized());
|
||||
|
||||
@ -1097,7 +1164,7 @@ Code::initialize(const ShareableBytes& bytecode, const LinkDataTier& linkData)
|
||||
|
||||
bool
|
||||
Code::setTier2(UniqueCodeTier tier2, const ShareableBytes& bytecode,
|
||||
const LinkDataTier& linkData) const
|
||||
const LinkData& linkData) const
|
||||
{
|
||||
MOZ_RELEASE_ASSERT(!hasTier2());
|
||||
MOZ_RELEASE_ASSERT(tier2->tier() == Tier::Ion && tier1_->tier() == Tier::Baseline);
|
||||
@ -1361,7 +1428,7 @@ Code::addSizeOfMiscIfNotSeen(MallocSizeOf mallocSizeOf,
|
||||
bool
|
||||
CodeTier::initialize(const Code& code,
|
||||
const ShareableBytes& bytecode,
|
||||
const LinkDataTier& linkData,
|
||||
const LinkData& linkData,
|
||||
const Metadata& metadata)
|
||||
{
|
||||
MOZ_ASSERT(!initialized());
|
||||
@ -1390,7 +1457,7 @@ Code::serialize(uint8_t* cursor, const LinkData& linkData) const
|
||||
MOZ_RELEASE_ASSERT(!metadata().debugEnabled);
|
||||
|
||||
cursor = metadata().serialize(cursor);
|
||||
cursor = codeTier(Tier::Serialized).serialize(cursor, linkData.tier(Tier::Serialized));
|
||||
cursor = codeTier(Tier::Serialized).serialize(cursor, linkData);
|
||||
return cursor;
|
||||
}
|
||||
|
||||
@ -1406,7 +1473,7 @@ Code::deserialize(const uint8_t* cursor,
|
||||
return nullptr;
|
||||
|
||||
UniqueCodeTier codeTier;
|
||||
cursor = CodeTier::deserialize(cursor, linkData.tier(Tier::Serialized), &codeTier);
|
||||
cursor = CodeTier::deserialize(cursor, linkData, &codeTier);
|
||||
if (!cursor)
|
||||
return nullptr;
|
||||
|
||||
@ -1415,7 +1482,7 @@ Code::deserialize(const uint8_t* cursor,
|
||||
return nullptr;
|
||||
|
||||
MutableCode code = js_new<Code>(std::move(codeTier), metadata, std::move(jumpTables));
|
||||
if (!code || !code->initialize(bytecode, linkData.tier(Tier::Serialized)))
|
||||
if (!code || !code->initialize(bytecode, linkData))
|
||||
return nullptr;
|
||||
|
||||
*out = code;
|
||||
|
@ -27,14 +27,55 @@
|
||||
namespace js {
|
||||
|
||||
struct AsmJSMetadata;
|
||||
class WasmInstanceObject;
|
||||
|
||||
namespace wasm {
|
||||
|
||||
struct LinkDataTier;
|
||||
struct MetadataTier;
|
||||
struct Metadata;
|
||||
class LinkData;
|
||||
|
||||
// LinkData contains all the metadata necessary to patch all the locations
|
||||
// that depend on the absolute address of a ModuleSegment. This happens in a
|
||||
// "linking" step after compilation and after the module's code is serialized.
|
||||
// The LinkData is serialized along with the Module but does not (normally, see
|
||||
// Module::debugLinkData_ comment) persist after (de)serialization, which
|
||||
// distinguishes it from Metadata, which is stored in the Code object.
|
||||
|
||||
struct LinkDataCacheablePod
|
||||
{
|
||||
uint32_t trapOffset = 0;
|
||||
|
||||
LinkDataCacheablePod() = default;
|
||||
};
|
||||
|
||||
struct LinkData : LinkDataCacheablePod
|
||||
{
|
||||
const Tier tier;
|
||||
|
||||
explicit LinkData(Tier tier) : tier(tier) {}
|
||||
|
||||
LinkDataCacheablePod& pod() { return *this; }
|
||||
const LinkDataCacheablePod& pod() const { return *this; }
|
||||
|
||||
struct InternalLink {
|
||||
uint32_t patchAtOffset;
|
||||
uint32_t targetOffset;
|
||||
#ifdef JS_CODELABEL_LINKMODE
|
||||
uint32_t mode;
|
||||
#endif
|
||||
};
|
||||
typedef Vector<InternalLink, 0, SystemAllocPolicy> InternalLinkVector;
|
||||
|
||||
struct SymbolicLinkArray : EnumeratedArray<SymbolicAddress, SymbolicAddress::Limit, Uint32Vector> {
|
||||
WASM_DECLARE_SERIALIZABLE(SymbolicLinkArray)
|
||||
};
|
||||
|
||||
InternalLinkVector internalLinks;
|
||||
SymbolicLinkArray symbolicLinks;
|
||||
|
||||
WASM_DECLARE_SERIALIZABLE(LinkData)
|
||||
};
|
||||
|
||||
typedef UniquePtr<LinkData> UniqueLinkData;
|
||||
|
||||
// ShareableBytes is a reference-counted Vector of bytes.
|
||||
|
||||
@ -145,18 +186,18 @@ class ModuleSegment : public CodeSegment
|
||||
ModuleSegment(Tier tier,
|
||||
UniqueCodeBytes codeBytes,
|
||||
uint32_t codeLength,
|
||||
const LinkDataTier& linkData);
|
||||
const LinkData& linkData);
|
||||
|
||||
static UniqueModuleSegment create(Tier tier,
|
||||
jit::MacroAssembler& masm,
|
||||
const LinkDataTier& linkData);
|
||||
const LinkData& linkData);
|
||||
static UniqueModuleSegment create(Tier tier,
|
||||
const Bytes& unlinkedBytes,
|
||||
const LinkDataTier& linkData);
|
||||
const LinkData& linkData);
|
||||
|
||||
bool initialize(const CodeTier& codeTier,
|
||||
const ShareableBytes& bytecode,
|
||||
const LinkDataTier& linkData,
|
||||
const LinkData& linkData,
|
||||
const Metadata& metadata,
|
||||
const MetadataTier& metadataTier);
|
||||
|
||||
@ -169,8 +210,8 @@ class ModuleSegment : public CodeSegment
|
||||
// Structured clone support:
|
||||
|
||||
size_t serializedSize() const;
|
||||
uint8_t* serialize(uint8_t* cursor, const LinkDataTier& linkData) const;
|
||||
static const uint8_t* deserialize(const uint8_t* cursor, const LinkDataTier& linkData,
|
||||
uint8_t* serialize(uint8_t* cursor, const LinkData& linkData) const;
|
||||
static const uint8_t* deserialize(const uint8_t* cursor, const LinkData& linkData,
|
||||
UniqueModuleSegment* segment);
|
||||
|
||||
const CodeRange* lookupRange(const void* pc) const;
|
||||
@ -619,7 +660,7 @@ class CodeTier
|
||||
|
||||
bool initialize(const Code& code,
|
||||
const ShareableBytes& bytecode,
|
||||
const LinkDataTier& linkData,
|
||||
const LinkData& linkData,
|
||||
const Metadata& metadata);
|
||||
|
||||
Tier tier() const { return segment_->tier(); }
|
||||
@ -631,8 +672,8 @@ class CodeTier
|
||||
const CodeRange* lookupRange(const void* pc) const;
|
||||
|
||||
size_t serializedSize() const;
|
||||
uint8_t* serialize(uint8_t* cursor, const LinkDataTier& linkData) const;
|
||||
static const uint8_t* deserialize(const uint8_t* cursor, const LinkDataTier& linkData,
|
||||
uint8_t* serialize(uint8_t* cursor, const LinkData& linkData) const;
|
||||
static const uint8_t* deserialize(const uint8_t* cursor, const LinkData& linkData,
|
||||
UniqueCodeTier* codeTier);
|
||||
void addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code, size_t* data) const;
|
||||
};
|
||||
@ -709,7 +750,7 @@ class Code : public ShareableBase<Code>
|
||||
Code(UniqueCodeTier tier1, const Metadata& metadata, JumpTables&& maybeJumpTables);
|
||||
bool initialized() const { return tier1_->initialized(); }
|
||||
|
||||
bool initialize(const ShareableBytes& bytecode, const LinkDataTier& linkData);
|
||||
bool initialize(const ShareableBytes& bytecode, const LinkData& linkData);
|
||||
|
||||
void setTieringEntry(size_t i, void* target) const { jumpTables_.setTieringEntry(i, target); }
|
||||
void** tieringJumpTable() const { return jumpTables_.tiering(); }
|
||||
@ -719,7 +760,7 @@ class Code : public ShareableBase<Code>
|
||||
uint32_t getFuncIndex(JSFunction* fun) const;
|
||||
|
||||
bool setTier2(UniqueCodeTier tier2, const ShareableBytes& bytecode,
|
||||
const LinkDataTier& linkData) const;
|
||||
const LinkData& linkData) const;
|
||||
void commitTier2() const;
|
||||
|
||||
bool hasTier2() const { return hasTier2_; }
|
||||
|
@ -34,58 +34,46 @@ using namespace js;
|
||||
using namespace js::jit;
|
||||
using namespace js::wasm;
|
||||
|
||||
template <class DecoderT>
|
||||
static bool
|
||||
DecodeFunctionBody(DecoderT& d, ModuleGenerator& mg, uint32_t funcIndex)
|
||||
uint32_t
|
||||
wasm::ObservedCPUFeatures()
|
||||
{
|
||||
uint32_t bodySize;
|
||||
if (!d.readVarU32(&bodySize))
|
||||
return d.fail("expected number of function body bytes");
|
||||
enum Arch {
|
||||
X86 = 0x1,
|
||||
X64 = 0x2,
|
||||
ARM = 0x3,
|
||||
MIPS = 0x4,
|
||||
MIPS64 = 0x5,
|
||||
ARM64 = 0x6,
|
||||
ARCH_BITS = 3
|
||||
};
|
||||
|
||||
if (bodySize > MaxFunctionBytes)
|
||||
return d.fail("function body too big");
|
||||
|
||||
const size_t offsetInModule = d.currentOffset();
|
||||
|
||||
// Skip over the function body; it will be validated by the compilation thread.
|
||||
const uint8_t* bodyBegin;
|
||||
if (!d.readBytes(bodySize, &bodyBegin))
|
||||
return d.fail("function body length too big");
|
||||
|
||||
return mg.compileFuncDef(funcIndex, offsetInModule, bodyBegin, bodyBegin + bodySize);
|
||||
#if defined(JS_CODEGEN_X86)
|
||||
MOZ_ASSERT(uint32_t(jit::CPUInfo::GetSSEVersion()) <= (UINT32_MAX >> ARCH_BITS));
|
||||
return X86 | (uint32_t(jit::CPUInfo::GetSSEVersion()) << ARCH_BITS);
|
||||
#elif defined(JS_CODEGEN_X64)
|
||||
MOZ_ASSERT(uint32_t(jit::CPUInfo::GetSSEVersion()) <= (UINT32_MAX >> ARCH_BITS));
|
||||
return X64 | (uint32_t(jit::CPUInfo::GetSSEVersion()) << ARCH_BITS);
|
||||
#elif defined(JS_CODEGEN_ARM)
|
||||
MOZ_ASSERT(jit::GetARMFlags() <= (UINT32_MAX >> ARCH_BITS));
|
||||
return ARM | (jit::GetARMFlags() << ARCH_BITS);
|
||||
#elif defined(JS_CODEGEN_ARM64)
|
||||
MOZ_ASSERT(jit::GetARM64Flags() <= (UINT32_MAX >> ARCH_BITS));
|
||||
return ARM64 | (jit::GetARM64Flags() << ARCH_BITS);
|
||||
#elif defined(JS_CODEGEN_MIPS32)
|
||||
MOZ_ASSERT(jit::GetMIPSFlags() <= (UINT32_MAX >> ARCH_BITS));
|
||||
return MIPS | (jit::GetMIPSFlags() << ARCH_BITS);
|
||||
#elif defined(JS_CODEGEN_MIPS64)
|
||||
MOZ_ASSERT(jit::GetMIPSFlags() <= (UINT32_MAX >> ARCH_BITS));
|
||||
return MIPS64 | (jit::GetMIPSFlags() << ARCH_BITS);
|
||||
#elif defined(JS_CODEGEN_NONE)
|
||||
return 0;
|
||||
#else
|
||||
# error "unknown architecture"
|
||||
#endif
|
||||
}
|
||||
|
||||
template <class DecoderT>
|
||||
static bool
|
||||
DecodeCodeSection(const ModuleEnvironment& env, DecoderT& d, ModuleGenerator& mg)
|
||||
{
|
||||
if (!env.codeSection) {
|
||||
if (env.numFuncDefs() != 0)
|
||||
return d.fail("expected code section");
|
||||
|
||||
return mg.finishFuncDefs();
|
||||
}
|
||||
|
||||
uint32_t numFuncDefs;
|
||||
if (!d.readVarU32(&numFuncDefs))
|
||||
return d.fail("expected function body count");
|
||||
|
||||
if (numFuncDefs != env.numFuncDefs())
|
||||
return d.fail("function body count does not match function signature count");
|
||||
|
||||
for (uint32_t funcDefIndex = 0; funcDefIndex < numFuncDefs; funcDefIndex++) {
|
||||
if (!DecodeFunctionBody(d, mg, env.numFuncImports() + funcDefIndex))
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!d.finishSection(*env.codeSection, "code"))
|
||||
return false;
|
||||
|
||||
return mg.finishFuncDefs();
|
||||
}
|
||||
|
||||
bool
|
||||
CompileArgs::initFromContext(JSContext* cx, ScriptedCaller&& scriptedCaller)
|
||||
CompileArgs::CompileArgs(JSContext* cx, ScriptedCaller&& scriptedCaller)
|
||||
: scriptedCaller(std::move(scriptedCaller))
|
||||
{
|
||||
#ifdef ENABLE_WASM_GC
|
||||
bool gcEnabled = cx->options().wasmGc();
|
||||
@ -104,9 +92,6 @@ CompileArgs::initFromContext(JSContext* cx, ScriptedCaller&& scriptedCaller)
|
||||
// only enable it when a developer actually cares: when the debugger tab
|
||||
// is open.
|
||||
debugEnabled = cx->realm()->debuggerObservesAsmJS();
|
||||
|
||||
this->scriptedCaller = std::move(scriptedCaller);
|
||||
return assumptions.initBuildIdFromContext(cx);
|
||||
}
|
||||
|
||||
// Classify the current system as one of a set of recognizable classes. This
|
||||
@ -419,6 +404,56 @@ InitialCompileFlags(const CompileArgs& args, Decoder& d, CompileMode* mode, Tier
|
||||
*debug = debugEnabled ? DebugEnabled::True : DebugEnabled::False;
|
||||
}
|
||||
|
||||
template <class DecoderT>
|
||||
static bool
|
||||
DecodeFunctionBody(DecoderT& d, ModuleGenerator& mg, uint32_t funcIndex)
|
||||
{
|
||||
uint32_t bodySize;
|
||||
if (!d.readVarU32(&bodySize))
|
||||
return d.fail("expected number of function body bytes");
|
||||
|
||||
if (bodySize > MaxFunctionBytes)
|
||||
return d.fail("function body too big");
|
||||
|
||||
const size_t offsetInModule = d.currentOffset();
|
||||
|
||||
// Skip over the function body; it will be validated by the compilation thread.
|
||||
const uint8_t* bodyBegin;
|
||||
if (!d.readBytes(bodySize, &bodyBegin))
|
||||
return d.fail("function body length too big");
|
||||
|
||||
return mg.compileFuncDef(funcIndex, offsetInModule, bodyBegin, bodyBegin + bodySize);
|
||||
}
|
||||
|
||||
template <class DecoderT>
|
||||
static bool
|
||||
DecodeCodeSection(const ModuleEnvironment& env, DecoderT& d, ModuleGenerator& mg)
|
||||
{
|
||||
if (!env.codeSection) {
|
||||
if (env.numFuncDefs() != 0)
|
||||
return d.fail("expected code section");
|
||||
|
||||
return mg.finishFuncDefs();
|
||||
}
|
||||
|
||||
uint32_t numFuncDefs;
|
||||
if (!d.readVarU32(&numFuncDefs))
|
||||
return d.fail("expected function body count");
|
||||
|
||||
if (numFuncDefs != env.numFuncDefs())
|
||||
return d.fail("function body count does not match function signature count");
|
||||
|
||||
for (uint32_t funcDefIndex = 0; funcDefIndex < numFuncDefs; funcDefIndex++) {
|
||||
if (!DecodeFunctionBody(d, mg, env.numFuncImports() + funcDefIndex))
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!d.finishSection(*env.codeSection, "code"))
|
||||
return false;
|
||||
|
||||
return mg.finishFuncDefs();
|
||||
}
|
||||
|
||||
SharedModule
|
||||
wasm::CompileBuffer(const CompileArgs& args, const ShareableBytes& bytecode, UniqueChars* error,
|
||||
UniqueCharsVector* warnings)
|
||||
|
@ -24,6 +24,13 @@
namespace js {
namespace wasm {

// Return a uint32_t which captures the observed properties of the CPU that
// affect compilation. If code compiled now is to be serialized and executed
// later, ObservedCPUFeatures() must be the same at both times.

uint32_t
ObservedCPUFeatures();

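A minimal sketch, not part of the patch, of how a consumer of this value would use it: the word packs an architecture tag in its low three bits and the architecture-specific feature flags above them (see the ObservedCPUFeatures() definition in WasmCompile.cpp earlier in this diff), so a cached word is simply compared for equality. The helper name CachedCpuFeaturesMatch is illustrative.

    static bool
    CachedCpuFeaturesMatch(uint32_t cachedFeatures)
    {
        // Serialized code is only reusable on a CPU that reports exactly the
        // same architecture tag and feature flags it was compiled with.
        return cachedFeatures == js::wasm::ObservedCPUFeatures();
    }
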
// Describes the JS scripted caller of a request to compile a wasm module.
|
||||
|
||||
struct ScriptedCaller
|
||||
@ -39,7 +46,6 @@ struct ScriptedCaller
|
||||
|
||||
struct CompileArgs : ShareableBase<CompileArgs>
|
||||
{
|
||||
Assumptions assumptions;
|
||||
ScriptedCaller scriptedCaller;
|
||||
UniqueChars sourceMapURL;
|
||||
bool baselineEnabled;
|
||||
@ -49,9 +55,8 @@ struct CompileArgs : ShareableBase<CompileArgs>
|
||||
HasGcTypes gcTypesEnabled;
|
||||
bool testTiering;
|
||||
|
||||
CompileArgs(Assumptions&& assumptions, ScriptedCaller&& scriptedCaller)
|
||||
: assumptions(std::move(assumptions)),
|
||||
scriptedCaller(std::move(scriptedCaller)),
|
||||
explicit CompileArgs(ScriptedCaller&& scriptedCaller)
|
||||
: scriptedCaller(std::move(scriptedCaller)),
|
||||
baselineEnabled(false),
|
||||
debugEnabled(false),
|
||||
ionEnabled(false),
|
||||
@ -60,10 +65,7 @@ struct CompileArgs : ShareableBase<CompileArgs>
|
||||
testTiering(false)
|
||||
{}
|
||||
|
||||
// If CompileArgs is constructed without arguments, initFromContext() must
|
||||
// be called to complete initialization.
|
||||
CompileArgs() = default;
|
||||
bool initFromContext(JSContext* cx, ScriptedCaller&& scriptedCaller);
|
||||
CompileArgs(JSContext* cx, ScriptedCaller&& scriptedCaller);
|
||||
};
|
||||
|
||||
typedef RefPtr<CompileArgs> MutableCompileArgs;
|
||||
|
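A minimal usage sketch, assuming a JSContext* cx and an introducer string; it mirrors the updated InitCompileArgs() call site in WasmJS.cpp later in this diff, and the helper name MakeCompileArgs is illustrative:

    static MutableCompileArgs
    MakeCompileArgs(JSContext* cx, const char* introducer)
    {
        ScriptedCaller scriptedCaller;
        if (!DescribeScriptedCaller(cx, &scriptedCaller, introducer))
            return nullptr;

        // The constructor now reads the baseline/ion/gc/debug options straight
        // off the context, so no initFromContext() follow-up call is needed.
        return cx->new_<CompileArgs>(cx, std::move(scriptedCaller));
    }
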
@ -455,14 +455,16 @@ DebugState::debugDisplayURL(JSContext* cx) const
|
||||
return nullptr;
|
||||
|
||||
if (const char* filename = metadata().filename.get()) {
|
||||
js::StringBuffer filenamePrefix(cx);
|
||||
// EncodeURI returns false due to invalid chars or OOM -- fail only
|
||||
// during OOM.
|
||||
if (!EncodeURI(cx, filenamePrefix, filename, strlen(filename))) {
|
||||
if (!cx->isExceptionPending())
|
||||
JSString* filenamePrefix = EncodeURI(cx, filename, strlen(filename));
|
||||
if (!filenamePrefix) {
|
||||
if (cx->isThrowingOutOfMemory())
|
||||
return nullptr;
|
||||
|
||||
MOZ_ASSERT(!cx->isThrowingOverRecursed());
|
||||
cx->clearPendingException(); // ignore invalid URI
|
||||
} else if (!result.append(filenamePrefix.finishString())) {
|
||||
} else if (!result.append(filenamePrefix)) {
|
||||
return nullptr;
|
||||
}
|
||||
}
|
||||
|
@ -71,7 +71,7 @@ ModuleGenerator::ModuleGenerator(const CompileArgs& args, ModuleEnvironment* env
|
||||
error_(error),
|
||||
cancelled_(cancelled),
|
||||
env_(env),
|
||||
linkDataTier_(nullptr),
|
||||
linkData_(nullptr),
|
||||
metadataTier_(nullptr),
|
||||
taskState_(mutexid::WasmCompileTaskState),
|
||||
lifo_(GENERATOR_LIFO_DEFAULT_CHUNK_SIZE),
|
||||
@ -183,17 +183,14 @@ ModuleGenerator::init(Metadata* maybeAsmJSMetadata)
|
||||
return false;
|
||||
}
|
||||
|
||||
linkDataTier_ = js::MakeUnique<LinkDataTier>(tier());
|
||||
if (!linkDataTier_)
|
||||
linkData_ = js::MakeUnique<LinkData>(tier());
|
||||
if (!linkData_)
|
||||
return false;
|
||||
|
||||
metadataTier_ = js::MakeUnique<MetadataTier>(tier());
|
||||
if (!metadataTier_)
|
||||
return false;
|
||||
|
||||
if (!assumptions_.clone(compileArgs_->assumptions))
|
||||
return false;
|
||||
|
||||
// The funcToCodeRange_ maps function indices to code-range indices and all
|
||||
// elements will be initialized by the time module generation is finished.
|
||||
|
||||
@ -514,8 +511,8 @@ ModuleGenerator::noteCodeRange(uint32_t codeRangeIndex, const CodeRange& codeRan
|
||||
debugTrapCodeOffset_ = codeRange.begin();
|
||||
break;
|
||||
case CodeRange::TrapExit:
|
||||
MOZ_ASSERT(!linkDataTier_->trapOffset);
|
||||
linkDataTier_->trapOffset = codeRange.begin();
|
||||
MOZ_ASSERT(!linkData_->trapOffset);
|
||||
linkData_->trapOffset = codeRange.begin();
|
||||
break;
|
||||
case CodeRange::Throw:
|
||||
// Jumped to by other stubs, so nothing to do.
|
||||
@ -586,18 +583,18 @@ ModuleGenerator::linkCompiledCode(const CompiledCode& code)
|
||||
|
||||
for (const SymbolicAccess& access : code.symbolicAccesses) {
|
||||
uint32_t patchAt = offsetInModule + access.patchAt.offset();
|
||||
if (!linkDataTier_->symbolicLinks[access.target].append(patchAt))
|
||||
if (!linkData_->symbolicLinks[access.target].append(patchAt))
|
||||
return false;
|
||||
}
|
||||
|
||||
for (const CodeLabel& codeLabel : code.codeLabels) {
|
||||
LinkDataTier::InternalLink link;
|
||||
LinkData::InternalLink link;
|
||||
link.patchAtOffset = offsetInModule + codeLabel.patchAt().offset();
|
||||
link.targetOffset = offsetInModule + codeLabel.target().offset();
|
||||
#ifdef JS_CODELABEL_LINKMODE
|
||||
link.mode = codeLabel.linkMode();
|
||||
#endif
|
||||
if (!linkDataTier_->internalLinks.append(link))
|
||||
if (!linkData_->internalLinks.append(link))
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -937,11 +934,11 @@ ModuleGenerator::finish(const ShareableBytes& bytecode)
|
||||
if (!finishMetadata(bytecode))
|
||||
return nullptr;
|
||||
|
||||
return ModuleSegment::create(tier(), masm_, *linkDataTier_);
|
||||
return ModuleSegment::create(tier(), masm_, *linkData_);
|
||||
}
|
||||
|
||||
SharedModule
|
||||
ModuleGenerator::finishModule(const ShareableBytes& bytecode)
|
||||
ModuleGenerator::finishModule(const ShareableBytes& bytecode, UniqueLinkData* linkData)
|
||||
{
|
||||
MOZ_ASSERT(mode() == CompileMode::Once || mode() == CompileMode::Tier1);
|
||||
|
||||
@ -953,24 +950,12 @@ ModuleGenerator::finishModule(const ShareableBytes& bytecode)
|
||||
if (!jumpTables.init(mode(), *moduleSegment, metadataTier_->codeRanges))
|
||||
return nullptr;
|
||||
|
||||
UniqueConstBytes maybeDebuggingBytes;
|
||||
if (env_->debugEnabled()) {
|
||||
MOZ_ASSERT(mode() == CompileMode::Once);
|
||||
Bytes bytes;
|
||||
if (!bytes.resize(masm_.bytesNeeded()))
|
||||
return nullptr;
|
||||
masm_.executableCopy(bytes.begin(), /* flushICache = */ false);
|
||||
maybeDebuggingBytes = js::MakeUnique<Bytes>(std::move(bytes));
|
||||
if (!maybeDebuggingBytes)
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
auto codeTier = js::MakeUnique<CodeTier>(std::move(metadataTier_), std::move(moduleSegment));
|
||||
if (!codeTier)
|
||||
return nullptr;
|
||||
|
||||
MutableCode code = js_new<Code>(std::move(codeTier), *metadata_, std::move(jumpTables));
|
||||
if (!code || !code->initialize(bytecode, *linkDataTier_))
|
||||
if (!code || !code->initialize(bytecode, *linkData_))
|
||||
return nullptr;
|
||||
|
||||
StructTypeVector structTypes;
|
||||
@ -979,22 +964,42 @@ ModuleGenerator::finishModule(const ShareableBytes& bytecode)
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
SharedModule module(js_new<Module>(std::move(assumptions_),
|
||||
*code,
|
||||
std::move(maybeDebuggingBytes),
|
||||
LinkData(std::move(linkDataTier_)),
|
||||
UniqueBytes debugUnlinkedCode;
|
||||
UniqueLinkData debugLinkData;
|
||||
if (env_->debugEnabled()) {
|
||||
MOZ_ASSERT(mode() == CompileMode::Once);
|
||||
MOZ_ASSERT(tier() == Tier::Debug);
|
||||
|
||||
debugUnlinkedCode = js::MakeUnique<Bytes>();
|
||||
if (!debugUnlinkedCode || !debugUnlinkedCode->resize(masm_.bytesNeeded()))
|
||||
return nullptr;
|
||||
|
||||
masm_.executableCopy(debugUnlinkedCode->begin(), /* flushICache = */ false);
|
||||
|
||||
debugLinkData = std::move(linkData_);
|
||||
}
|
||||
|
||||
SharedModule module(js_new<Module>(*code,
|
||||
std::move(env_->imports),
|
||||
std::move(env_->exports),
|
||||
std::move(env_->dataSegments),
|
||||
std::move(env_->elemSegments),
|
||||
std::move(structTypes),
|
||||
bytecode));
|
||||
bytecode,
|
||||
std::move(debugUnlinkedCode),
|
||||
std::move(debugLinkData)));
|
||||
if (!module)
|
||||
return nullptr;
|
||||
|
||||
if (mode() == CompileMode::Tier1)
|
||||
module->startTier2(*compileArgs_);
|
||||
|
||||
if (linkData) {
|
||||
MOZ_ASSERT(isAsmJS());
|
||||
MOZ_ASSERT(!env_->debugEnabled());
|
||||
*linkData = std::move(linkData_);
|
||||
}
|
||||
|
||||
return module;
|
||||
}
|
||||
|
||||
@ -1022,7 +1027,7 @@ ModuleGenerator::finishTier2(Module& module)
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(500));
|
||||
}
|
||||
|
||||
return module.finishTier2(std::move(linkDataTier_), std::move(tier2), env_);
|
||||
return module.finishTier2(*linkData_, std::move(tier2), std::move(*env_));
|
||||
}
|
||||
|
||||
size_t
|
||||
|
@ -154,8 +154,7 @@ class MOZ_STACK_CLASS ModuleGenerator
    ModuleEnvironment* const env_;

    // Data that is moved into the result of finish()
    Assumptions assumptions_;
    UniqueLinkDataTier linkDataTier_;
    UniqueLinkData linkData_;
    UniqueMetadataTier metadataTier_;
    MutableMetadata metadata_;

@ -225,7 +224,7 @@ class MOZ_STACK_CLASS ModuleGenerator
    // a new Module. Otherwise, if env->mode is Tier2, finishTier2() must be
    // called to augment the given Module with tier 2 code.

    SharedModule finishModule(const ShareableBytes& bytecode);
    SharedModule finishModule(const ShareableBytes& bytecode, UniqueLinkData* linkData = nullptr);
    MOZ_MUST_USE bool finishTier2(Module& module);
};

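A minimal sketch of a caller that wants the link data back for serialization, assuming a finished ModuleGenerator mg and a SharedBytes bytecode are in scope; per the finishModule() change above, only callers that intend to serialize (currently the asm.js path) pass the out-parameter:

    UniqueLinkData linkData;
    SharedModule module = mg.finishModule(*bytecode, &linkData);
    if (!module)
        return nullptr;

    // Link data no longer lives on the Module, so serialization takes it
    // explicitly (see Module::serializedSize/serialize later in this diff).
    size_t serializedSize = module->serializedSize(*linkData);
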
@ -363,8 +363,8 @@ wasm::Eval(JSContext* cx, Handle<TypedArrayObject*> code, HandleObject importObj
|
||||
if (!DescribeScriptedCaller(cx, &scriptedCaller, "wasm_eval"))
|
||||
return false;
|
||||
|
||||
MutableCompileArgs compileArgs = cx->new_<CompileArgs>();
|
||||
if (!compileArgs || !compileArgs->initFromContext(cx, std::move(scriptedCaller)))
|
||||
MutableCompileArgs compileArgs = cx->new_<CompileArgs>(cx, std::move(scriptedCaller));
|
||||
if (!compileArgs)
|
||||
return false;
|
||||
|
||||
UniqueChars error;
|
||||
@ -894,14 +894,7 @@ InitCompileArgs(JSContext* cx, const char* introducer)
|
||||
if (!DescribeScriptedCaller(cx, &scriptedCaller, introducer))
|
||||
return nullptr;
|
||||
|
||||
MutableCompileArgs compileArgs = cx->new_<CompileArgs>();
|
||||
if (!compileArgs)
|
||||
return nullptr;
|
||||
|
||||
if (!compileArgs->initFromContext(cx, std::move(scriptedCaller)))
|
||||
return nullptr;
|
||||
|
||||
return compileArgs;
|
||||
return cx->new_<CompileArgs>(cx, std::move(scriptedCaller));
|
||||
}
|
||||
|
||||
static bool
|
||||
|
@ -37,139 +37,6 @@ using namespace js;
|
||||
using namespace js::jit;
|
||||
using namespace js::wasm;
|
||||
|
||||
size_t
|
||||
LinkDataTier::SymbolicLinkArray::serializedSize() const
|
||||
{
|
||||
size_t size = 0;
|
||||
for (const Uint32Vector& offsets : *this)
|
||||
size += SerializedPodVectorSize(offsets);
|
||||
return size;
|
||||
}
|
||||
|
||||
uint8_t*
|
||||
LinkDataTier::SymbolicLinkArray::serialize(uint8_t* cursor) const
|
||||
{
|
||||
for (const Uint32Vector& offsets : *this)
|
||||
cursor = SerializePodVector(cursor, offsets);
|
||||
return cursor;
|
||||
}
|
||||
|
||||
const uint8_t*
|
||||
LinkDataTier::SymbolicLinkArray::deserialize(const uint8_t* cursor)
|
||||
{
|
||||
for (Uint32Vector& offsets : *this) {
|
||||
cursor = DeserializePodVector(cursor, &offsets);
|
||||
if (!cursor)
|
||||
return nullptr;
|
||||
}
|
||||
return cursor;
|
||||
}
|
||||
|
||||
size_t
|
||||
LinkDataTier::SymbolicLinkArray::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
|
||||
{
|
||||
size_t size = 0;
|
||||
for (const Uint32Vector& offsets : *this)
|
||||
size += offsets.sizeOfExcludingThis(mallocSizeOf);
|
||||
return size;
|
||||
}
|
||||
|
||||
size_t
|
||||
LinkDataTier::serializedSize() const
|
||||
{
|
||||
return sizeof(pod()) +
|
||||
SerializedPodVectorSize(internalLinks) +
|
||||
symbolicLinks.serializedSize();
|
||||
}
|
||||
|
||||
uint8_t*
|
||||
LinkDataTier::serialize(uint8_t* cursor) const
|
||||
{
|
||||
MOZ_ASSERT(tier == Tier::Serialized);
|
||||
|
||||
cursor = WriteBytes(cursor, &pod(), sizeof(pod()));
|
||||
cursor = SerializePodVector(cursor, internalLinks);
|
||||
cursor = symbolicLinks.serialize(cursor);
|
||||
return cursor;
|
||||
}
|
||||
|
||||
const uint8_t*
|
||||
LinkDataTier::deserialize(const uint8_t* cursor)
|
||||
{
|
||||
(cursor = ReadBytes(cursor, &pod(), sizeof(pod()))) &&
|
||||
(cursor = DeserializePodVector(cursor, &internalLinks)) &&
|
||||
(cursor = symbolicLinks.deserialize(cursor));
|
||||
return cursor;
|
||||
}
|
||||
|
||||
size_t
|
||||
LinkDataTier::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
|
||||
{
|
||||
return internalLinks.sizeOfExcludingThis(mallocSizeOf) +
|
||||
symbolicLinks.sizeOfExcludingThis(mallocSizeOf);
|
||||
}
|
||||
|
||||
void
|
||||
LinkData::setTier2(UniqueLinkDataTier tier) const
|
||||
{
|
||||
MOZ_RELEASE_ASSERT(tier->tier == Tier::Ion && tier1_->tier == Tier::Baseline);
|
||||
MOZ_RELEASE_ASSERT(!tier2_.get());
|
||||
tier2_ = std::move(tier);
|
||||
}
|
||||
|
||||
const LinkDataTier&
|
||||
LinkData::tier(Tier tier) const
|
||||
{
|
||||
switch (tier) {
|
||||
case Tier::Baseline:
|
||||
if (tier1_->tier == Tier::Baseline)
|
||||
return *tier1_;
|
||||
MOZ_CRASH("No linkData at this tier");
|
||||
case Tier::Ion:
|
||||
if (tier1_->tier == Tier::Ion)
|
||||
return *tier1_;
|
||||
if (tier2_)
|
||||
return *tier2_;
|
||||
MOZ_CRASH("No linkData at this tier");
|
||||
default:
|
||||
MOZ_CRASH();
|
||||
}
|
||||
}
|
||||
|
||||
size_t
|
||||
LinkData::serializedSize() const
|
||||
{
|
||||
return tier(Tier::Serialized).serializedSize();
|
||||
}
|
||||
|
||||
uint8_t*
|
||||
LinkData::serialize(uint8_t* cursor) const
|
||||
{
|
||||
cursor = tier(Tier::Serialized).serialize(cursor);
|
||||
return cursor;
|
||||
}
|
||||
|
||||
const uint8_t*
|
||||
LinkData::deserialize(const uint8_t* cursor)
|
||||
{
|
||||
MOZ_ASSERT(!tier1_);
|
||||
tier1_ = js::MakeUnique<LinkDataTier>(Tier::Serialized);
|
||||
if (!tier1_)
|
||||
return nullptr;
|
||||
cursor = tier1_->deserialize(cursor);
|
||||
return cursor;
|
||||
}
|
||||
|
||||
size_t
|
||||
LinkData::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
|
||||
{
|
||||
size_t sum = 0;
|
||||
sum += tier1_->sizeOfExcludingThis(mallocSizeOf);
|
||||
if (tier2_)
|
||||
sum += tier2_->sizeOfExcludingThis(mallocSizeOf);
|
||||
return sum;
|
||||
}
|
||||
|
||||
class Module::Tier2GeneratorTaskImpl : public Tier2GeneratorTask
|
||||
{
|
||||
SharedModule module_;
|
||||
@ -213,18 +80,17 @@ Module::startTier2(const CompileArgs& args)
|
||||
}
|
||||
|
||||
bool
|
||||
Module::finishTier2(UniqueLinkDataTier linkData2, UniqueCodeTier tier2Arg, ModuleEnvironment* env2)
|
||||
Module::finishTier2(const LinkData& linkData, UniqueCodeTier tier2Arg, ModuleEnvironment&& env2)
|
||||
{
|
||||
MOZ_ASSERT(code().bestTier() == Tier::Baseline && tier2Arg->tier() == Tier::Ion);
|
||||
|
||||
// Install the data in the data structures. They will not be visible
|
||||
// until commitTier2().
|
||||
|
||||
if (!code().setTier2(std::move(tier2Arg), *bytecode_, *linkData2))
|
||||
if (!code().setTier2(std::move(tier2Arg), *bytecode_, linkData))
|
||||
return false;
|
||||
linkData().setTier2(std::move(linkData2));
|
||||
for (uint32_t i = 0; i < elemSegments_.length(); i++)
|
||||
elemSegments_[i].setTier2(std::move(env2->elemSegments[i].elemCodeRangeIndices(Tier::Ion)));
|
||||
elemSegments_[i].setTier2(std::move(env2.elemSegments[i].elemCodeRangeIndices(Tier::Ion)));
|
||||
|
||||
// Before we can make tier-2 live, we need to compile tier2 versions of any
|
||||
// extant tier1 lazy stubs (otherwise, tiering would break the assumption
|
||||
@ -247,7 +113,7 @@ Module::finishTier2(UniqueLinkDataTier linkData2, UniqueCodeTier tier2Arg, Modul
|
||||
const FuncExport& fe = metadataTier1.funcExports[i];
|
||||
if (fe.hasEagerStubs())
|
||||
continue;
|
||||
MOZ_ASSERT(!env2->isAsmJS(), "only wasm functions are lazily exported");
|
||||
MOZ_ASSERT(!env2.isAsmJS(), "only wasm functions are lazily exported");
|
||||
if (!stubs1->hasStub(fe.funcIndex()))
|
||||
continue;
|
||||
if (!funcExportIndices.emplaceBack(i))
|
||||
@ -294,21 +160,9 @@ Module::testingBlockOnTier2Complete() const
|
||||
}
|
||||
|
||||
/* virtual */ size_t
|
||||
Module::compiledSerializedSize() const
|
||||
Module::serializedSize(const LinkData& linkData) const
|
||||
{
|
||||
MOZ_ASSERT(!testingTier2Active_);
|
||||
|
||||
// The compiled debug code must not be saved; set the compiled size to 0
// so Module::assumptionsMatch will return false during assumptions
// deserialization.
|
||||
if (metadata().debugEnabled)
|
||||
return 0;
|
||||
|
||||
if (!code_->hasTier(Tier::Serialized))
|
||||
return 0;
|
||||
|
||||
return assumptions_.serializedSize() +
|
||||
linkData_.serializedSize() +
|
||||
return linkData.serializedSize() +
|
||||
SerializedVectorSize(imports_) +
|
||||
SerializedVectorSize(exports_) +
|
||||
SerializedPodVectorSize(dataSegments_) +
|
||||
@ -318,59 +172,26 @@ Module::compiledSerializedSize() const
|
||||
}
|
||||
|
||||
/* virtual */ void
|
||||
Module::compiledSerialize(uint8_t* compiledBegin, size_t compiledSize) const
|
||||
Module::serialize(const LinkData& linkData, uint8_t* begin, size_t size) const
|
||||
{
|
||||
MOZ_ASSERT(!testingTier2Active_);
|
||||
MOZ_RELEASE_ASSERT(!testingTier2Active_);
|
||||
MOZ_RELEASE_ASSERT(!metadata().debugEnabled);
|
||||
MOZ_RELEASE_ASSERT(code_->hasTier(Tier::Serialized));
|
||||
|
||||
if (metadata().debugEnabled) {
|
||||
MOZ_RELEASE_ASSERT(compiledSize == 0);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!code_->hasTier(Tier::Serialized)) {
|
||||
MOZ_RELEASE_ASSERT(compiledSize == 0);
|
||||
return;
|
||||
}
|
||||
|
||||
uint8_t* cursor = compiledBegin;
|
||||
cursor = assumptions_.serialize(cursor);
|
||||
cursor = linkData_.serialize(cursor);
|
||||
uint8_t* cursor = begin;
|
||||
cursor = linkData.serialize(cursor);
|
||||
cursor = SerializeVector(cursor, imports_);
|
||||
cursor = SerializeVector(cursor, exports_);
|
||||
cursor = SerializePodVector(cursor, dataSegments_);
|
||||
cursor = SerializeVector(cursor, elemSegments_);
|
||||
cursor = SerializeVector(cursor, structTypes_);
|
||||
cursor = code_->serialize(cursor, linkData_);
|
||||
MOZ_RELEASE_ASSERT(cursor == compiledBegin + compiledSize);
|
||||
}
|
||||
|
||||
/* static */ bool
|
||||
Module::assumptionsMatch(const Assumptions& current, const uint8_t* compiledBegin, size_t remain)
|
||||
{
|
||||
Assumptions cached;
|
||||
if (!cached.deserialize(compiledBegin, remain))
|
||||
return false;
|
||||
|
||||
return current == cached;
|
||||
cursor = code_->serialize(cursor, linkData);
|
||||
MOZ_RELEASE_ASSERT(cursor == begin + size);
|
||||
}
|
||||
|
||||
/* static */ SharedModule
|
||||
Module::deserialize(const uint8_t* bytecodeBegin, size_t bytecodeSize,
|
||||
const uint8_t* compiledBegin, size_t compiledSize,
|
||||
Metadata* maybeMetadata)
|
||||
Module::deserialize(const uint8_t* begin, size_t size, Metadata* maybeMetadata)
|
||||
{
|
||||
MutableBytes bytecode = js_new<ShareableBytes>();
|
||||
if (!bytecode || !bytecode->bytes.initLengthUninitialized(bytecodeSize))
|
||||
return nullptr;
|
||||
|
||||
if (bytecodeSize)
|
||||
memcpy(bytecode->bytes.begin(), bytecodeBegin, bytecodeSize);
|
||||
|
||||
Assumptions assumptions;
|
||||
const uint8_t* cursor = assumptions.deserialize(compiledBegin, compiledSize);
|
||||
if (!cursor)
|
||||
return nullptr;
|
||||
|
||||
MutableMetadata metadata(maybeMetadata);
|
||||
if (!metadata) {
|
||||
metadata = js_new<Metadata>();
|
||||
@ -378,7 +199,15 @@ Module::deserialize(const uint8_t* bytecodeBegin, size_t bytecodeSize,
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
LinkData linkData;
|
||||
const uint8_t* cursor = begin;
|
||||
|
||||
// Temporary. (asm.js doesn't save bytecode)
|
||||
MOZ_RELEASE_ASSERT(maybeMetadata->isAsmJS());
|
||||
MutableBytes bytecode = js_new<ShareableBytes>();
|
||||
if (!bytecode)
|
||||
return nullptr;
|
||||
|
||||
LinkData linkData(Tier::Serialized);
|
||||
cursor = linkData.deserialize(cursor);
|
||||
if (!cursor)
|
||||
return nullptr;
|
||||
@ -413,13 +242,10 @@ Module::deserialize(const uint8_t* bytecodeBegin, size_t bytecodeSize,
|
||||
if (!cursor)
|
||||
return nullptr;
|
||||
|
||||
MOZ_RELEASE_ASSERT(cursor == compiledBegin + compiledSize);
|
||||
MOZ_RELEASE_ASSERT(cursor == begin + size);
|
||||
MOZ_RELEASE_ASSERT(!!maybeMetadata == code->metadata().isAsmJS());
|
||||
|
||||
return js_new<Module>(std::move(assumptions),
|
||||
*code,
|
||||
nullptr, // Serialized code is never debuggable
|
||||
std::move(linkData),
|
||||
return js_new<Module>(*code,
|
||||
std::move(imports),
|
||||
std::move(exports),
|
||||
std::move(dataSegments),
|
||||
@ -467,18 +293,13 @@ MapFile(PRFileDesc* file, PRFileInfo* info)
|
||||
}
|
||||
|
||||
SharedModule
|
||||
wasm::DeserializeModule(PRFileDesc* bytecodeFile, JS::BuildIdCharVector&& buildId,
|
||||
UniqueChars filename, unsigned line)
|
||||
wasm::DeserializeModule(PRFileDesc* bytecodeFile, UniqueChars filename, unsigned line)
|
||||
{
|
||||
PRFileInfo bytecodeInfo;
|
||||
UniqueMapping bytecodeMapping = MapFile(bytecodeFile, &bytecodeInfo);
|
||||
if (!bytecodeMapping)
|
||||
return nullptr;
|
||||
|
||||
// Since the compiled file's assumptions don't match, we must recompile from
|
||||
// bytecode. The bytecode file format is simply that of a .wasm (see
|
||||
// Module::bytecodeSerialize).
|
||||
|
||||
MutableBytes bytecode = js_new<ShareableBytes>();
|
||||
if (!bytecode || !bytecode->bytes.initLengthUninitialized(bytecodeInfo.size))
|
||||
return nullptr;
|
||||
@ -489,7 +310,7 @@ wasm::DeserializeModule(PRFileDesc* bytecodeFile, JS::BuildIdCharVector&& buildI
|
||||
scriptedCaller.filename = std::move(filename);
|
||||
scriptedCaller.line = line;
|
||||
|
||||
MutableCompileArgs args = js_new<CompileArgs>(Assumptions(std::move(buildId)), std::move(scriptedCaller));
|
||||
MutableCompileArgs args = js_new<CompileArgs>(std::move(scriptedCaller));
|
||||
if (!args)
|
||||
return nullptr;
|
||||
|
||||
@ -521,16 +342,14 @@ Module::addSizeOfMisc(MallocSizeOf mallocSizeOf,
|
||||
{
|
||||
code_->addSizeOfMiscIfNotSeen(mallocSizeOf, seenMetadata, seenCode, code, data);
|
||||
*data += mallocSizeOf(this) +
|
||||
assumptions_.sizeOfExcludingThis(mallocSizeOf) +
|
||||
linkData_.sizeOfExcludingThis(mallocSizeOf) +
|
||||
SizeOfVectorExcludingThis(imports_, mallocSizeOf) +
|
||||
SizeOfVectorExcludingThis(exports_, mallocSizeOf) +
|
||||
dataSegments_.sizeOfExcludingThis(mallocSizeOf) +
|
||||
SizeOfVectorExcludingThis(elemSegments_, mallocSizeOf) +
|
||||
SizeOfVectorExcludingThis(structTypes_, mallocSizeOf) +
|
||||
bytecode_->sizeOfIncludingThisIfNotSeen(mallocSizeOf, seenBytes);
|
||||
if (unlinkedCodeForDebugging_)
|
||||
*data += unlinkedCodeForDebugging_->sizeOfExcludingThis(mallocSizeOf);
|
||||
if (debugUnlinkedCode_)
|
||||
*data += debugUnlinkedCode_->sizeOfExcludingThis(mallocSizeOf);
|
||||
}
|
||||
|
||||
|
||||
@ -1115,13 +934,16 @@ Module::instantiate(JSContext* cx,
|
||||
SharedCode code(code_);
|
||||
|
||||
if (metadata().debugEnabled) {
|
||||
MOZ_ASSERT(debugUnlinkedCode_);
|
||||
MOZ_ASSERT(debugLinkData_);
|
||||
|
||||
// The first time through, use the pre-linked code in the module but
|
||||
// mark it as busy. Subsequently, instantiate the copy of the code
|
||||
// bytes that we keep around for debugging instead, because the debugger
|
||||
// may patch the pre-linked code at any time.
|
||||
if (!codeIsBusy_.compareExchange(false, true)) {
|
||||
if (!debugCodeClaimed_.compareExchange(false, true)) {
|
||||
Tier tier = Tier::Baseline;
|
||||
auto segment = ModuleSegment::create(tier, *unlinkedCodeForDebugging_, linkData(tier));
|
||||
auto segment = ModuleSegment::create(tier, *debugUnlinkedCode_, *debugLinkData_);
|
||||
if (!segment) {
|
||||
ReportOutOfMemory(cx);
|
||||
return false;
|
||||
@ -1140,7 +962,7 @@ Module::instantiate(JSContext* cx,
|
||||
return false;
|
||||
|
||||
MutableCode debugCode = js_new<Code>(std::move(codeTier), metadata(), std::move(jumpTables));
|
||||
if (!debugCode || !debugCode->initialize(*bytecode_, linkData(tier))) {
|
||||
if (!debugCode || !debugCode->initialize(*bytecode_, *debugLinkData_)) {
|
||||
ReportOutOfMemory(cx);
|
||||
return false;
|
||||
}
|
||||
|
@ -33,66 +33,6 @@ namespace wasm {
|
||||
|
||||
struct CompileArgs;
|
||||
|
||||
// LinkData contains all the metadata necessary to patch all the locations
|
||||
// that depend on the absolute address of a ModuleSegment.
|
||||
//
|
||||
// LinkData is built incrementally by ModuleGenerator and then stored immutably
|
||||
// in Module. LinkData is distinct from Metadata in that LinkData is owned and
|
||||
// destroyed by the Module since it is not needed after instantiation; Metadata
|
||||
// is needed at runtime.
|
||||
|
||||
struct LinkDataTierCacheablePod
|
||||
{
|
||||
uint32_t trapOffset = 0;
|
||||
|
||||
LinkDataTierCacheablePod() = default;
|
||||
};
|
||||
|
||||
struct LinkDataTier : LinkDataTierCacheablePod
|
||||
{
|
||||
const Tier tier;
|
||||
|
||||
explicit LinkDataTier(Tier tier) : tier(tier) {}
|
||||
|
||||
LinkDataTierCacheablePod& pod() { return *this; }
|
||||
const LinkDataTierCacheablePod& pod() const { return *this; }
|
||||
|
||||
struct InternalLink {
|
||||
uint32_t patchAtOffset;
|
||||
uint32_t targetOffset;
|
||||
#ifdef JS_CODELABEL_LINKMODE
|
||||
uint32_t mode;
|
||||
#endif
|
||||
};
|
||||
typedef Vector<InternalLink, 0, SystemAllocPolicy> InternalLinkVector;
|
||||
|
||||
struct SymbolicLinkArray : EnumeratedArray<SymbolicAddress, SymbolicAddress::Limit, Uint32Vector> {
|
||||
WASM_DECLARE_SERIALIZABLE(SymbolicLinkArray)
|
||||
};
|
||||
|
||||
InternalLinkVector internalLinks;
|
||||
SymbolicLinkArray symbolicLinks;
|
||||
|
||||
WASM_DECLARE_SERIALIZABLE(LinkData)
|
||||
};
|
||||
|
||||
typedef UniquePtr<LinkDataTier> UniqueLinkDataTier;
|
||||
|
||||
class LinkData
|
||||
{
|
||||
UniqueLinkDataTier tier1_; // Always present
|
||||
mutable UniqueLinkDataTier tier2_; // Access only if hasTier2() is true
|
||||
|
||||
public:
|
||||
LinkData() {}
|
||||
explicit LinkData(UniqueLinkDataTier tier) : tier1_(std::move(tier)) {}
|
||||
|
||||
void setTier2(UniqueLinkDataTier linkData) const;
|
||||
const LinkDataTier& tier(Tier tier) const;
|
||||
|
||||
WASM_DECLARE_SERIALIZABLE(LinkData)
|
||||
};
|
||||
|
||||
// Module represents a compiled wasm module and primarily provides two
|
||||
// operations: instantiation and serialization. A Module can be instantiated any
|
||||
// number of times to produce new Instance objects. A Module can be serialized
|
||||
@ -108,10 +48,7 @@ class LinkData
|
||||
|
||||
class Module : public JS::WasmModule
|
||||
{
|
||||
const Assumptions assumptions_;
|
||||
const SharedCode code_;
|
||||
const UniqueConstBytes unlinkedCodeForDebugging_;
|
||||
const LinkData linkData_;
|
||||
const ImportVector imports_;
|
||||
const ExportVector exports_;
|
||||
const DataSegmentVector dataSegments_;
|
||||
@ -119,18 +56,23 @@ class Module : public JS::WasmModule
|
||||
const StructTypeVector structTypes_;
|
||||
const SharedBytes bytecode_;
|
||||
|
||||
// These fields are only meaningful when code_->metadata().debugEnabled.
|
||||
// `debugCodeClaimed_` is set to false initially and then to true when
|
||||
// `code_` is already being used for an instance and can't be shared because
|
||||
// it may be patched by the debugger. Subsequent instances must then create
|
||||
// copies by linking the `debugUnlinkedCode_` using `debugLinkData_`.
|
||||
// This could all be removed if debugging didn't need to perform
|
||||
// per-instance code patching.
|
||||
|
||||
mutable Atomic<bool> debugCodeClaimed_;
|
||||
const UniqueConstBytes debugUnlinkedCode_;
|
||||
const UniqueLinkData debugLinkData_;
|
||||
|
||||
// This flag is only used for testing purposes and is racily updated as soon
|
||||
// as tier-2 compilation finishes (in success or failure).
|
||||
|
||||
mutable Atomic<bool> testingTier2Active_;
|
||||
|
||||
// `codeIsBusy_` is set to false initially and then to true when `code_` is
|
||||
// already being used for an instance and can't be shared because it may be
|
||||
// patched by the debugger. Subsequent instances must then create copies
|
||||
// by linking the `unlinkedCodeForDebugging_`.
|
||||
|
||||
mutable Atomic<bool> codeIsBusy_;
|
||||
|
||||
bool instantiateFunctions(JSContext* cx, Handle<FunctionVector> funcImports) const;
|
||||
bool instantiateMemory(JSContext* cx, MutableHandleWasmMemoryObject memory) const;
|
||||
bool instantiateTable(JSContext* cx,
|
||||
@ -147,30 +89,28 @@ class Module : public JS::WasmModule
|
||||
class Tier2GeneratorTaskImpl;
|
||||
|
||||
public:
|
||||
Module(Assumptions&& assumptions,
|
||||
const Code& code,
|
||||
UniqueConstBytes unlinkedCodeForDebugging,
|
||||
LinkData&& linkData,
|
||||
Module(const Code& code,
|
||||
ImportVector&& imports,
|
||||
ExportVector&& exports,
|
||||
DataSegmentVector&& dataSegments,
|
||||
ElemSegmentVector&& elemSegments,
|
||||
StructTypeVector&& structTypes,
|
||||
const ShareableBytes& bytecode)
|
||||
: assumptions_(std::move(assumptions)),
|
||||
code_(&code),
|
||||
unlinkedCodeForDebugging_(std::move(unlinkedCodeForDebugging)),
|
||||
linkData_(std::move(linkData)),
|
||||
const ShareableBytes& bytecode,
|
||||
UniqueConstBytes debugUnlinkedCode = nullptr,
|
||||
UniqueLinkData debugLinkData = nullptr)
|
||||
: code_(&code),
|
||||
imports_(std::move(imports)),
|
||||
exports_(std::move(exports)),
|
||||
dataSegments_(std::move(dataSegments)),
|
||||
elemSegments_(std::move(elemSegments)),
|
||||
structTypes_(std::move(structTypes)),
|
||||
bytecode_(&bytecode),
|
||||
testingTier2Active_(false),
|
||||
codeIsBusy_(false)
|
||||
debugCodeClaimed_(false),
|
||||
debugUnlinkedCode_(std::move(debugUnlinkedCode)),
|
||||
debugLinkData_(std::move(debugLinkData)),
|
||||
testingTier2Active_(false)
|
||||
{
|
||||
MOZ_ASSERT_IF(metadata().debugEnabled, unlinkedCodeForDebugging_);
|
||||
MOZ_ASSERT_IF(metadata().debugEnabled, debugUnlinkedCode_ && debugLinkData_);
|
||||
}
|
||||
~Module() override { /* Note: can be called on any thread */ }
|
||||
|
||||
@ -178,8 +118,6 @@ class Module : public JS::WasmModule
|
||||
const ModuleSegment& moduleSegment(Tier t) const { return code_->segment(t); }
|
||||
const Metadata& metadata() const { return code_->metadata(); }
|
||||
const MetadataTier& metadata(Tier t) const { return code_->metadata(t); }
|
||||
const LinkData& linkData() const { return linkData_; }
|
||||
const LinkDataTier& linkData(Tier t) const { return linkData_.tier(t); }
|
||||
const ImportVector& imports() const { return imports_; }
|
||||
const ExportVector& exports() const { return exports_; }
|
||||
const ShareableBytes& bytecode() const { return *bytecode_; }
|
||||
@ -202,7 +140,7 @@ class Module : public JS::WasmModule
|
||||
// be installed and made visible.
|
||||
|
||||
void startTier2(const CompileArgs& args);
|
||||
bool finishTier2(UniqueLinkDataTier linkData2, UniqueCodeTier tier2, ModuleEnvironment* env2);
|
||||
bool finishTier2(const LinkData& linkData, UniqueCodeTier tier2, ModuleEnvironment&& env2);
|
||||
|
||||
void testingBlockOnTier2Complete() const;
|
||||
bool testingTier2Active() const { return testingTier2Active_; }
|
||||
@ -210,13 +148,9 @@ class Module : public JS::WasmModule
    // Currently dead, but will be resurrected with shell tests (bug 1330661)
    // and HTTP cache integration.

    size_t compiledSerializedSize() const;
    void compiledSerialize(uint8_t* compiledBegin, size_t compiledSize) const;

    static bool assumptionsMatch(const Assumptions& current, const uint8_t* compiledBegin,
                                 size_t remain);
    static RefPtr<Module> deserialize(const uint8_t* bytecodeBegin, size_t bytecodeSize,
                                      const uint8_t* compiledBegin, size_t compiledSize,
    size_t serializedSize(const LinkData& linkData) const;
    void serialize(const LinkData& linkData, uint8_t* begin, size_t size) const;
    static RefPtr<Module> deserialize(const uint8_t* begin, size_t size,
                                      Metadata* maybeMetadata = nullptr);

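A minimal round-trip sketch using the new signatures, assuming the caller still holds the module's LinkData and, since this path is currently asm.js-only, an asmJSMetadata pointer:

    Bytes bytes;
    if (!bytes.resize(module->serializedSize(linkData)))
        return nullptr;
    module->serialize(linkData, bytes.begin(), bytes.length());

    // Deserialization no longer takes separate bytecode/compiled buffers.
    RefPtr<Module> clone = Module::deserialize(bytes.begin(), bytes.length(), asmJSMetadata);
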
// JS API and JS::WasmModule implementation:
|
||||
@ -241,8 +175,7 @@ typedef RefPtr<Module> SharedModule;
|
||||
// JS API implementations:
|
||||
|
||||
SharedModule
|
||||
DeserializeModule(PRFileDesc* bytecode, JS::BuildIdCharVector&& buildId, UniqueChars filename,
|
||||
unsigned line);
|
||||
DeserializeModule(PRFileDesc* bytecode, UniqueChars filename, unsigned line);
|
||||
|
||||
} // namespace wasm
|
||||
} // namespace js
|
||||
|
@ -136,44 +136,6 @@ wasm::IsRoundingFunction(SymbolicAddress callee, jit::RoundingMode* mode)
|
||||
}
|
||||
}
|
||||
|
||||
static uint32_t
|
||||
GetCPUID()
|
||||
{
|
||||
enum Arch {
|
||||
X86 = 0x1,
|
||||
X64 = 0x2,
|
||||
ARM = 0x3,
|
||||
MIPS = 0x4,
|
||||
MIPS64 = 0x5,
|
||||
ARM64 = 0x6,
|
||||
ARCH_BITS = 3
|
||||
};
|
||||
|
||||
#if defined(JS_CODEGEN_X86)
|
||||
MOZ_ASSERT(uint32_t(jit::CPUInfo::GetSSEVersion()) <= (UINT32_MAX >> ARCH_BITS));
|
||||
return X86 | (uint32_t(jit::CPUInfo::GetSSEVersion()) << ARCH_BITS);
|
||||
#elif defined(JS_CODEGEN_X64)
|
||||
MOZ_ASSERT(uint32_t(jit::CPUInfo::GetSSEVersion()) <= (UINT32_MAX >> ARCH_BITS));
|
||||
return X64 | (uint32_t(jit::CPUInfo::GetSSEVersion()) << ARCH_BITS);
|
||||
#elif defined(JS_CODEGEN_ARM)
|
||||
MOZ_ASSERT(jit::GetARMFlags() <= (UINT32_MAX >> ARCH_BITS));
|
||||
return ARM | (jit::GetARMFlags() << ARCH_BITS);
|
||||
#elif defined(JS_CODEGEN_ARM64)
|
||||
MOZ_ASSERT(jit::GetARM64Flags() <= (UINT32_MAX >> ARCH_BITS));
|
||||
return ARM64 | (jit::GetARM64Flags() << ARCH_BITS);
|
||||
#elif defined(JS_CODEGEN_MIPS32)
|
||||
MOZ_ASSERT(jit::GetMIPSFlags() <= (UINT32_MAX >> ARCH_BITS));
|
||||
return MIPS | (jit::GetMIPSFlags() << ARCH_BITS);
|
||||
#elif defined(JS_CODEGEN_MIPS64)
|
||||
MOZ_ASSERT(jit::GetMIPSFlags() <= (UINT32_MAX >> ARCH_BITS));
|
||||
return MIPS64 | (jit::GetMIPSFlags() << ARCH_BITS);
|
||||
#elif defined(JS_CODEGEN_NONE)
|
||||
return 0;
|
||||
#else
|
||||
# error "unknown architecture"
|
||||
#endif
|
||||
}
|
||||
|
||||
size_t
|
||||
FuncType::serializedSize() const
|
||||
{
|
||||
@ -524,74 +486,6 @@ ElemSegment::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
|
||||
elemCodeRangeIndices(Tier::Serialized).sizeOfExcludingThis(mallocSizeOf);
|
||||
}
|
||||
|
||||
Assumptions::Assumptions(JS::BuildIdCharVector&& buildId)
|
||||
: cpuId(GetCPUID()),
|
||||
buildId(std::move(buildId))
|
||||
{}
|
||||
|
||||
Assumptions::Assumptions()
|
||||
: cpuId(GetCPUID()),
|
||||
buildId()
|
||||
{}
|
||||
|
||||
bool
|
||||
Assumptions::initBuildIdFromContext(JSContext* cx)
|
||||
{
|
||||
if (!cx->buildIdOp() || !cx->buildIdOp()(&buildId)) {
|
||||
ReportOutOfMemory(cx);
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool
|
||||
Assumptions::clone(const Assumptions& other)
|
||||
{
|
||||
cpuId = other.cpuId;
|
||||
return buildId.appendAll(other.buildId);
|
||||
}
|
||||
|
||||
bool
|
||||
Assumptions::operator==(const Assumptions& rhs) const
|
||||
{
|
||||
return cpuId == rhs.cpuId &&
|
||||
buildId.length() == rhs.buildId.length() &&
|
||||
ArrayEqual(buildId.begin(), rhs.buildId.begin(), buildId.length());
|
||||
}
|
||||
|
||||
size_t
|
||||
Assumptions::serializedSize() const
|
||||
{
|
||||
return sizeof(uint32_t) +
|
||||
SerializedPodVectorSize(buildId);
|
||||
}
|
||||
|
||||
uint8_t*
|
||||
Assumptions::serialize(uint8_t* cursor) const
|
||||
{
|
||||
// The format of serialized Assumptions must never change in a way that
// would cause old cache files written by an old build-id to match the
// assumptions of a different build-id.
|
||||
|
||||
cursor = WriteScalar<uint32_t>(cursor, cpuId);
|
||||
cursor = SerializePodVector(cursor, buildId);
|
||||
return cursor;
|
||||
}
|
||||
|
||||
const uint8_t*
|
||||
Assumptions::deserialize(const uint8_t* cursor, size_t remain)
|
||||
{
|
||||
(cursor = ReadScalarChecked<uint32_t>(cursor, &remain, &cpuId)) &&
|
||||
(cursor = DeserializePodVectorChecked(cursor, &remain, &buildId));
|
||||
return cursor;
|
||||
}
|
||||
|
||||
size_t
|
||||
Assumptions::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
|
||||
{
|
||||
return buildId.sizeOfExcludingThis(mallocSizeOf);
|
||||
}
|
||||
|
||||
// Heap length on ARM should fit in an ARM immediate. We approximate the set
|
||||
// of valid ARM immediates with the predicate:
|
||||
// 2^n for n in [16, 24)
|
||||
|
@ -1815,33 +1815,6 @@ enum class SymbolicAddress
|
||||
bool
|
||||
IsRoundingFunction(SymbolicAddress callee, jit::RoundingMode* mode);
|
||||
|
||||
// Assumptions captures ambient state that must be the same when compiling and
|
||||
// deserializing a module for the compiled code to be valid. If it's not, then
|
||||
// the module must be recompiled from scratch.
|
||||
|
||||
struct Assumptions
|
||||
{
|
||||
uint32_t cpuId;
|
||||
JS::BuildIdCharVector buildId;
|
||||
|
||||
explicit Assumptions(JS::BuildIdCharVector&& buildId);
|
||||
|
||||
// If Assumptions is constructed without arguments, initBuildIdFromContext()
|
||||
// must be called to complete initialization.
|
||||
Assumptions();
|
||||
bool initBuildIdFromContext(JSContext* cx);
|
||||
|
||||
bool clone(const Assumptions& other);
|
||||
|
||||
bool operator==(const Assumptions& rhs) const;
|
||||
bool operator!=(const Assumptions& rhs) const { return !(*this == rhs); }
|
||||
|
||||
size_t serializedSize() const;
|
||||
uint8_t* serialize(uint8_t* cursor) const;
|
||||
const uint8_t* deserialize(const uint8_t* cursor, size_t remain);
|
||||
size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
|
||||
};
|
||||
|
||||
// Represents the resizable limits of memories and tables.
|
||||
|
||||
struct Limits
|
||||
|
@ -27,6 +27,7 @@
|
||||
#include "nsIDocShell.h"
|
||||
#include "nsIDocument.h"
|
||||
#include "nsIRunnable.h"
|
||||
#include "nsIPlatformInfo.h"
|
||||
#include "nsPIDOMWindow.h"
|
||||
#include "nsPrintfCString.h"
|
||||
#include "nsWindowSizes.h"
|
||||
@ -1044,6 +1045,29 @@ OnLargeAllocationFailureCallback()
|
||||
r->BlockUntilDone();
|
||||
}
|
||||
|
||||
bool
|
||||
mozilla::GetBuildId(JS::BuildIdCharVector* aBuildID)
|
||||
{
|
||||
nsCOMPtr<nsIPlatformInfo> info = do_GetService("@mozilla.org/xre/app-info;1");
|
||||
if (!info) {
|
||||
return false;
|
||||
}
|
||||
|
||||
nsCString buildID;
|
||||
nsresult rv = info->GetPlatformBuildID(buildID);
|
||||
NS_ENSURE_SUCCESS(rv, false);
|
||||
|
||||
if (!aBuildID->resize(buildID.Length())) {
|
||||
return false;
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < buildID.Length(); i++) {
|
||||
(*aBuildID)[i] = buildID[i];
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
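A minimal consumer sketch; the hook is registered below via JS::SetProcessBuildIdOp(GetBuildId), after which the JS engine can obtain the ID like this:

    JS::BuildIdCharVector buildId;
    if (!mozilla::GetBuildId(&buildId)) {
        return false;  // platform-info service unavailable or OOM
    }
    // buildId now holds the platform build ID, one character per element.
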
size_t
|
||||
XPCJSRuntime::SizeOfIncludingThis(MallocSizeOf mallocSizeOf)
|
||||
{
|
||||
@ -2951,6 +2975,7 @@ XPCJSRuntime::Initialize(JSContext* cx)
|
||||
js::SetWindowProxyClass(cx, &OuterWindowProxyClass);
|
||||
js::SetXrayJitInfo(&gXrayJitInfo);
|
||||
JS::SetProcessLargeAllocationFailureCallback(OnLargeAllocationFailureCallback);
|
||||
JS::SetProcessBuildIdOp(GetBuildId);
|
||||
|
||||
// The JS engine needs to keep the source code around in order to implement
|
||||
// Function.prototype.toSource(). It'd be nice to not have to do this for
|
||||
|
@ -757,6 +757,13 @@ bool IsChromeOrXBLOrUAWidget(JSContext* cx, JSObject* /* unused */);
|
||||
bool ThreadSafeIsChromeOrXBLOrUAWidget(JSContext* cx, JSObject* obj);
|
||||
|
||||
} // namespace dom
|
||||
|
||||
/**
|
||||
* Fill the given vector with the buildid.
|
||||
*/
|
||||
bool
|
||||
GetBuildId(JS::BuildIdCharVector* aBuildID);
|
||||
|
||||
} // namespace mozilla
|
||||
|
||||
#endif
|
||||
|
@ -2247,8 +2247,8 @@ nsDocumentViewer::Show(void)
|
||||
if (history) {
|
||||
int32_t prevIndex,loadedIndex;
|
||||
nsCOMPtr<nsIDocShell> docShell = do_QueryInterface(treeItem);
|
||||
docShell->GetPreviousTransIndex(&prevIndex);
|
||||
docShell->GetLoadedTransIndex(&loadedIndex);
|
||||
docShell->GetPreviousEntryIndex(&prevIndex);
|
||||
docShell->GetLoadedEntryIndex(&loadedIndex);
|
||||
#ifdef DEBUG_PAGE_CACHE
|
||||
printf("About to evict content viewers: prev=%d, loaded=%d\n",
|
||||
prevIndex, loadedIndex);
|
||||
|
@ -122,14 +122,17 @@ public:
|
||||
return mTarget == aOther.mTarget;
|
||||
}
|
||||
|
||||
bool LessThan(const OwningElementRef& aOther) const
|
||||
bool LessThan(int32_t& aChildIndex, const OwningElementRef& aOther,
|
||||
int32_t& aOtherChildIndex) const
|
||||
{
|
||||
MOZ_ASSERT(mTarget.mElement && aOther.mTarget.mElement,
|
||||
"Elements to compare should not be null");
|
||||
|
||||
if (mTarget.mElement != aOther.mTarget.mElement) {
|
||||
return nsContentUtils::PositionIsBefore(mTarget.mElement,
|
||||
aOther.mTarget.mElement);
|
||||
aOther.mTarget.mElement,
|
||||
&aChildIndex,
|
||||
&aOtherChildIndex);
|
||||
}
|
||||
|
||||
return mTarget.mPseudoType == CSSPseudoElementType::NotPseudo ||
|
||||
|
@ -155,7 +155,10 @@ CSSAnimation::HasLowerCompositeOrderThan(const CSSAnimation& aOther) const
|
||||
|
||||
// 1. Sort by document order
|
||||
if (!mOwningElement.Equals(aOther.mOwningElement)) {
|
||||
return mOwningElement.LessThan(aOther.mOwningElement);
|
||||
return mOwningElement.LessThan(
|
||||
const_cast<CSSAnimation*>(this)->CachedChildIndexRef(),
|
||||
aOther.mOwningElement,
|
||||
const_cast<CSSAnimation*>(&aOther)->CachedChildIndexRef());
|
||||
}
|
||||
|
||||
// 2. (Same element and pseudo): Sort by position in animation-name
|
||||
|
@ -362,7 +362,10 @@ CSSTransition::HasLowerCompositeOrderThan(const CSSTransition& aOther) const
|
||||
|
||||
// 1. Sort by document order
|
||||
if (!mOwningElement.Equals(aOther.mOwningElement)) {
|
||||
return mOwningElement.LessThan(aOther.mOwningElement);
|
||||
return mOwningElement.LessThan(
|
||||
const_cast<CSSTransition*>(this)->CachedChildIndexRef(),
|
||||
aOther.mOwningElement,
|
||||
const_cast<CSSTransition*>(&aOther)->CachedChildIndexRef());
|
||||
}
|
||||
|
||||
// 2. (Same element and pseudo): Sort by transition generation
|
||||
|
@ -2270,7 +2270,7 @@ var BrowserApp = {
|
||||
let browser = this.selectedBrowser;
|
||||
let hist = browser.sessionHistory.legacySHistory;
|
||||
for (let i = toIndex; i >= fromIndex; i--) {
|
||||
let entry = hist.getEntryAtIndex(i, false);
|
||||
let entry = hist.getEntryAtIndex(i);
|
||||
let item = {
|
||||
title: entry.title || entry.URI.displaySpec,
|
||||
url: entry.URI.displaySpec,
|
||||
|
@ -6,6 +6,15 @@ ac_add_options --target=aarch64-linux-android
|
||||
|
||||
ac_add_options --with-branding=mobile/android/branding/nightly
|
||||
|
||||
export AR="$topsrcdir/clang/bin/llvm-ar"
|
||||
export NM="$topsrcdir/clang/bin/llvm-nm"
|
||||
export RANLIB="$topsrcdir/clang/bin/llvm-ranlib"
|
||||
|
||||
# Enable LTO if the NDK is available.
|
||||
if [ -z "$NO_NDK" ]; then
|
||||
ac_add_options --enable-lto
|
||||
fi
|
||||
|
||||
export MOZILLA_OFFICIAL=1
|
||||
export MOZ_TELEMETRY_REPORTING=1
|
||||
export MOZ_ANDROID_POCKET=1
|
||||
|
@ -16,4 +16,13 @@ export MOZ_TELEMETRY_REPORTING=1
|
||||
export MOZ_ANDROID_MMA=1
|
||||
export MOZ_ANDROID_POCKET=1
|
||||
|
||||
export AR="$topsrcdir/clang/bin/llvm-ar"
|
||||
export NM="$topsrcdir/clang/bin/llvm-nm"
|
||||
export RANLIB="$topsrcdir/clang/bin/llvm-ranlib"
|
||||
|
||||
# Enable LTO if the NDK is available.
|
||||
if [ -z "$NO_NDK" ]; then
|
||||
ac_add_options --enable-lto
|
||||
fi
|
||||
|
||||
. "$topsrcdir/mobile/android/config/mozconfigs/common.override"
|
||||
|
@ -14,4 +14,13 @@ export MOZILLA_OFFICIAL=1
|
||||
export MOZ_TELEMETRY_REPORTING=1
|
||||
export MOZ_ANDROID_POCKET=1
|
||||
|
||||
export AR="$topsrcdir/clang/bin/llvm-ar"
|
||||
export NM="$topsrcdir/clang/bin/llvm-nm"
|
||||
export RANLIB="$topsrcdir/clang/bin/llvm-ranlib"
|
||||
|
||||
# Enable LTO if the NDK is available.
|
||||
if [ -z "$NO_NDK" ]; then
|
||||
ac_add_options --enable-lto
|
||||
fi
|
||||
|
||||
. "$topsrcdir/mobile/android/config/mozconfigs/common.override"
|
||||
|
@ -42,6 +42,11 @@ AutomaticAuth=You are about to log in to the site “%1$S” with the username
|
||||
|
||||
TrackerUriBlocked=The resource at “%1$S” was blocked because content blocking is enabled.
|
||||
UnsafeUriBlocked=The resource at “%1$S” was blocked by Safe Browsing.
|
||||
CookieBlockedByPermission=Request to access cookies or storage on “%1$S” was blocked because of custom cookie permission.
|
||||
CookieBlockedTracker=Request to access cookie or storage on “%1$S” was blocked because it came from a tracker and content blocking is enabled.
|
||||
CookieBlockedAll=Request to access cookie or storage on “%1$S” was blocked because we are blocking all storage access requests.
|
||||
CookieBlockedForeign=Request to access cookie or storage on “%1$S” was blocked because we are blocking all third-party storage access requests and content blocking is enabled.
|
||||
CookieBlockedSlowTrackingContent=The resource at “%1$S” was blocked because content blocking is enabled and the resource was classified as a slow tracking resource.
|
||||
|
||||
# LOCALIZATION NOTE (nsICookieManagerAPIDeprecated): don't localize originAttributes.
|
||||
# %1$S is the deprecated API; %2$S is the interface suffix that the given deprecated API belongs to.
|
||||
|
taskcluster/ci/bouncer-locations/kind.yml (new file, 51 lines)
@ -0,0 +1,51 @@
|
||||
# This Source Code Form is subject to the terms of the Mozilla Public
|
||||
# License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
loader: taskgraph.loader.transform:loader
|
||||
|
||||
transforms:
|
||||
- taskgraph.transforms.bouncer_locations:transforms
|
||||
- taskgraph.transforms.task:transforms
|
||||
|
||||
job-defaults:
|
||||
description: nightly bouncer locations job
|
||||
attributes:
|
||||
build_platform: linux64-nightly
|
||||
nightly: true
|
||||
worker-type:
|
||||
by-project:
|
||||
mozilla-central: scriptworker-prov-v1/bouncer-v1
|
||||
default: scriptworker-prov-v1/bouncer-dev
|
||||
worker:
|
||||
implementation: bouncer-locations
|
||||
scopes:
|
||||
by-project:
|
||||
mozilla-central:
|
||||
- project:releng:bouncer:action:locations
|
||||
- project:releng:bouncer:server:production
|
||||
default:
|
||||
- project:releng:bouncer:action:locations
|
||||
- project:releng:bouncer:server:staging
|
||||
run-on-projects: ['maple', 'mozilla-central']
|
||||
treeherder:
|
||||
symbol: BncLoc
|
||||
kind: other
|
||||
tier: 2
|
||||
|
||||
jobs:
|
||||
firefox:
|
||||
bouncer-products:
|
||||
by-project:
|
||||
mozilla-central:
|
||||
- firefox-nightly-latest
|
||||
- firefox-nightly-latest-ssl
|
||||
- firefox-nightly-latest-l10n
|
||||
- firefox-nightly-latest-l10n-ssl
|
||||
default:
|
||||
- firefox-nightly-latest
|
||||
- firefox-nightly-latest-ssl
|
||||
- firefox-nightly-latest-l10n
|
||||
- firefox-nightly-latest-l10n-ssl
|
||||
treeherder:
|
||||
platform: firefox-release/opt
|
@ -311,6 +311,10 @@ bouncer-check
|
||||
-------------
|
||||
Checks Bouncer (download.mozilla.org) uptake.
|
||||
|
||||
bouncer-locations
|
||||
-----------------
|
||||
Updates nightly bouncer locations for version bumps.
|
||||
|
||||
release-bouncer-check
|
||||
---------------------
|
||||
Checks Bouncer (download.mozilla.org) uptake as part of the release tasks.
|
||||
|
taskcluster/taskgraph/transforms/bouncer_locations.py (new file, 33 lines)
@ -0,0 +1,33 @@
|
||||
# This Source Code Form is subject to the terms of the Mozilla Public
|
||||
# License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
from __future__ import absolute_import, print_function, unicode_literals
|
||||
|
||||
import logging
|
||||
|
||||
from taskgraph.transforms.base import TransformSequence
|
||||
from taskgraph.util.schema import resolve_keyed_by
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
transforms = TransformSequence()
|
||||
|
||||
|
||||
@transforms.add
|
||||
def make_task_worker(config, jobs):
|
||||
for job in jobs:
|
||||
resolve_keyed_by(
|
||||
job, 'worker-type', item_name=job['name'], project=config.params['project']
|
||||
)
|
||||
resolve_keyed_by(
|
||||
job, 'scopes', item_name=job['name'], project=config.params['project']
|
||||
)
|
||||
resolve_keyed_by(
|
||||
job, 'bouncer-products', item_name=job['name'], project=config.params['project']
|
||||
)
|
||||
|
||||
job['worker']['bouncer-products'] = job['bouncer-products']
|
||||
|
||||
del job['bouncer-products']
|
||||
yield job
|
@ -564,6 +564,9 @@ task_description_schema = Schema({
|
||||
}, {
|
||||
Required('implementation'): 'bouncer-aliases',
|
||||
Required('entries'): object,
|
||||
}, {
|
||||
Required('implementation'): 'bouncer-locations',
|
||||
Required('bouncer-products'): [basestring],
|
||||
}, {
|
||||
Required('implementation'): 'bouncer-submission',
|
||||
Required('locales'): [basestring],
|
||||
@ -1187,6 +1190,17 @@ def build_bouncer_aliases_payload(config, task, task_def):
|
||||
}
|
||||
|
||||
|
||||
@payload_builder('bouncer-locations')
|
||||
def build_bouncer_locations_payload(config, task, task_def):
|
||||
worker = task['worker']
|
||||
release_config = get_release_config(config)
|
||||
|
||||
task_def['payload'] = {
|
||||
'bouncer_products': worker['bouncer-products'],
|
||||
'version': release_config['version'],
|
||||
}
|
||||
|
||||
|
||||
@payload_builder('bouncer-submission')
|
||||
def build_bouncer_submission_payload(config, task, task_def):
|
||||
worker = task['worker']
|
||||
|
@ -19,6 +19,7 @@ import mozfile
|
||||
from mach.decorators import CommandProvider, Command
|
||||
from mozboot.util import get_state_dir
|
||||
from mozbuild.base import MozbuildObject, MachCommandBase
|
||||
from mozbuild.base import MachCommandConditions as conditions
|
||||
|
||||
HERE = os.path.dirname(os.path.realpath(__file__))
|
||||
BENCHMARK_REPOSITORY = 'https://github.com/mozilla/perf-automation'
|
||||
@ -161,11 +162,12 @@ class MachRaptor(MachCommandBase):
|
||||
parser=create_parser)
|
||||
def run_raptor_test(self, **kwargs):
|
||||
|
||||
from mozrunner.devices.android_device import verify_android_device
|
||||
|
||||
build_obj = MozbuildObject.from_environment(cwd=HERE)
|
||||
if not verify_android_device(build_obj, install=True, app=kwargs['binary']):
|
||||
return 1
|
||||
|
||||
if conditions.is_android(build_obj) or kwargs['app'] == 'geckoview':
|
||||
from mozrunner.devices.android_device import verify_android_device
|
||||
if not verify_android_device(build_obj, install=True, app=kwargs['binary']):
|
||||
return 1
|
||||
|
||||
debug_command = '--debug-command'
|
||||
if debug_command in sys.argv:
|
||||
|
@ -21,7 +21,7 @@ class PurgeSessionHistoryChild extends ActorChild {
|
||||
// place the entry at current index at the end of the history list, so it won't get removed
|
||||
if (sessionHistory.index < sessionHistory.count - 1) {
|
||||
let legacy = sessionHistory.legacySHistory;
|
||||
let indexEntry = legacy.getEntryAtIndex(sessionHistory.index, false);
|
||||
let indexEntry = legacy.getEntryAtIndex(sessionHistory.index);
|
||||
indexEntry.QueryInterface(Ci.nsISHEntry);
|
||||
legacy.addEntry(indexEntry, true);
|
||||
}
|
||||
|
@ -17,11 +17,13 @@
|
||||
#include "nsGlobalWindowInner.h"
|
||||
#include "nsICookiePermission.h"
|
||||
#include "nsICookieService.h"
|
||||
#include "nsIDocShell.h"
|
||||
#include "nsIHttpChannelInternal.h"
|
||||
#include "nsIIOService.h"
|
||||
#include "nsIParentChannel.h"
|
||||
#include "nsIPermissionManager.h"
|
||||
#include "nsIPrincipal.h"
|
||||
#include "nsIScriptError.h"
|
||||
#include "nsIURI.h"
|
||||
#include "nsIURL.h"
|
||||
#include "nsIWebProgressListener.h"
|
||||
@ -186,6 +188,71 @@ CheckContentBlockingAllowList(nsIHttpChannel* aChannel)
|
||||
return false;
|
||||
}
|
||||
|
||||
void
|
||||
ReportBlockingToConsole(nsPIDOMWindowOuter* aWindow, nsIHttpChannel* aChannel,
|
||||
uint32_t aRejectedReason)
|
||||
{
|
||||
MOZ_ASSERT(aWindow && aChannel);
|
||||
MOZ_ASSERT(aRejectedReason == nsIWebProgressListener::STATE_COOKIES_BLOCKED_BY_PERMISSION ||
|
||||
aRejectedReason == nsIWebProgressListener::STATE_COOKIES_BLOCKED_TRACKER ||
|
||||
aRejectedReason == nsIWebProgressListener::STATE_COOKIES_BLOCKED_ALL ||
|
||||
aRejectedReason == nsIWebProgressListener::STATE_COOKIES_BLOCKED_FOREIGN ||
|
||||
aRejectedReason == nsIWebProgressListener::STATE_BLOCKED_SLOW_TRACKING_CONTENT);
|
||||
|
||||
if (!StaticPrefs::browser_contentblocking_enabled()) {
|
||||
return;
|
||||
}
|
||||
|
||||
nsCOMPtr<nsIDocShell> docShell = aWindow->GetDocShell();
|
||||
if (NS_WARN_IF(!docShell)) {
|
||||
return;
|
||||
}
|
||||
|
||||
nsCOMPtr<nsIDocument> doc = docShell->GetDocument();
|
||||
if (NS_WARN_IF(!doc)) {
|
||||
return;
|
||||
}
|
||||
|
||||
const char* message = nullptr;
|
||||
switch (aRejectedReason) {
|
||||
case nsIWebProgressListener::STATE_COOKIES_BLOCKED_BY_PERMISSION:
|
||||
message = "CookieBlockedByPermission";
|
||||
break;
|
||||
|
||||
case nsIWebProgressListener::STATE_COOKIES_BLOCKED_TRACKER:
|
||||
message = "CookieBlockedTracker";
|
||||
break;
|
||||
|
||||
case nsIWebProgressListener::STATE_COOKIES_BLOCKED_ALL:
|
||||
message = "CookieBlockedAll";
|
||||
break;
|
||||
|
||||
case nsIWebProgressListener::STATE_COOKIES_BLOCKED_FOREIGN:
|
||||
message = "CookieBlockedForeign";
|
||||
break;
|
||||
|
||||
case nsIWebProgressListener::STATE_BLOCKED_SLOW_TRACKING_CONTENT:
|
||||
message = "CookieBlockedSlowTrackingContent";
|
||||
break;
|
||||
|
||||
default:
|
||||
return;
|
||||
}
|
||||
|
||||
MOZ_ASSERT(message);
|
||||
|
||||
nsCOMPtr<nsIURI> uri;
|
||||
aChannel->GetURI(getter_AddRefs(uri));
|
||||
NS_ConvertUTF8toUTF16 spec(uri->GetSpecOrDefault());
|
||||
const char16_t* params[] = { spec.get() };
|
||||
|
||||
nsContentUtils::ReportToConsole(nsIScriptError::warningFlag,
|
||||
NS_LITERAL_CSTRING("Content Blocking"),
|
||||
doc,
|
||||
nsContentUtils::eNECKO_PROPERTIES,
|
||||
message,
|
||||
params, ArrayLength(params));
|
||||
}
|
||||
} // anonymous
|
||||
|
||||
/* static */ RefPtr<AntiTrackingCommon::StorageAccessGrantPromise>
|
||||
@ -916,6 +983,8 @@ AntiTrackingCommon::NotifyRejection(nsIChannel* aChannel,
|
||||
}
|
||||
|
||||
pwin->NotifyContentBlockingState(aRejectedReason, aChannel);
|
||||
|
||||
ReportBlockingToConsole(pwin, httpChannel, aRejectedReason);
|
||||
}
|
||||
|
||||
/* static */ void
|
||||
@ -946,7 +1015,11 @@ AntiTrackingCommon::NotifyRejection(nsPIDOMWindowInner* aWindow,
|
||||
pwin = outer->GetTopOuter();
|
||||
}
|
||||
|
||||
if (pwin) {
|
||||
pwin->NotifyContentBlockingState(aRejectedReason, httpChannel);
|
||||
if (!pwin) {
|
||||
return;
|
||||
}
|
||||
|
||||
pwin->NotifyContentBlockingState(aRejectedReason, httpChannel);
|
||||
|
||||
ReportBlockingToConsole(pwin, httpChannel, aRejectedReason);
|
||||
}
|
||||
|
@ -32,6 +32,9 @@ the ``onManifestEntry()`` method for the API is no longer called,
|
||||
but an API can examine the new manifest after an update to detect that
|
||||
the key has been removed.
|
||||
|
||||
Handling lifecycle events
|
||||
-------------------------
|
||||
|
||||
To be notified of update and uninstall events, an extension lists these
|
||||
events in the API manifest:
|
||||
|
||||
@ -47,4 +50,11 @@ If these properties are present, the ``onUpdate()`` and ``onUninstall()``
|
||||
methods will be called for the relevant ``ExtensionAPI`` instances when
|
||||
an extension that uses the API is updated or uninstalled.
|
||||
|
||||
Note that these events can be triggered on extensions that are inactive.
|
||||
For that reason, these events can only be handled by extension APIs that
|
||||
are built into the browser. In other words, these events cannot be
|
||||
handled by APIs that are implemented in WebExtension experiments. If the
|
||||
implementation of an API relies on these events for correctness, the API
|
||||
must be built into the browser and not delivered via an experiment.
|
||||
|
||||
.. Should we even document onStartup()? I think no...
|
||||
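For orientation, a minimal sketch of a built-in API that reacts to these
lifecycle events. The ``myFeature`` namespace is hypothetical, and the exact
method signatures are an assumption based on the description above rather than
something this patch defines::

  // Sketch only: assumes the ExtensionAPI base class provided by the
  // browser's extension framework is in scope for this module.
  this.myFeature = class extends ExtensionAPI {
    onUpdate(manifest) {
      // Assumed signature: invoked when an extension that uses this API is
      // updated, even if that extension is currently inactive. The new
      // manifest can be inspected to see whether the API key was removed.
    }

    onUninstall(id) {
      // Assumed signature: invoked when an extension that uses this API is
      // uninstalled.
    }
  };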
|
@ -19,6 +19,17 @@ XPCOMUtils.defineLazyGetter(this, "filenamesRegex",
|
||||
() => /^bookmarks-([0-9-]+)(?:_([0-9]+)){0,1}(?:_([a-z0-9=+-]{24})){0,1}\.(json(lz4)?)$/i
|
||||
);
|
||||
|
||||
async function limitBackups(aMaxBackups, backupFiles) {
|
||||
if (typeof aMaxBackups == "number" && aMaxBackups > -1 &&
|
||||
backupFiles.length >= aMaxBackups) {
|
||||
let numberOfBackupsToDelete = backupFiles.length - aMaxBackups;
|
||||
while (numberOfBackupsToDelete--) {
|
||||
let oldestBackup = backupFiles.pop();
|
||||
await OS.File.remove(oldestBackup);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Appends meta-data information to a given filename.
|
||||
*/
|
||||
@ -240,6 +251,13 @@ var PlacesBackups = {
|
||||
}
|
||||
this._backupFiles.unshift(aFilePath);
|
||||
} else {
|
||||
let aMaxBackup = Services.prefs.getIntPref("browser.bookmarks.max_backups");
|
||||
if (aMaxBackup === 0) {
|
||||
if (!this._backupFiles)
|
||||
await this.getBackupFiles();
|
||||
limitBackups(aMaxBackup, this._backupFiles);
|
||||
return nodeCount;
|
||||
}
|
||||
// If we are saving to a folder different than our backups folder, then
|
||||
// we also want to create a new compressed version in it.
|
||||
// This way we ensure the latest valid backup is the same saved by the
|
||||
@ -269,9 +287,9 @@ var PlacesBackups = {
|
||||
}
|
||||
let jsonString = await OS.File.read(aFilePath);
|
||||
await OS.File.writeAtomic(newFilePath, jsonString, { compression: "lz4" });
|
||||
await limitBackups(aMaxBackup, this._backupFiles);
|
||||
}
|
||||
}
|
||||
|
||||
return nodeCount;
|
||||
},
|
||||
|
||||
@ -289,22 +307,12 @@ var PlacesBackups = {
|
||||
* @return {Promise}
|
||||
*/
|
||||
create: function PB_create(aMaxBackups, aForceBackup) {
|
||||
let limitBackups = async () => {
|
||||
let backupFiles = await this.getBackupFiles();
|
||||
if (typeof aMaxBackups == "number" && aMaxBackups > -1 &&
|
||||
backupFiles.length >= aMaxBackups) {
|
||||
let numberOfBackupsToDelete = backupFiles.length - aMaxBackups;
|
||||
while (numberOfBackupsToDelete--) {
|
||||
let oldestBackup = this._backupFiles.pop();
|
||||
await OS.File.remove(oldestBackup);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
return (async () => {
|
||||
if (aMaxBackups === 0) {
|
||||
// Backups are disabled, delete any existing one and bail out.
|
||||
await limitBackups(0);
|
||||
if (!this._backupFiles)
|
||||
await this.getBackupFiles();
|
||||
await limitBackups(0, this._backupFiles);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -366,7 +374,7 @@ var PlacesBackups = {
|
||||
this._backupFiles.unshift(newBackupFileWithMetadata);
|
||||
|
||||
// Limit the number of backups.
|
||||
await limitBackups(aMaxBackups);
|
||||
await limitBackups(aMaxBackups, this._backupFiles);
|
||||
})();
|
||||
},
|
||||
|
||||
|
@ -10,11 +10,10 @@
|
||||
|
||||
const NUMBER_OF_BACKUPS = 10;
|
||||
|
||||
add_task(async function() {
|
||||
async function createBackups(nBackups, dateObj, bookmarksBackupDir) {
|
||||
// Generate random dates.
|
||||
let dateObj = new Date();
|
||||
let dates = [];
|
||||
while (dates.length < NUMBER_OF_BACKUPS) {
|
||||
while (dates.length < nBackups) {
|
||||
// Use last year to ensure today's backup is the newest.
|
||||
let randomDate = new Date(dateObj.getFullYear() - 1,
|
||||
Math.floor(12 * Math.random()),
|
||||
@ -25,10 +24,6 @@ add_task(async function() {
|
||||
// Sort dates from oldest to newest.
|
||||
dates.sort();
|
||||
|
||||
// Get and cleanup the backups folder.
|
||||
let backupFolderPath = await PlacesBackups.getBackupFolder();
|
||||
let bookmarksBackupDir = new FileUtils.File(backupFolderPath);
|
||||
|
||||
// Fake backups are created backwards to ensure we won't consider file
|
||||
// creation time.
|
||||
// Create fake backups for the newest dates.
|
||||
@ -42,10 +37,10 @@ add_task(async function() {
|
||||
do_throw("Unable to create fake backup " + backupFile.leafName);
|
||||
}
|
||||
|
||||
await PlacesBackups.create(NUMBER_OF_BACKUPS);
|
||||
// Add today's backup.
|
||||
dates.push(dateObj.getTime());
|
||||
return dates;
|
||||
}
|
||||
|
||||
async function checkBackups(dates, bookmarksBackupDir) {
|
||||
// Check backups. We have 11 dates but the max number is 10, so the
|
||||
// oldest backup should have been removed.
|
||||
for (let i = 0; i < dates.length; i++) {
|
||||
@ -72,7 +67,9 @@ add_task(async function() {
|
||||
if (backupFile.exists() != shouldExist)
|
||||
do_throw("Backup should " + (shouldExist ? "" : "not") + " exist: " + backupFilename);
|
||||
}
|
||||
}
|
||||
|
||||
async function cleanupFiles(bookmarksBackupDir) {
|
||||
// Cleanup backups folder.
|
||||
// XXX: Can't use bookmarksBackupDir.remove(true) because file lock happens
|
||||
// on WIN XP.
|
||||
@ -81,5 +78,53 @@ add_task(async function() {
|
||||
let entry = files.nextFile;
|
||||
entry.remove(false);
|
||||
}
|
||||
// Clear the cache to match the manual removal of files
|
||||
delete PlacesBackups._backupFiles;
|
||||
Assert.ok(!bookmarksBackupDir.directoryEntries.hasMoreElements());
|
||||
}
|
||||
|
||||
add_task(async function test_create_backups() {
|
||||
let backupFolderPath = await PlacesBackups.getBackupFolder();
|
||||
let bookmarksBackupDir = new FileUtils.File(backupFolderPath);
|
||||
|
||||
let dateObj = new Date();
|
||||
let dates = await createBackups(NUMBER_OF_BACKUPS, dateObj, bookmarksBackupDir);
|
||||
// Add today's backup.
|
||||
await PlacesBackups.create(NUMBER_OF_BACKUPS);
|
||||
dates.push(dateObj.getTime());
|
||||
await checkBackups(dates, bookmarksBackupDir);
|
||||
await cleanupFiles(bookmarksBackupDir);
|
||||
});
|
||||
|
||||
add_task(async function test_saveBookmarks_with_no_backups() {
|
||||
let backupFolderPath = await PlacesBackups.getBackupFolder();
|
||||
let bookmarksBackupDir = new FileUtils.File(backupFolderPath);
|
||||
|
||||
Services.prefs.setIntPref("browser.bookmarks.max_backups", 0);
|
||||
|
||||
let filePath = do_get_tempdir().path + "/backup.json";
|
||||
await PlacesBackups.saveBookmarksToJSONFile(filePath);
|
||||
let files = bookmarksBackupDir.directoryEntries;
|
||||
Assert.ok(!files.hasMoreElements(), "Should have no backup files.");
|
||||
await OS.File.remove(filePath);
|
||||
// We don't need to call cleanupFiles as we are not creating any
|
||||
// backups but need to reset the cache.
|
||||
delete PlacesBackups._backupFiles;
|
||||
});
|
||||
|
||||
add_task(async function test_saveBookmarks_with_backups() {
|
||||
let backupFolderPath = await PlacesBackups.getBackupFolder();
|
||||
let bookmarksBackupDir = new FileUtils.File(backupFolderPath);
|
||||
|
||||
Services.prefs.setIntPref("browser.bookmarks.max_backups", NUMBER_OF_BACKUPS);
|
||||
|
||||
let filePath = do_get_tempdir().path + "/backup.json";
|
||||
let dateObj = new Date();
|
||||
let dates = await createBackups(NUMBER_OF_BACKUPS, dateObj, bookmarksBackupDir);
|
||||
|
||||
await PlacesBackups.saveBookmarksToJSONFile(filePath);
|
||||
dates.push(dateObj.getTime());
|
||||
await checkBackups(dates, bookmarksBackupDir);
|
||||
await OS.File.remove(filePath);
|
||||
await cleanupFiles(bookmarksBackupDir);
|
||||
});
|
||||
|
@ -103,7 +103,7 @@ var ReaderMode = {
|
||||
let webNav = docShell.QueryInterface(Ci.nsIWebNavigation);
|
||||
let sh = webNav.sessionHistory;
|
||||
if (webNav.canGoForward) {
|
||||
let forwardEntry = sh.legacySHistory.getEntryAtIndex(sh.index + 1, false);
|
||||
let forwardEntry = sh.legacySHistory.getEntryAtIndex(sh.index + 1);
|
||||
let forwardURL = forwardEntry.URI.spec;
|
||||
if (forwardURL && (forwardURL == readerURL || !readerURL)) {
|
||||
webNav.goForward();
|
||||
@ -124,7 +124,7 @@ var ReaderMode = {
|
||||
let webNav = docShell.QueryInterface(Ci.nsIWebNavigation);
|
||||
let sh = webNav.sessionHistory;
|
||||
if (webNav.canGoBack) {
|
||||
let prevEntry = sh.legacySHistory.getEntryAtIndex(sh.index - 1, false);
|
||||
let prevEntry = sh.legacySHistory.getEntryAtIndex(sh.index - 1);
|
||||
let prevURL = prevEntry.URI.spec;
|
||||
if (prevURL && (prevURL == originalURL || !originalURL)) {
|
||||
webNav.goBack();
|
||||
|
@ -59,9 +59,9 @@ add_task(async function test_history() {
|
||||
.getInterface(Ci.nsISHistory);
|
||||
is(history.count, 2, "Should be two history items");
|
||||
is(history.index, 1, "Should be at the right place in history");
|
||||
let entry = history.getEntryAtIndex(0, false);
|
||||
let entry = history.getEntryAtIndex(0);
|
||||
is(entry.URI.spec, dummy1, "Should have the right history entry");
|
||||
entry = history.getEntryAtIndex(1, false);
|
||||
entry = history.getEntryAtIndex(1);
|
||||
is(entry.URI.spec, dummy2, "Should have the right history entry");
|
||||
});
|
||||
|
||||
@ -92,7 +92,7 @@ add_task(async function test_flags() {
|
||||
.getInterface(Ci.nsISHistory);
|
||||
is(history.count, count, "Should be one history item");
|
||||
is(history.index, index, "Should be at the right place in history");
|
||||
let entry = history.getEntryAtIndex(index, false);
|
||||
let entry = history.getEntryAtIndex(index);
|
||||
is(entry.URI.spec, dummy2, "Should have the right history entry");
|
||||
});
|
||||
}
|
||||
|
@ -277,7 +277,7 @@ var E10SUtils = {
|
||||
let sessionHistory = webNav.sessionHistory;
|
||||
let requestedIndex = sessionHistory.legacySHistory.requestedIndex;
|
||||
if (requestedIndex >= 0) {
|
||||
if (sessionHistory.legacySHistory.getEntryAtIndex(requestedIndex, false).loadedInThisProcess) {
|
||||
if (sessionHistory.legacySHistory.getEntryAtIndex(requestedIndex).loadedInThisProcess) {
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -81,18 +81,15 @@ var SessionHistoryInternal = {
|
||||
let skippedCount = 0, entryCount = 0;
|
||||
|
||||
if (history && history.count > 0) {
|
||||
// Loop over the transactions so we can get the persist property for each
|
||||
// one.
|
||||
let shistory = history.legacySHistory.QueryInterface(Ci.nsISHistory);
|
||||
let count = shistory.count;
|
||||
for ( ; entryCount < count; entryCount++) {
|
||||
let txn = shistory.GetTransactionAtIndex(entryCount);
|
||||
let shEntry = shistory.getEntryAtIndex(entryCount);
|
||||
if (entryCount <= aFromIdx) {
|
||||
skippedCount++;
|
||||
continue;
|
||||
}
|
||||
let entry = this.serializeEntry(txn.sHEntry);
|
||||
entry.persist = txn.persist;
|
||||
let entry = this.serializeEntry(shEntry);
|
||||
data.entries.push(entry);
|
||||
}
|
||||
|
||||
@ -252,6 +249,8 @@ var SessionHistoryInternal = {
|
||||
}
|
||||
}
|
||||
|
||||
entry.persist = shEntry.persist;
|
||||
|
||||
return entry;
|
||||
},
|
||||
|
||||
@ -315,7 +314,7 @@ var SessionHistoryInternal = {
|
||||
// Select the right history entry.
|
||||
let index = tabData.index - 1;
|
||||
if (index < history.count && history.index != index) {
|
||||
history.getEntryAtIndex(index, true);
|
||||
history.index = index;
|
||||
}
|
||||
return history;
|
||||
},
|
||||
|
@ -330,7 +330,7 @@ interface nsIWebProgressListener : nsISupports
|
||||
const unsigned long STATE_COOKIES_BLOCKED_BY_PERMISSION = 0x10000000;
|
||||
const unsigned long STATE_COOKIES_BLOCKED_TRACKER = 0x20000000;
|
||||
const unsigned long STATE_COOKIES_BLOCKED_ALL = 0x40000000;
|
||||
const unsigned long STATE_COOKIES_BLOCKED_FOREIGN = 0x80000000;
|
||||
const unsigned long STATE_COOKIES_BLOCKED_FOREIGN = 0x00000080;
|
||||
const unsigned long STATE_BLOCKED_SLOW_TRACKING_CONTENT = 0x00000040;
|
||||
|
||||
/**
|
||||
|
@ -38,7 +38,6 @@
|
||||
#include "nsWrapperCache.h"
|
||||
#include "nsStringBuffer.h"
|
||||
|
||||
#include "nsIPlatformInfo.h"
|
||||
#include "nsThread.h"
|
||||
#include "nsThreadUtils.h"
|
||||
#include "xpcpublic.h"
|
||||
|
@ -91,7 +91,6 @@
|
||||
#endif
|
||||
|
||||
#include "nsIException.h"
|
||||
#include "nsIPlatformInfo.h"
|
||||
#include "nsThread.h"
|
||||
#include "nsThreadUtils.h"
|
||||
#include "xpcpublic.h"
|
||||
@ -486,29 +485,6 @@ void JSObjectsTenuredCb(JSContext* aContext, void* aData)
|
||||
static_cast<CycleCollectedJSRuntime*>(aData)->JSObjectsTenured();
|
||||
}
|
||||
|
||||
bool
|
||||
mozilla::GetBuildId(JS::BuildIdCharVector* aBuildID)
|
||||
{
|
||||
nsCOMPtr<nsIPlatformInfo> info = do_GetService("@mozilla.org/xre/app-info;1");
|
||||
if (!info) {
|
||||
return false;
|
||||
}
|
||||
|
||||
nsCString buildID;
|
||||
nsresult rv = info->GetPlatformBuildID(buildID);
|
||||
NS_ENSURE_SUCCESS(rv, false);
|
||||
|
||||
if (!aBuildID->resize(buildID.Length())) {
|
||||
return false;
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < buildID.Length(); i++) {
|
||||
(*aBuildID)[i] = buildID[i];
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static void
|
||||
MozCrashWarningReporter(JSContext*, JSErrorReport*)
|
||||
{
|
||||
@ -555,7 +531,6 @@ CycleCollectedJSRuntime::CycleCollectedJSRuntime(JSContext* aCx)
|
||||
JS_SetObjectsTenuredCallback(aCx, JSObjectsTenuredCb, this);
|
||||
JS::SetOutOfMemoryCallback(aCx, OutOfMemoryCallback, this);
|
||||
JS_SetExternalStringSizeofCallback(aCx, SizeofExternalStringCallback);
|
||||
JS::SetBuildIdOp(aCx, GetBuildId);
|
||||
JS::SetWarningReporter(aCx, MozCrashWarningReporter);
|
||||
|
||||
js::AutoEnterOOMUnsafeRegion::setAnnotateOOMAllocationSizeCallback(
|
||||
|
@ -427,9 +427,6 @@ inline bool AddToCCKind(JS::TraceKind aKind)
|
||||
aKind == JS::TraceKind::RegExpShared;
|
||||
}
|
||||
|
||||
bool
|
||||
GetBuildId(JS::BuildIdCharVector* aBuildID);
|
||||
|
||||
} // namespace mozilla
|
||||
|
||||
#endif // mozilla_CycleCollectedJSRuntime_h
|
||||
|
@ -157,6 +157,7 @@
|
||||
#include "mozilla/CycleCollectedJSContext.h"
|
||||
#include "mozilla/CycleCollectedJSRuntime.h"
|
||||
#include "mozilla/DebugOnly.h"
|
||||
#include "mozilla/HashFunctions.h"
|
||||
#include "mozilla/HashTable.h"
|
||||
#include "mozilla/HoldDropJSObjects.h"
|
||||
/* This must occur *after* base/process_util.h to avoid typedefs conflicts. */
|
||||
@ -164,6 +165,7 @@
|
||||
#include "mozilla/MemoryReporting.h"
|
||||
#include "mozilla/Move.h"
|
||||
#include "mozilla/SegmentedVector.h"
|
||||
#include "mozilla/Variant.h"
|
||||
|
||||
#include "nsCycleCollectionParticipant.h"
|
||||
#include "nsCycleCollectionNoteRootCallback.h"
|
||||
@ -2092,6 +2094,43 @@ private:
|
||||
nsAutoPtr<NodePool::Enumerator> mCurrNode;
|
||||
uint32_t mNoteChildCount;
|
||||
|
||||
class GraphCache
|
||||
{
|
||||
public:
|
||||
// Returns a pointer if the entry is already cached, or an index if it isn't.
|
||||
Variant<PtrInfo*, uint32_t> GetEntryOrIndex(void* aPtr)
|
||||
{
|
||||
uint32_t hash = mozilla::HashGeneric(aPtr);
|
||||
uint32_t index = hash % kCacheSize;
|
||||
PtrInfo* result = mCache[index];
|
||||
if (result && result->mPointer == aPtr) {
|
||||
return AsVariant(result);
|
||||
}
|
||||
|
||||
return AsVariant(index);
|
||||
}
|
||||
|
||||
void Add(uint32_t aIndex, PtrInfo* aPtrInfo)
|
||||
{
|
||||
mCache[aIndex] = aPtrInfo;
|
||||
}
|
||||
|
||||
void Remove(void* aPtr)
|
||||
{
|
||||
uint32_t hash = mozilla::HashGeneric(aPtr);
|
||||
uint32_t index = hash % kCacheSize;
|
||||
PtrInfo* pinfo = mCache[index];
|
||||
if (pinfo && pinfo->mPointer == aPtr) {
|
||||
mCache[index] = nullptr;
|
||||
}
|
||||
}
|
||||
private:
|
||||
const static uint32_t kCacheSize = 491;
|
||||
PtrInfo* mCache[kCacheSize] = {0};
|
||||
};
|
||||
|
||||
GraphCache mGraphCache;
|
||||
|
||||
public:
|
||||
CCGraphBuilder(CCGraph& aGraph,
|
||||
CycleCollectorResults& aResults,
|
||||
@ -2113,6 +2152,10 @@ public:
|
||||
// Do some work traversing nodes in the graph. Returns true if this graph building is finished.
|
||||
bool BuildGraph(SliceBudget& aBudget);
|
||||
|
||||
void RemoveCachedEntry(void* aPtr)
|
||||
{
|
||||
mGraphCache.Remove(aPtr);
|
||||
}
|
||||
private:
|
||||
PtrInfo* AddNode(void* aPtr, nsCycleCollectionParticipant* aParticipant);
|
||||
PtrInfo* AddWeakMapNode(JS::GCCellPtr aThing);
|
||||
@ -2210,6 +2253,10 @@ CCGraphBuilder::CCGraphBuilder(CCGraph& aGraph,
|
||||
, mMergeZones(aMergeZones)
|
||||
, mNoteChildCount(0)
|
||||
{
|
||||
// 4096 is an allocation bucket size.
|
||||
static_assert(sizeof(CCGraphBuilder) <= 4096,
|
||||
"Don't create too large CCGraphBuilder objects");
|
||||
|
||||
if (aCCRuntime) {
|
||||
mJSParticipant = aCCRuntime->GCThingParticipant();
|
||||
mJSZoneParticipant = aCCRuntime->ZoneParticipant();
|
||||
@ -2240,6 +2287,15 @@ CCGraphBuilder::AddNode(void* aPtr, nsCycleCollectionParticipant* aParticipant)
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
Variant<PtrInfo*, uint32_t> cacheVariant = mGraphCache.GetEntryOrIndex(aPtr);
|
||||
if (cacheVariant.is<PtrInfo*>()) {
|
||||
MOZ_ASSERT(cacheVariant.as<PtrInfo*>()->mParticipant == aParticipant,
|
||||
"nsCycleCollectionParticipant shouldn't change!");
|
||||
return cacheVariant.as<PtrInfo*>();
|
||||
}
|
||||
|
||||
MOZ_ASSERT(cacheVariant.is<uint32_t>());
|
||||
|
||||
PtrInfo* result;
|
||||
auto p = mGraph.mPtrInfoMap.lookupForAdd(aPtr);
|
||||
if (!p) {
|
||||
@ -2263,6 +2319,8 @@ CCGraphBuilder::AddNode(void* aPtr, nsCycleCollectionParticipant* aParticipant)
|
||||
"nsCycleCollectionParticipant shouldn't change!");
|
||||
}
|
||||
|
||||
mGraphCache.Add(cacheVariant.as<uint32_t>(), result);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
@ -4044,6 +4102,9 @@ nsCycleCollector::RemoveObjectFromGraph(void* aObj)
|
||||
}
|
||||
|
||||
mGraph.RemoveObjectFromMap(aObj);
|
||||
if (mBuilder) {
|
||||
mBuilder->RemoveCachedEntry(aObj);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
|