mirror of
https://github.com/mozilla/gecko-dev.git
synced 2024-10-28 04:35:33 +00:00
Merge mozilla-central to autoland. a=merge CLOSED TREE
This commit is contained in:
commit
631dad2d7d
@ -1904,7 +1904,11 @@ window._gBrowser = {
|
||||
case "currentURI":
|
||||
getter = () => {
|
||||
let url = SessionStore.getLazyTabValue(aTab, "url");
|
||||
return Services.io.newURI(url);
|
||||
// Avoid recreating the same nsIURI object over and over again...
|
||||
if (browser._cachedCurrentURI) {
|
||||
return browser._cachedCurrentURI;
|
||||
}
|
||||
return browser._cachedCurrentURI = Services.io.newURI(url);
|
||||
};
|
||||
break;
|
||||
case "didStartLoadSinceLastUserTyping":
|
||||
@ -1988,6 +1992,7 @@ window._gBrowser = {
|
||||
let { uriIsAboutBlank, remoteType, usingPreloadedContent } =
|
||||
aTab._browserParams;
|
||||
delete aTab._browserParams;
|
||||
delete aTab._cachedCurrentURI;
|
||||
|
||||
let notificationbox = this.getNotificationBox(browser);
|
||||
let uniqueId = this._generateUniquePanelID();
|
||||
|
@ -1,13 +1,13 @@
|
||||
This is the debugger.html project output.
|
||||
See https://github.com/devtools-html/debugger.html
|
||||
|
||||
Version 70
|
||||
Version 71
|
||||
|
||||
Comparison: https://github.com/devtools-html/debugger.html/compare/release-69...release-70
|
||||
Comparison: https://github.com/devtools-html/debugger.html/compare/release-70...release-71
|
||||
|
||||
Packages:
|
||||
- babel-plugin-transform-es2015-modules-commonjs @6.26.2
|
||||
- babel-preset-react @6.24.1
|
||||
- react @16.2.0
|
||||
- react-dom @16.2.0
|
||||
- react @16.4.1
|
||||
- react-dom @16.4.1
|
||||
- webpack @3.11.0
|
||||
|
31
devtools/client/debugger/new/dist/debugger.css
vendored
31
devtools/client/debugger/new/dist/debugger.css
vendored
@ -2706,32 +2706,6 @@ html[dir="rtl"] .editor-mount {
|
||||
direction: ltr;
|
||||
}
|
||||
|
||||
.theme-light {
|
||||
--gutter-hover-background-color: #dde1e4;
|
||||
}
|
||||
|
||||
.theme-dark {
|
||||
--gutter-hover-background-color: #414141;
|
||||
}
|
||||
|
||||
:not(.empty-line):not(.new-breakpoint) > .CodeMirror-gutter-wrapper:hover {
|
||||
width: 60px;
|
||||
height: 13px;
|
||||
left: -55px !important;
|
||||
background-color: var(--gutter-hover-background-color) !important;
|
||||
mask: url("chrome://devtools/skin/images/debugger/breakpoint.svg") no-repeat;
|
||||
mask-size: 100%;
|
||||
mask-position: 0 1px;
|
||||
}
|
||||
|
||||
:not(.empty-line):not(.new-breakpoint)
|
||||
> .CodeMirror-gutter-wrapper:hover
|
||||
> .CodeMirror-linenumber {
|
||||
left: auto !important;
|
||||
right: 6px;
|
||||
color: var(--theme-body-color);
|
||||
}
|
||||
|
||||
.editor-wrapper .breakpoints {
|
||||
position: absolute;
|
||||
top: 0;
|
||||
@ -2779,11 +2753,6 @@ html[dir="rtl"] .editor-mount {
|
||||
border: 1px solid #00b6ff;
|
||||
}
|
||||
|
||||
.editor .breakpoint {
|
||||
position: absolute;
|
||||
right: -2px;
|
||||
}
|
||||
|
||||
.editor.new-breakpoint.folding-enabled svg {
|
||||
right: -16px;
|
||||
}
|
||||
|
@ -15,19 +15,13 @@
|
||||
<div id="mount"></div>
|
||||
<script type="application/javascript" src="chrome://devtools/content/shared/theme-switching.js"></script>
|
||||
<script type="text/javascript">
|
||||
try {
|
||||
const { BrowserLoader } = ChromeUtils.import("resource://devtools/client/shared/browser-loader.js", {});
|
||||
const { require } = BrowserLoader({
|
||||
baseURI: "resource://devtools/client/debugger/new",
|
||||
window,
|
||||
});
|
||||
Debugger = require("devtools/client/debugger/new/src/main");
|
||||
} catch(e) {
|
||||
dump("Exception happened while loading the debugger:\n");
|
||||
dump(e + "\n");
|
||||
dump(e.stack + "\n");
|
||||
}
|
||||
const { BrowserLoader } = ChromeUtils.import("resource://devtools/client/shared/browser-loader.js", {});
|
||||
const { require } = BrowserLoader({
|
||||
baseURI: "resource://devtools/client/debugger/new",
|
||||
window,
|
||||
});
|
||||
Debugger = require("devtools/client/debugger/new/src/main");
|
||||
</script>
|
||||
</body>
|
||||
|
||||
</html>
|
||||
</html>
|
@ -101,4 +101,4 @@ const clientEvents = {
|
||||
newSource
|
||||
};
|
||||
exports.setupEvents = setupEvents;
|
||||
exports.clientEvents = clientEvents;
|
||||
exports.clientEvents = clientEvents;
|
@ -148,23 +148,23 @@ class SourceFooter extends _react.PureComponent {
|
||||
selectedSource
|
||||
} = this.props;
|
||||
|
||||
if (mappedSource) {
|
||||
const filename = (0, _source.getFilename)(mappedSource);
|
||||
const tooltip = L10N.getFormatStr("sourceFooter.mappedSourceTooltip", filename);
|
||||
const title = L10N.getFormatStr("sourceFooter.mappedSource", filename);
|
||||
const mappedSourceLocation = {
|
||||
sourceId: selectedSource.id,
|
||||
line: 1,
|
||||
column: 1
|
||||
};
|
||||
return _react2.default.createElement("button", {
|
||||
className: "mapped-source",
|
||||
onClick: () => jumpToMappedLocation(mappedSourceLocation),
|
||||
title: tooltip
|
||||
}, _react2.default.createElement("span", null, title));
|
||||
if (!mappedSource) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return null;
|
||||
const filename = (0, _source.getFilename)(mappedSource);
|
||||
const tooltip = L10N.getFormatStr("sourceFooter.mappedSourceTooltip", filename);
|
||||
const title = L10N.getFormatStr("sourceFooter.mappedSource", filename);
|
||||
const mappedSourceLocation = {
|
||||
sourceId: selectedSource.id,
|
||||
line: 1,
|
||||
column: 1
|
||||
};
|
||||
return _react2.default.createElement("button", {
|
||||
className: "mapped-source",
|
||||
onClick: () => jumpToMappedLocation(mappedSourceLocation),
|
||||
title: tooltip
|
||||
}, _react2.default.createElement("span", null, title));
|
||||
}
|
||||
|
||||
render() {
|
||||
|
@ -27,12 +27,7 @@ function findBreakpointSources(sources, breakpoints) {
|
||||
return (0, _lodash.sortBy)(breakpointSources, source => (0, _source.getFilename)(source));
|
||||
}
|
||||
|
||||
function _getBreakpointSources(breakpoints, sources) {
|
||||
const breakpointSources = findBreakpointSources(sources, breakpoints);
|
||||
return breakpointSources.map(source => ({
|
||||
source,
|
||||
breakpoints: getBreakpointsForSource(source, breakpoints)
|
||||
}));
|
||||
}
|
||||
|
||||
const getBreakpointSources = exports.getBreakpointSources = (0, _reselect.createSelector)(_selectors.getBreakpoints, _selectors.getSources, _getBreakpointSources);
|
||||
const getBreakpointSources = exports.getBreakpointSources = (0, _reselect.createSelector)(_selectors.getBreakpoints, _selectors.getSources, (breakpoints, sources) => findBreakpointSources(sources, breakpoints).map(source => ({
|
||||
source,
|
||||
breakpoints: getBreakpointsForSource(source, breakpoints)
|
||||
})));
|
@ -9,25 +9,31 @@ var _selectors = require("../selectors/index");
|
||||
|
||||
var _lodash = require("devtools/client/shared/vendor/lodash");
|
||||
|
||||
var _source = require("../utils/source");
|
||||
var _sourcesTree = require("../utils/sources-tree/index");
|
||||
|
||||
var _reselect = require("devtools/client/debugger/new/dist/vendors").vendored["reselect"];
|
||||
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at <http://mozilla.org/MPL/2.0/>. */
|
||||
function getRelativeUrl(url, root) {
|
||||
function getRelativeUrl(source, root) {
|
||||
const {
|
||||
group,
|
||||
path
|
||||
} = (0, _sourcesTree.getURL)(source);
|
||||
|
||||
if (!root) {
|
||||
return (0, _source.getSourcePath)(url);
|
||||
return path;
|
||||
} // + 1 removes the leading "/"
|
||||
|
||||
|
||||
const url = group + path;
|
||||
return url.slice(url.indexOf(root) + root.length + 1);
|
||||
}
|
||||
|
||||
function formatSource(source, root) {
|
||||
return { ...source,
|
||||
relativeUrl: getRelativeUrl(source.url, root)
|
||||
relativeUrl: getRelativeUrl(source, root)
|
||||
};
|
||||
}
|
||||
|
||||
|
@ -180,7 +180,7 @@ function getFilename(source) {
|
||||
const {
|
||||
filename
|
||||
} = (0, _sourcesTree.getURL)(source);
|
||||
return filename;
|
||||
return getRawSourceURL(filename);
|
||||
}
|
||||
/**
|
||||
* Provides a middle-trunated filename
|
||||
@ -460,5 +460,5 @@ function getSourceClassnames(source, sourceMetaData) {
|
||||
return "blackBox";
|
||||
}
|
||||
|
||||
return sourceTypes[(0, _sourcesTree.getFileExtension)(source.url)] || defaultClassName;
|
||||
return sourceTypes[(0, _sourcesTree.getFileExtension)(source)] || defaultClassName;
|
||||
}
|
@ -120,9 +120,8 @@ function addSourceToNode(node, url, source) {
|
||||
*/
|
||||
|
||||
|
||||
function addToTree(tree, source, debuggeeUrl, projectRoot) {
|
||||
const url = (0, _getURL.getURL)(source, debuggeeUrl);
|
||||
const debuggeeHost = (0, _treeOrder.getDomain)(debuggeeUrl);
|
||||
function addToTree(tree, source, debuggeeHost, projectRoot) {
|
||||
const url = (0, _getURL.getURL)(source, debuggeeHost);
|
||||
|
||||
if ((0, _utils.isInvalidUrl)(url, source)) {
|
||||
return;
|
||||
|
@ -11,6 +11,8 @@ var _collapseTree = require("./collapseTree");
|
||||
|
||||
var _utils = require("./utils");
|
||||
|
||||
var _treeOrder = require("./treeOrder");
|
||||
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at <http://mozilla.org/MPL/2.0/>. */
|
||||
@ -20,10 +22,11 @@ function createTree({
|
||||
projectRoot
|
||||
}) {
|
||||
const uncollapsedTree = (0, _utils.createDirectoryNode)("root", "", []);
|
||||
const debuggeeHost = (0, _treeOrder.getDomain)(debuggeeUrl);
|
||||
|
||||
for (const sourceId in sources) {
|
||||
const source = sources[sourceId];
|
||||
(0, _addToTree.addToTree)(uncollapsedTree, source, debuggeeUrl, projectRoot);
|
||||
(0, _addToTree.addToTree)(uncollapsedTree, source, debuggeeHost, projectRoot);
|
||||
}
|
||||
|
||||
const sourceTree = (0, _collapseTree.collapseTree)(uncollapsedTree);
|
||||
|
@ -7,57 +7,48 @@ exports.getDirectories = getDirectories;
|
||||
|
||||
var _utils = require("./utils");
|
||||
|
||||
var _getURL = require("./getURL");
|
||||
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at <http://mozilla.org/MPL/2.0/>. */
|
||||
function findSource(sourceTree, sourceUrl) {
|
||||
let returnTarget = null;
|
||||
|
||||
function _traverse(subtree) {
|
||||
if (subtree.type === "directory") {
|
||||
for (const child of subtree.contents) {
|
||||
_traverse(child);
|
||||
}
|
||||
} else if (!returnTarget) {
|
||||
if (subtree.path.replace(/http(s)?:\//, "") == sourceUrl) {
|
||||
returnTarget = subtree;
|
||||
return;
|
||||
}
|
||||
function _traverse(subtree, source) {
|
||||
if (subtree.type === "source") {
|
||||
if (subtree.contents.id === source.id) {
|
||||
return subtree;
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
sourceTree.contents.forEach(node => _traverse(node));
|
||||
|
||||
if (!returnTarget) {
|
||||
return sourceTree;
|
||||
}
|
||||
|
||||
return returnTarget;
|
||||
const matches = subtree.contents.map(child => _traverse(child, source));
|
||||
return matches && matches.filter(Boolean)[0];
|
||||
}
|
||||
|
||||
function getDirectories(source, sourceTree) {
|
||||
const url = (0, _getURL.getURL)(source);
|
||||
const fullUrl = `${url.group}${url.path}`;
|
||||
const parentMap = (0, _utils.createParentMap)(sourceTree);
|
||||
const subtreeSource = findSource(sourceTree, fullUrl);
|
||||
function findSourceItem(sourceTree, source) {
|
||||
return _traverse(sourceTree, source);
|
||||
}
|
||||
|
||||
if (!subtreeSource) {
|
||||
return [];
|
||||
function getAncestors(sourceTree, item) {
|
||||
if (!item) {
|
||||
return null;
|
||||
}
|
||||
|
||||
let node = subtreeSource;
|
||||
const parentMap = (0, _utils.createParentMap)(sourceTree);
|
||||
const directories = [];
|
||||
directories.push(subtreeSource);
|
||||
directories.push(item);
|
||||
|
||||
while (true) {
|
||||
node = parentMap.get(node);
|
||||
item = parentMap.get(item);
|
||||
|
||||
if (!node) {
|
||||
if (!item) {
|
||||
return directories;
|
||||
}
|
||||
|
||||
directories.push(node);
|
||||
directories.push(item);
|
||||
}
|
||||
}
|
||||
|
||||
function getDirectories(source, sourceTree) {
|
||||
const item = findSourceItem(sourceTree, source);
|
||||
const ancestors = getAncestors(sourceTree, item);
|
||||
return ancestors || [sourceTree];
|
||||
}
|
@ -36,7 +36,7 @@ const def = {
|
||||
filename: ""
|
||||
};
|
||||
|
||||
function _getURL(source, debuggeeUrl) {
|
||||
function _getURL(source, defaultDomain) {
|
||||
const {
|
||||
url
|
||||
} = source;
|
||||
@ -98,11 +98,9 @@ function _getURL(source, debuggeeUrl) {
|
||||
group: "file://"
|
||||
};
|
||||
} else if (host === null) {
|
||||
// use anonymous group for weird URLs
|
||||
const defaultDomain = (0, _url.parse)(debuggeeUrl).host;
|
||||
return { ...def,
|
||||
path: url,
|
||||
group: defaultDomain,
|
||||
group: defaultDomain || "",
|
||||
filename
|
||||
};
|
||||
}
|
||||
@ -125,12 +123,12 @@ function _getURL(source, debuggeeUrl) {
|
||||
};
|
||||
}
|
||||
|
||||
function getURL(source, debuggeeUrl = "") {
|
||||
function getURL(source, debuggeeUrl) {
|
||||
if (urlMap.has(source)) {
|
||||
return urlMap.get(source) || def;
|
||||
}
|
||||
|
||||
const url = _getURL(source, debuggeeUrl);
|
||||
const url = _getURL(source, debuggeeUrl || "");
|
||||
|
||||
urlMap.set(source, url);
|
||||
return url;
|
||||
|
@ -13,6 +13,8 @@ var _utils = require("./utils");
|
||||
|
||||
var _lodash = require("devtools/client/shared/vendor/lodash");
|
||||
|
||||
var _treeOrder = require("./treeOrder");
|
||||
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at <http://mozilla.org/MPL/2.0/>. */
|
||||
@ -31,9 +33,10 @@ function updateTree({
|
||||
sourceTree
|
||||
}) {
|
||||
const newSet = newSourcesSet(newSources, prevSources);
|
||||
const debuggeeHost = (0, _treeOrder.getDomain)(debuggeeUrl);
|
||||
|
||||
for (const source of newSet) {
|
||||
(0, _addToTree.addToTree)(uncollapsedTree, source, debuggeeUrl, projectRoot);
|
||||
(0, _addToTree.addToTree)(uncollapsedTree, source, debuggeeHost, projectRoot);
|
||||
}
|
||||
|
||||
const newSourceTree = (0, _collapseTree.collapseTree)(uncollapsedTree);
|
||||
|
@ -22,6 +22,8 @@ var _url = require("devtools/client/debugger/new/dist/vendors").vendored["url"];
|
||||
|
||||
var _source = require("../source");
|
||||
|
||||
var _getURL = require("./getURL");
|
||||
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at <http://mozilla.org/MPL/2.0/>. */
|
||||
@ -69,8 +71,8 @@ function isSource(item) {
|
||||
return item.type === "source";
|
||||
}
|
||||
|
||||
function getFileExtension(url = "") {
|
||||
const parsedUrl = (0, _url.parse)(url).pathname;
|
||||
function getFileExtension(source) {
|
||||
const parsedUrl = (0, _getURL.getURL)(source).path;
|
||||
|
||||
if (!parsedUrl) {
|
||||
return "";
|
||||
@ -80,7 +82,7 @@ function getFileExtension(url = "") {
|
||||
}
|
||||
|
||||
function isNotJavaScript(source) {
|
||||
return ["css", "svg", "png"].includes(getFileExtension(source.url));
|
||||
return ["css", "svg", "png"].includes(getFileExtension(source));
|
||||
}
|
||||
|
||||
function isInvalidUrl(url, source) {
|
||||
|
@ -263,7 +263,6 @@ devtools.jar:
|
||||
skin/images/debugger/arrow.svg (themes/images/debugger/arrow.svg)
|
||||
skin/images/debugger/back.svg (themes/images/debugger/back.svg)
|
||||
skin/images/debugger/blackBox.svg (themes/images/debugger/blackBox.svg)
|
||||
skin/images/debugger/breakpoint.svg (themes/images/debugger/breakpoint.svg)
|
||||
skin/images/debugger/close.svg (themes/images/debugger/close.svg)
|
||||
skin/images/debugger/coffeescript.svg (themes/images/debugger/coffeescript.svg)
|
||||
skin/images/debugger/disable-pausing.svg (themes/images/debugger/disable-pausing.svg)
|
||||
|
@ -1,6 +0,0 @@
|
||||
<!-- This Source Code Form is subject to the terms of the Mozilla Public
|
||||
- License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
- file, You can obtain one at http://mozilla.org/MPL/2.0/. -->
|
||||
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 60 12">
|
||||
<path id="base-path" d="M53.9,0H1C0.4,0,0,0.4,0,1v10c0,0.6,0.4,1,1,1h52.9c0.6,0,1.2-0.3,1.5-0.7L60,6l-4.4-5.3C55,0.3,54.5,0,53.9,0z"/>
|
||||
</svg>
|
Before Width: | Height: | Size: 460 B |
@ -27,16 +27,6 @@
|
||||
#include "mozAutoDocUpdate.h"
|
||||
#include "nsWrapperCacheInlines.h"
|
||||
|
||||
nsIAttribute::nsIAttribute(nsDOMAttributeMap* aAttrMap,
|
||||
already_AddRefed<mozilla::dom::NodeInfo>& aNodeInfo)
|
||||
: nsINode(aNodeInfo), mAttrMap(aAttrMap)
|
||||
{
|
||||
}
|
||||
|
||||
nsIAttribute::~nsIAttribute()
|
||||
{
|
||||
}
|
||||
|
||||
namespace mozilla {
|
||||
namespace dom {
|
||||
|
||||
@ -46,7 +36,7 @@ bool Attr::sInitialized;
|
||||
Attr::Attr(nsDOMAttributeMap *aAttrMap,
|
||||
already_AddRefed<mozilla::dom::NodeInfo>&& aNodeInfo,
|
||||
const nsAString &aValue)
|
||||
: nsIAttribute(aAttrMap, aNodeInfo), mValue(aValue)
|
||||
: nsINode(aNodeInfo), mAttrMap(aAttrMap), mValue(aValue)
|
||||
{
|
||||
MOZ_ASSERT(mNodeInfo, "We must get a nodeinfo here!");
|
||||
MOZ_ASSERT(mNodeInfo->NodeType() == ATTRIBUTE_NODE,
|
||||
@ -89,7 +79,7 @@ NS_IMPL_CYCLE_COLLECTION_CAN_SKIP_BEGIN(Attr)
|
||||
NS_IMPL_CYCLE_COLLECTION_CAN_SKIP_END
|
||||
|
||||
NS_IMPL_CYCLE_COLLECTION_CAN_SKIP_IN_CC_BEGIN(Attr)
|
||||
return tmp->HasKnownLiveWrapperAndDoesNotNeedTracing(static_cast<nsIAttribute*>(tmp));
|
||||
return tmp->HasKnownLiveWrapperAndDoesNotNeedTracing(tmp);
|
||||
NS_IMPL_CYCLE_COLLECTION_CAN_SKIP_IN_CC_END
|
||||
|
||||
NS_IMPL_CYCLE_COLLECTION_CAN_SKIP_THIS_BEGIN(Attr)
|
||||
@ -99,7 +89,7 @@ NS_IMPL_CYCLE_COLLECTION_CAN_SKIP_THIS_END
|
||||
// QueryInterface implementation for Attr
|
||||
NS_INTERFACE_TABLE_HEAD(Attr)
|
||||
NS_WRAPPERCACHE_INTERFACE_TABLE_ENTRY
|
||||
NS_INTERFACE_TABLE(Attr, nsINode, nsIAttribute, EventTarget)
|
||||
NS_INTERFACE_TABLE(Attr, nsINode, EventTarget)
|
||||
NS_INTERFACE_TABLE_TO_MAP_SEGUE_CYCLE_COLLECTION(Attr)
|
||||
NS_INTERFACE_MAP_ENTRY_TEAROFF(nsISupportsWeakReference,
|
||||
new nsNodeSupportsWeakRefTearoff(this))
|
||||
|
@ -12,7 +12,7 @@
|
||||
#define mozilla_dom_Attr_h
|
||||
|
||||
#include "mozilla/Attributes.h"
|
||||
#include "nsIAttribute.h"
|
||||
#include "nsINode.h"
|
||||
#include "nsString.h"
|
||||
#include "nsCOMPtr.h"
|
||||
#include "nsCycleCollectionParticipant.h"
|
||||
@ -26,7 +26,7 @@ namespace dom {
|
||||
|
||||
// Attribute helper class used to wrap up an attribute with a dom
|
||||
// object that implements the DOM Attr interface.
|
||||
class Attr final : public nsIAttribute
|
||||
class Attr final : public nsINode
|
||||
{
|
||||
virtual ~Attr() {}
|
||||
|
||||
@ -51,10 +51,20 @@ public:
|
||||
|
||||
void GetEventTargetParent(EventChainPreVisitor& aVisitor) override;
|
||||
|
||||
// nsIAttribute interface
|
||||
void SetMap(nsDOMAttributeMap *aMap) override;
|
||||
nsDOMAttributeMap* GetMap()
|
||||
{
|
||||
return mAttrMap;
|
||||
}
|
||||
|
||||
void SetMap(nsDOMAttributeMap *aMap);
|
||||
|
||||
Element* GetElement() const;
|
||||
nsresult SetOwnerDocument(nsIDocument* aDocument) override;
|
||||
|
||||
/**
|
||||
* Called when our ownerElement is moved into a new document.
|
||||
* Updates the nodeinfo of this node.
|
||||
*/
|
||||
nsresult SetOwnerDocument(nsIDocument* aDocument);
|
||||
|
||||
// nsINode interface
|
||||
virtual bool IsNodeOfType(uint32_t aFlags) const override;
|
||||
@ -71,8 +81,7 @@ public:
|
||||
static void Initialize();
|
||||
static void Shutdown();
|
||||
|
||||
NS_DECL_CYCLE_COLLECTION_SKIPPABLE_SCRIPT_HOLDER_CLASS_AMBIGUOUS(Attr,
|
||||
nsIAttribute)
|
||||
NS_DECL_CYCLE_COLLECTION_SKIPPABLE_SCRIPT_HOLDER_CLASS(Attr)
|
||||
|
||||
// WebIDL
|
||||
virtual JSObject* WrapNode(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) override;
|
||||
@ -100,6 +109,7 @@ protected:
|
||||
static bool sInitialized;
|
||||
|
||||
private:
|
||||
RefPtr<nsDOMAttributeMap> mAttrMap;
|
||||
nsString mValue;
|
||||
};
|
||||
|
||||
|
@ -1194,7 +1194,7 @@ EventSourceImpl::ReestablishConnection()
|
||||
} else {
|
||||
RefPtr<CallRestartConnection> runnable = new CallRestartConnection(this);
|
||||
ErrorResult result;
|
||||
runnable->Dispatch(Terminating, result);
|
||||
runnable->Dispatch(Canceling, result);
|
||||
MOZ_ASSERT(!result.Failed());
|
||||
rv = result.StealNSResult();
|
||||
}
|
||||
@ -1994,7 +1994,7 @@ EventSource::Constructor(const GlobalObject& aGlobal, const nsAString& aURL,
|
||||
|
||||
RefPtr<InitRunnable> initRunnable =
|
||||
new InitRunnable(workerPrivate, eventSourceImp, aURL);
|
||||
initRunnable->Dispatch(Terminating, aRv);
|
||||
initRunnable->Dispatch(Canceling, aRv);
|
||||
if (NS_WARN_IF(aRv.Failed())) {
|
||||
return nullptr;
|
||||
}
|
||||
@ -2022,7 +2022,7 @@ EventSource::Constructor(const GlobalObject& aGlobal, const nsAString& aURL,
|
||||
// Let's connect to the server.
|
||||
RefPtr<ConnectRunnable> connectRunnable =
|
||||
new ConnectRunnable(workerPrivate, eventSourceImp);
|
||||
connectRunnable->Dispatch(Terminating, aRv);
|
||||
connectRunnable->Dispatch(Canceling, aRv);
|
||||
if (NS_WARN_IF(aRv.Failed())) {
|
||||
return nullptr;
|
||||
}
|
||||
|
@ -7,8 +7,10 @@
|
||||
#include "ProcessGlobal.h"
|
||||
|
||||
#include "nsContentCID.h"
|
||||
#include "mozilla/dom/ContentChild.h"
|
||||
#include "mozilla/dom/MessageManagerBinding.h"
|
||||
#include "mozilla/dom/ResolveSystemBinding.h"
|
||||
#include "mozilla/dom/ipc/SharedMap.h"
|
||||
|
||||
using namespace mozilla;
|
||||
using namespace mozilla::dom;
|
||||
@ -72,6 +74,16 @@ ProcessGlobal::Get()
|
||||
return global;
|
||||
}
|
||||
|
||||
already_AddRefed<mozilla::dom::ipc::SharedMap>
|
||||
ProcessGlobal::SharedData()
|
||||
{
|
||||
if (ContentChild* child = ContentChild::GetSingleton()) {
|
||||
return do_AddRef(child->SharedData());
|
||||
}
|
||||
auto* ppmm = nsFrameMessageManager::sParentProcessManager;
|
||||
return do_AddRef(ppmm->SharedData()->GetReadOnly());
|
||||
}
|
||||
|
||||
bool
|
||||
ProcessGlobal::WasCreated()
|
||||
{
|
||||
|
@ -25,6 +25,10 @@
|
||||
namespace mozilla {
|
||||
namespace dom {
|
||||
|
||||
namespace ipc {
|
||||
class SharedMap;
|
||||
}
|
||||
|
||||
class ProcessGlobal :
|
||||
public nsIMessageSender,
|
||||
public nsMessageManagerScriptExecutor,
|
||||
@ -84,6 +88,8 @@ public:
|
||||
mMessageManager->GetInitialProcessData(aCx, aInitialProcessData, aError);
|
||||
}
|
||||
|
||||
already_AddRefed<ipc::SharedMap> SharedData();
|
||||
|
||||
NS_FORWARD_SAFE_NSIMESSAGESENDER(mMessageManager)
|
||||
|
||||
virtual void LoadScript(const nsAString& aURL);
|
||||
|
@ -70,7 +70,6 @@ EXPORTS += [
|
||||
'nsGlobalWindowInner.h', # Because binding headers include it.
|
||||
'nsGlobalWindowOuter.h', # Because binding headers include it.
|
||||
'nsIAnimationObserver.h',
|
||||
'nsIAttribute.h',
|
||||
'nsIContent.h',
|
||||
'nsIContentInlines.h',
|
||||
'nsIContentIterator.h',
|
||||
|
@ -6755,7 +6755,7 @@ nsDOMAttributeMap::BlastSubtreeToPieces(nsINode* aNode)
|
||||
if (Element* element = Element::FromNode(aNode)) {
|
||||
if (const nsDOMAttributeMap* map = element->GetAttributeMap()) {
|
||||
while (true) {
|
||||
nsCOMPtr<nsIAttribute> attr;
|
||||
RefPtr<Attr> attr;
|
||||
{
|
||||
// Use an iterator to get an arbitrary attribute from the
|
||||
// cache. The iterator must be destroyed before any other
|
||||
@ -6767,8 +6767,6 @@ nsDOMAttributeMap::BlastSubtreeToPieces(nsINode* aNode)
|
||||
}
|
||||
attr = iter.UserData();
|
||||
}
|
||||
NS_ASSERTION(attr.get(),
|
||||
"non-nsIAttribute somehow made it into the hashmap?!");
|
||||
|
||||
BlastSubtreeToPieces(attr);
|
||||
|
||||
|
@ -48,6 +48,7 @@
|
||||
#include "mozilla/dom/SameProcessMessageQueue.h"
|
||||
#include "mozilla/dom/ScriptSettings.h"
|
||||
#include "mozilla/dom/ToJSValue.h"
|
||||
#include "mozilla/dom/ipc/SharedMap.h"
|
||||
#include "mozilla/dom/ipc/StructuredCloneData.h"
|
||||
#include "mozilla/dom/DOMStringList.h"
|
||||
#include "mozilla/jsipc/CrossProcessObjectWrappers.h"
|
||||
@ -999,6 +1000,19 @@ nsFrameMessageManager::GetInitialProcessData(JSContext* aCx,
|
||||
aInitialProcessData.set(init);
|
||||
}
|
||||
|
||||
WritableSharedMap*
|
||||
nsFrameMessageManager::SharedData()
|
||||
{
|
||||
if (!mChrome || !mIsProcessManager) {
|
||||
MOZ_ASSERT(false, "Should only call this binding method on ppmm");
|
||||
return nullptr;
|
||||
}
|
||||
if (!mSharedData) {
|
||||
mSharedData = new WritableSharedMap();
|
||||
}
|
||||
return mSharedData;
|
||||
}
|
||||
|
||||
already_AddRefed<ProcessMessageManager>
|
||||
nsFrameMessageManager::GetProcessMessageManager(ErrorResult& aError)
|
||||
{
|
||||
|
@ -37,6 +37,11 @@
|
||||
class nsFrameLoader;
|
||||
|
||||
namespace mozilla {
|
||||
|
||||
namespace ipc {
|
||||
class FileDescriptor;
|
||||
}
|
||||
|
||||
namespace dom {
|
||||
|
||||
class nsIContentParent;
|
||||
@ -54,6 +59,8 @@ class ProcessMessageManager;
|
||||
|
||||
namespace ipc {
|
||||
|
||||
class WritableSharedMap;
|
||||
|
||||
// Note: we round the time we spend to the nearest millisecond. So a min value
|
||||
// of 1 ms actually captures from 500us and above.
|
||||
static const uint32_t kMinTelemetrySyncMessageManagerLatencyMs = 1;
|
||||
@ -233,6 +240,8 @@ public:
|
||||
JS::MutableHandle<JS::Value> aInitialProcessData,
|
||||
mozilla::ErrorResult& aError);
|
||||
|
||||
mozilla::dom::ipc::WritableSharedMap* SharedData();
|
||||
|
||||
NS_DECL_NSIMESSAGESENDER
|
||||
NS_DECL_NSICONTENTFRAMEMESSAGEMANAGER
|
||||
|
||||
@ -340,6 +349,7 @@ protected:
|
||||
nsTArray<nsString> mPendingScripts;
|
||||
nsTArray<bool> mPendingScriptsGlobalStates;
|
||||
JS::Heap<JS::Value> mInitialProcessData;
|
||||
RefPtr<mozilla::dom::ipc::WritableSharedMap> mSharedData;
|
||||
|
||||
void LoadPendingScripts(nsFrameMessageManager* aManager,
|
||||
nsFrameMessageManager* aChildMM);
|
||||
|
@ -1,53 +0,0 @@
|
||||
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#ifndef nsIAttribute_h___
|
||||
#define nsIAttribute_h___
|
||||
|
||||
#include "nsINode.h"
|
||||
|
||||
class nsDOMAttributeMap;
|
||||
|
||||
#define NS_IATTRIBUTE_IID \
|
||||
{ 0x84d43da7, 0xb45d, 0x47ae, \
|
||||
{ 0x8f, 0xbf, 0x95, 0x26, 0x78, 0x4d, 0x5e, 0x47 } }
|
||||
|
||||
class nsIAttribute : public nsINode
|
||||
{
|
||||
public:
|
||||
NS_DECLARE_STATIC_IID_ACCESSOR(NS_IATTRIBUTE_IID)
|
||||
|
||||
virtual void SetMap(nsDOMAttributeMap *aMap) = 0;
|
||||
|
||||
nsDOMAttributeMap *GetMap()
|
||||
{
|
||||
return mAttrMap;
|
||||
}
|
||||
|
||||
mozilla::dom::NodeInfo *NodeInfo() const
|
||||
{
|
||||
return mNodeInfo;
|
||||
}
|
||||
|
||||
/**
|
||||
* Called when our ownerElement is moved into a new document.
|
||||
* Updates the nodeinfo of this node.
|
||||
*/
|
||||
virtual nsresult SetOwnerDocument(nsIDocument* aDocument) = 0;
|
||||
|
||||
protected:
|
||||
#ifdef MOZILLA_INTERNAL_API
|
||||
nsIAttribute(nsDOMAttributeMap *aAttrMap,
|
||||
already_AddRefed<mozilla::dom::NodeInfo>& aNodeInfo);
|
||||
#endif //MOZILLA_INTERNAL_API
|
||||
virtual ~nsIAttribute();
|
||||
|
||||
RefPtr<nsDOMAttributeMap> mAttrMap;
|
||||
};
|
||||
|
||||
NS_DEFINE_STATIC_IID_ACCESSOR(nsIAttribute, NS_IATTRIBUTE_IID)
|
||||
|
||||
#endif /* nsIAttribute_h___ */
|
@ -283,7 +283,7 @@ nsINode::SubtreeRoot() const
|
||||
// or mSubtreeRoot is updated in BindToTree/UnbindFromTree.
|
||||
// 2.b nsIContent nodes in a shadow tree - Are never in the document,
|
||||
// ignore mSubtreeRoot and return the containing shadow root.
|
||||
// 4. nsIAttribute nodes - Are never in the document, and mSubtreeRoot
|
||||
// 4. Attr nodes - Are never in the document, and mSubtreeRoot
|
||||
// is always 'this' (as set in nsINode's ctor).
|
||||
nsINode* node;
|
||||
if (IsInUncomposedDoc()) {
|
||||
|
@ -946,7 +946,7 @@ public:
|
||||
|
||||
/**
|
||||
* Get the parent nsINode for this node. This can be either an nsIContent,
|
||||
* an nsIDocument or an nsIAttribute.
|
||||
* an nsIDocument or an Attr.
|
||||
* @return the parent node
|
||||
*/
|
||||
nsINode* GetParentNode() const
|
||||
|
@ -577,6 +577,19 @@ DOMInterfaces = {
|
||||
'notflattened': True
|
||||
},
|
||||
|
||||
'MozSharedMap': {
|
||||
'nativeType': 'mozilla::dom::ipc::SharedMap',
|
||||
},
|
||||
|
||||
'MozWritableSharedMap': {
|
||||
'headerFile': 'mozilla/dom/ipc/SharedMap.h',
|
||||
'nativeType': 'mozilla::dom::ipc::WritableSharedMap',
|
||||
},
|
||||
|
||||
'MozSharedMapChangeEvent': {
|
||||
'nativeType': 'mozilla::dom::ipc::SharedMapChangeEvent',
|
||||
},
|
||||
|
||||
'MozStorageAsyncStatementParams': {
|
||||
'headerFile': 'mozilla/storage/mozStorageAsyncStatementParams.h',
|
||||
'nativeType': 'mozilla::storage::AsyncStatementParams',
|
||||
|
8
dom/cache/CacheWorkerHolder.cpp
vendored
8
dom/cache/CacheWorkerHolder.cpp
vendored
@ -20,7 +20,7 @@ CacheWorkerHolder::Create(WorkerPrivate* aWorkerPrivate, Behavior aBehavior)
|
||||
MOZ_DIAGNOSTIC_ASSERT(aWorkerPrivate);
|
||||
|
||||
RefPtr<CacheWorkerHolder> workerHolder = new CacheWorkerHolder(aBehavior);
|
||||
if (NS_WARN_IF(!workerHolder->HoldWorker(aWorkerPrivate, Terminating))) {
|
||||
if (NS_WARN_IF(!workerHolder->HoldWorker(aWorkerPrivate, Canceling))) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
@ -93,9 +93,9 @@ CacheWorkerHolder::Notify(WorkerStatus aStatus)
|
||||
{
|
||||
NS_ASSERT_OWNINGTHREAD(CacheWorkerHolder);
|
||||
|
||||
// When the service worker thread is stopped we will get Terminating,
|
||||
// but nothing higher than that. We must shut things down at Terminating.
|
||||
if (aStatus < Terminating || mNotified) {
|
||||
// When the service worker thread is stopped we will get Canceling,
|
||||
// but nothing higher than that. We must shut things down at Canceling.
|
||||
if (aStatus < Canceling || mNotified) {
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -1056,7 +1056,7 @@ ImageBitmap::CreateInternal(nsIGlobalObject* aGlobal, ImageData& aImageData,
|
||||
imageSize,
|
||||
aCropRect,
|
||||
getter_AddRefs(data));
|
||||
task->Dispatch(Terminating, aRv);
|
||||
task->Dispatch(Canceling, aRv);
|
||||
}
|
||||
|
||||
if (NS_WARN_IF(!data)) {
|
||||
@ -2083,7 +2083,7 @@ ImageBitmap::Create(nsIGlobalObject* aGlobal,
|
||||
aFormat,
|
||||
aLayout,
|
||||
getter_AddRefs(data));
|
||||
task->Dispatch(Terminating, aRv);
|
||||
task->Dispatch(Canceling, aRv);
|
||||
if (aRv.Failed()) {
|
||||
return promise.forget();
|
||||
}
|
||||
|
@ -457,6 +457,8 @@ interface GlobalProcessScriptLoader : ProcessScriptLoader
|
||||
*/
|
||||
[Throws]
|
||||
readonly attribute any initialProcessData;
|
||||
|
||||
readonly attribute MozWritableSharedMap sharedData;
|
||||
};
|
||||
|
||||
[ChromeOnly, Global, NeedResolve]
|
||||
@ -502,6 +504,8 @@ interface ContentProcessMessageManager
|
||||
*/
|
||||
[Throws]
|
||||
readonly attribute any initialProcessData;
|
||||
|
||||
readonly attribute MozSharedMap sharedData;
|
||||
};
|
||||
// MessageManagerGlobal inherits from SyncMessageSender, which is a real interface, not a
|
||||
// mixin. This will need to change when we implement mixins according to the current
|
||||
|
54
dom/chrome-webidl/MozSharedMap.webidl
Normal file
54
dom/chrome-webidl/MozSharedMap.webidl
Normal file
@ -0,0 +1,54 @@
|
||||
/* -*- Mode: IDL; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
*/
|
||||
|
||||
typedef any StructuredClonable;
|
||||
|
||||
[ChromeOnly]
|
||||
interface MozSharedMapChangeEvent : Event {
|
||||
[Cached, Constant]
|
||||
readonly attribute sequence<DOMString> changedKeys;
|
||||
};
|
||||
|
||||
dictionary MozSharedMapChangeEventInit : EventInit {
|
||||
required sequence<DOMString> changedKeys;
|
||||
};
|
||||
|
||||
[ChromeOnly]
|
||||
interface MozSharedMap : EventTarget {
|
||||
boolean has(DOMString name);
|
||||
|
||||
[Throws]
|
||||
StructuredClonable get(DOMString name);
|
||||
|
||||
iterable<DOMString, StructuredClonable>;
|
||||
};
|
||||
|
||||
[ChromeOnly]
|
||||
interface MozWritableSharedMap : MozSharedMap {
|
||||
/**
|
||||
* Sets the given key to the given structured-clonable value. The value is
|
||||
* synchronously structured cloned, and the serialized value is saved in the
|
||||
* map.
|
||||
*
|
||||
* Unless flush() is called, the new value will be broadcast to content
|
||||
* processes after a short delay.
|
||||
*/
|
||||
[Throws]
|
||||
void set(DOMString name, StructuredClonable value);
|
||||
|
||||
/**
|
||||
* Removes the given key from the map.
|
||||
*
|
||||
* Unless flush() is called, the removal will be broadcast to content
|
||||
* processes after a short delay.
|
||||
*/
|
||||
void delete(DOMString name);
|
||||
|
||||
/**
|
||||
* Broadcasts any pending changes to all content processes.
|
||||
*/
|
||||
void flush();
|
||||
};
|
@ -37,6 +37,7 @@ WEBIDL_FILES = [
|
||||
'MatchGlob.webidl',
|
||||
'MatchPattern.webidl',
|
||||
'MessageManager.webidl',
|
||||
'MozSharedMap.webidl',
|
||||
'MozStorageAsyncStatementParams.webidl',
|
||||
'MozStorageStatementParams.webidl',
|
||||
'MozStorageStatementRow.webidl',
|
||||
|
@ -67,7 +67,7 @@ ClientManager::ClientManager()
|
||||
// We need AllowIdleShutdownStart since every worker thread will
|
||||
// have a ClientManager to support creating its ClientSource.
|
||||
workerHolderToken =
|
||||
WorkerHolderToken::Create(workerPrivate, Terminating,
|
||||
WorkerHolderToken::Create(workerPrivate, Canceling,
|
||||
WorkerHolderToken::AllowIdleShutdownStart);
|
||||
if (NS_WARN_IF(!workerHolderToken)) {
|
||||
Shutdown();
|
||||
|
@ -398,7 +398,7 @@ Request::Constructor(const GlobalObject& aGlobal,
|
||||
RefPtr<ReferrerSameOriginChecker> checker =
|
||||
new ReferrerSameOriginChecker(worker, referrerURL, rv);
|
||||
IgnoredErrorResult error;
|
||||
checker->Dispatch(Terminating, error);
|
||||
checker->Dispatch(Canceling, error);
|
||||
if (error.Failed() || NS_FAILED(rv)) {
|
||||
referrerURL.AssignLiteral(kFETCH_CLIENT_REFERRER_STR);
|
||||
}
|
||||
|
@ -178,7 +178,7 @@ FileBlobImpl::GetType(nsAString& aType)
|
||||
new GetTypeRunnable(workerPrivate, this);
|
||||
|
||||
ErrorResult rv;
|
||||
runnable->Dispatch(Terminating, rv);
|
||||
runnable->Dispatch(Canceling, rv);
|
||||
if (NS_WARN_IF(rv.Failed())) {
|
||||
rv.SuppressException();
|
||||
}
|
||||
|
@ -444,7 +444,7 @@ FileReaderSync::SyncRead(nsIInputStream* aStream, char* aBuffer,
|
||||
WorkerPrivate* workerPrivate = GetCurrentThreadWorkerPrivate();
|
||||
MOZ_ASSERT(workerPrivate);
|
||||
|
||||
AutoSyncLoopHolder syncLoop(workerPrivate, Terminating);
|
||||
AutoSyncLoopHolder syncLoop(workerPrivate, Canceling);
|
||||
|
||||
nsCOMPtr<nsIEventTarget> syncLoopTarget = syncLoop.GetEventTarget();
|
||||
if (!syncLoopTarget) {
|
||||
|
@ -48,6 +48,7 @@
|
||||
#include "mozilla/dom/URLClassifierChild.h"
|
||||
#include "mozilla/dom/WorkerDebugger.h"
|
||||
#include "mozilla/dom/WorkerDebuggerManager.h"
|
||||
#include "mozilla/dom/ipc/SharedMap.h"
|
||||
#include "mozilla/gfx/gfxVars.h"
|
||||
#include "mozilla/gfx/Logging.h"
|
||||
#include "mozilla/psm/PSMContentListener.h"
|
||||
@ -592,7 +593,9 @@ mozilla::ipc::IPCResult
|
||||
ContentChild::RecvSetXPCOMProcessAttributes(const XPCOMInitData& aXPCOMInit,
|
||||
const StructuredCloneData& aInitialData,
|
||||
nsTArray<LookAndFeelInt>&& aLookAndFeelIntCache,
|
||||
nsTArray<SystemFontListEntry>&& aFontList)
|
||||
nsTArray<SystemFontListEntry>&& aFontList,
|
||||
const FileDescriptor& aSharedDataMapFile,
|
||||
const uint32_t& aSharedDataMapSize)
|
||||
{
|
||||
if (!sShutdownCanary) {
|
||||
return IPC_OK();
|
||||
@ -604,6 +607,9 @@ ContentChild::RecvSetXPCOMProcessAttributes(const XPCOMInitData& aXPCOMInit,
|
||||
InitXPCOM(aXPCOMInit, aInitialData);
|
||||
InitGraphicsDeviceData(aXPCOMInit.contentDeviceData());
|
||||
|
||||
mSharedData = new SharedMap(ProcessGlobal::Get(), aSharedDataMapFile,
|
||||
aSharedDataMapSize);
|
||||
|
||||
return IPC_OK();
|
||||
}
|
||||
|
||||
@ -2378,6 +2384,8 @@ ContentChild::ActorDestroy(ActorDestroyReason why)
|
||||
|
||||
BlobURLProtocolHandler::RemoveDataEntries();
|
||||
|
||||
mSharedData = nullptr;
|
||||
|
||||
mAlertObservers.Clear();
|
||||
|
||||
mIdleObservers.Clear();
|
||||
@ -2557,6 +2565,26 @@ ContentChild::RecvRegisterStringBundles(nsTArray<mozilla::dom::StringBundleDescr
|
||||
return IPC_OK();
|
||||
}
|
||||
|
||||
mozilla::ipc::IPCResult
|
||||
ContentChild::RecvUpdateSharedData(const FileDescriptor& aMapFile,
|
||||
const uint32_t& aMapSize,
|
||||
nsTArray<IPCBlob>&& aBlobs,
|
||||
nsTArray<nsCString>&& aChangedKeys)
|
||||
{
|
||||
if (mSharedData) {
|
||||
nsTArray<RefPtr<BlobImpl>> blobImpls(aBlobs.Length());
|
||||
for (auto& ipcBlob : aBlobs) {
|
||||
blobImpls.AppendElement(IPCBlobUtils::Deserialize(ipcBlob));
|
||||
}
|
||||
|
||||
mSharedData->Update(aMapFile, aMapSize,
|
||||
std::move(blobImpls),
|
||||
std::move(aChangedKeys));
|
||||
}
|
||||
|
||||
return IPC_OK();
|
||||
}
|
||||
|
||||
mozilla::ipc::IPCResult
|
||||
ContentChild::RecvGeolocationUpdate(nsIDOMGeoPosition* aPosition)
|
||||
{
|
||||
|
@ -66,6 +66,10 @@ class URIParams;
|
||||
|
||||
namespace dom {
|
||||
|
||||
namespace ipc {
|
||||
class SharedMap;
|
||||
}
|
||||
|
||||
class AlertObserver;
|
||||
class ConsoleListener;
|
||||
class ClonedMessageData;
|
||||
@ -164,6 +168,8 @@ public:
|
||||
|
||||
bool IsShuttingDown() const;
|
||||
|
||||
ipc::SharedMap* SharedData() { return mSharedData; };
|
||||
|
||||
static void AppendProcessId(nsACString& aName);
|
||||
|
||||
static void UpdateCookieStatus(nsIChannel *aChannel);
|
||||
@ -396,6 +402,11 @@ public:
|
||||
|
||||
mozilla::ipc::IPCResult RecvRegisterStringBundles(nsTArray<StringBundleDescriptor>&& stringBundles) override;
|
||||
|
||||
mozilla::ipc::IPCResult RecvUpdateSharedData(const FileDescriptor& aMapFile,
|
||||
const uint32_t& aMapSize,
|
||||
nsTArray<IPCBlob>&& aBlobs,
|
||||
nsTArray<nsCString>&& aChangedKeys) override;
|
||||
|
||||
virtual mozilla::ipc::IPCResult RecvGeolocationUpdate(nsIDOMGeoPosition* aPosition) override;
|
||||
|
||||
virtual mozilla::ipc::IPCResult RecvGeolocationError(const uint16_t& errorCode) override;
|
||||
@ -615,7 +626,9 @@ public:
|
||||
RecvSetXPCOMProcessAttributes(const XPCOMInitData& aXPCOMInit,
|
||||
const StructuredCloneData& aInitialData,
|
||||
nsTArray<LookAndFeelInt>&& aLookAndFeelIntCache,
|
||||
nsTArray<SystemFontListEntry>&& aFontList) override;
|
||||
nsTArray<SystemFontListEntry>&& aFontList,
|
||||
const FileDescriptor& aSharedDataMapFile,
|
||||
const uint32_t& aSharedDataMapSize) override;
|
||||
|
||||
virtual mozilla::ipc::IPCResult
|
||||
RecvProvideAnonymousTemporaryFile(const uint64_t& aID, const FileDescOrError& aFD) override;
|
||||
@ -815,6 +828,8 @@ private:
|
||||
nsCOMPtr<nsIDomainPolicy> mPolicy;
|
||||
nsCOMPtr<nsITimer> mForceKillTimer;
|
||||
|
||||
RefPtr<ipc::SharedMap> mSharedData;
|
||||
|
||||
#ifdef MOZ_GECKO_PROFILER
|
||||
RefPtr<ChildProfilerController> mProfilerController;
|
||||
#endif
|
||||
|
@ -61,6 +61,7 @@
|
||||
#include "mozilla/dom/quota/QuotaManagerService.h"
|
||||
#include "mozilla/dom/ServiceWorkerUtils.h"
|
||||
#include "mozilla/dom/URLClassifierParent.h"
|
||||
#include "mozilla/dom/ipc/SharedMap.h"
|
||||
#include "mozilla/embedding/printingui/PrintingParent.h"
|
||||
#include "mozilla/extensions/StreamFilterParent.h"
|
||||
#include "mozilla/gfx/gfxVars.h"
|
||||
@ -2323,8 +2324,12 @@ ContentParent::InitInternal(ProcessPriority aInitialPriority)
|
||||
ScreenManager& screenManager = ScreenManager::GetSingleton();
|
||||
screenManager.CopyScreensToRemote(this);
|
||||
|
||||
ipc::WritableSharedMap* sharedData = nsFrameMessageManager::sParentProcessManager->SharedData();
|
||||
sharedData->Flush();
|
||||
|
||||
Unused << SendSetXPCOMProcessAttributes(xpcomInit, initialData, lnfCache,
|
||||
fontList);
|
||||
fontList, sharedData->CloneMapFile(),
|
||||
sharedData->MapSize());
|
||||
|
||||
nsCOMPtr<nsIChromeRegistry> registrySvc = nsChromeRegistry::GetService();
|
||||
nsChromeRegistryChrome* chromeRegistry =
|
||||
@ -2873,6 +2878,7 @@ ContentParent::Observe(nsISupports* aSubject,
|
||||
|
||||
// Okay to call ShutDownProcess multiple times.
|
||||
ShutDownProcess(SEND_SHUTDOWN_MESSAGE);
|
||||
MarkAsDead();
|
||||
|
||||
// Wait for shutdown to complete, so that we receive any shutdown
|
||||
// data (e.g. telemetry) from the child before we quit.
|
||||
|
@ -118,7 +118,19 @@ MemMapSnapshot::Freeze(AutoMemMap& aMem)
|
||||
MOZ_TRY(NS_NewNativeLocalFile(mPath, /* followLinks = */ false,
|
||||
getter_AddRefs(file)));
|
||||
|
||||
return aMem.init(file);
|
||||
auto result = aMem.init(file);
|
||||
#ifdef XP_LINUX
|
||||
// On Linux automation runs, every few hundred thousand calls, our attempt to
|
||||
// stat the file that we just successfully opened fails with EBADF (bug
|
||||
// 1472889). Presumably this is a race with a background thread that double
|
||||
// closes a file, but is difficult to diagnose, so work around it by making a
|
||||
// second mapping attempt if the first one fails.
|
||||
if (!result.isOk()) {
|
||||
aMem.reset();
|
||||
result = aMem.init(file);
|
||||
}
|
||||
#endif
|
||||
return result;
|
||||
}
|
||||
|
||||
#else
|
||||
|
@ -466,6 +466,10 @@ child:
|
||||
|
||||
async RegisterStringBundles(StringBundleDescriptor[] stringBundles);
|
||||
|
||||
async UpdateSharedData(FileDescriptor mapFile, uint32_t aSize,
|
||||
IPCBlob[] blobs,
|
||||
nsCString[] changedKeys);
|
||||
|
||||
// nsIPermissionManager messages
|
||||
async AddPermission(Permission permission);
|
||||
async RemoveAllPermissions();
|
||||
@ -513,7 +517,9 @@ child:
|
||||
StructuredCloneData initialData,
|
||||
LookAndFeelInt[] lookAndFeelIntCache,
|
||||
/* used on MacOSX and Linux only: */
|
||||
SystemFontListEntry[] systemFontList);
|
||||
SystemFontListEntry[] systemFontList,
|
||||
FileDescriptor sharedDataMapFile,
|
||||
uint32_t sharedDataMapSize);
|
||||
|
||||
// Notify child that last-pb-context-exited notification was observed
|
||||
async LastPrivateDocShellDestroyed();
|
||||
|
519
dom/ipc/SharedMap.cpp
Normal file
519
dom/ipc/SharedMap.cpp
Normal file
@ -0,0 +1,519 @@
|
||||
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
|
||||
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#include "SharedMap.h"
|
||||
#include "SharedMapChangeEvent.h"
|
||||
|
||||
#include "MemMapSnapshot.h"
|
||||
#include "ScriptPreloader-inl.h"
|
||||
|
||||
#include "mozilla/dom/ContentParent.h"
|
||||
#include "mozilla/dom/IPCBlobUtils.h"
|
||||
#include "mozilla/dom/ProcessGlobal.h"
|
||||
#include "mozilla/dom/ScriptSettings.h"
|
||||
|
||||
using namespace mozilla::loader;
|
||||
|
||||
namespace mozilla {
|
||||
|
||||
using namespace ipc;
|
||||
|
||||
namespace dom {
|
||||
namespace ipc {
|
||||
|
||||
// Align to size of uintptr_t here, to be safe. It's probably not strictly
|
||||
// necessary, though.
|
||||
constexpr size_t kStructuredCloneAlign = sizeof(uintptr_t);
|
||||
|
||||
|
||||
static inline void
|
||||
AlignTo(size_t* aOffset, size_t aAlign)
|
||||
{
|
||||
if (auto mod = *aOffset % aAlign) {
|
||||
*aOffset += aAlign - mod;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
SharedMap::SharedMap()
|
||||
: DOMEventTargetHelper()
|
||||
{}
|
||||
|
||||
SharedMap::SharedMap(nsIGlobalObject* aGlobal, const FileDescriptor& aMapFile,
|
||||
size_t aMapSize)
|
||||
: DOMEventTargetHelper(aGlobal)
|
||||
{
|
||||
mMapFile.reset(new FileDescriptor(aMapFile));
|
||||
mMapSize = aMapSize;
|
||||
}
|
||||
|
||||
|
||||
bool
|
||||
SharedMap::Has(const nsACString& aName)
|
||||
{
|
||||
return mEntries.Contains(aName);
|
||||
}
|
||||
|
||||
void
|
||||
SharedMap::Get(JSContext* aCx,
|
||||
const nsACString& aName,
|
||||
JS::MutableHandleValue aRetVal,
|
||||
ErrorResult& aRv)
|
||||
{
|
||||
auto res = MaybeRebuild();
|
||||
if (res.isErr()) {
|
||||
aRv.Throw(res.unwrapErr());
|
||||
return;
|
||||
}
|
||||
|
||||
Entry* entry = mEntries.Get(aName);
|
||||
if (!entry) {
|
||||
aRetVal.setNull();
|
||||
return;
|
||||
}
|
||||
|
||||
entry->Read(aCx, aRetVal, aRv);
|
||||
}
|
||||
|
||||
void
|
||||
SharedMap::Entry::Read(JSContext* aCx,
|
||||
JS::MutableHandleValue aRetVal,
|
||||
ErrorResult& aRv)
|
||||
{
|
||||
if (mData.is<StructuredCloneData>()) {
|
||||
// We have a temporary buffer for a key that was changed after the last
|
||||
// snapshot. Just decode it directly.
|
||||
auto& holder = mData.as<StructuredCloneData>();
|
||||
holder.Read(aCx, aRetVal, aRv);
|
||||
return;
|
||||
}
|
||||
|
||||
// We have a pointer to a shared memory region containing our structured
|
||||
// clone data. Create a temporary buffer to decode that data, and then
|
||||
// discard it so that we don't keep a separate process-local copy around any
|
||||
// longer than necessary.
|
||||
StructuredCloneData holder;
|
||||
if (!holder.CopyExternalData(Data(), Size())) {
|
||||
aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
|
||||
return;
|
||||
}
|
||||
if (mBlobCount) {
|
||||
holder.BlobImpls().AppendElements(Blobs());
|
||||
}
|
||||
holder.Read(aCx, aRetVal, aRv);
|
||||
}
|
||||
|
||||
FileDescriptor
|
||||
SharedMap::CloneMapFile()
|
||||
{
|
||||
if (mMap.initialized()) {
|
||||
return mMap.cloneHandle();
|
||||
}
|
||||
return *mMapFile;
|
||||
}
|
||||
|
||||
void
|
||||
SharedMap::Update(const FileDescriptor& aMapFile, size_t aMapSize,
|
||||
nsTArray<RefPtr<BlobImpl>>&& aBlobs,
|
||||
nsTArray<nsCString>&& aChangedKeys)
|
||||
{
|
||||
MOZ_DIAGNOSTIC_ASSERT(!mWritable);
|
||||
|
||||
mMap.reset();
|
||||
if (mMapFile) {
|
||||
*mMapFile = aMapFile;
|
||||
} else {
|
||||
mMapFile.reset(new FileDescriptor(aMapFile));
|
||||
}
|
||||
mMapSize = aMapSize;
|
||||
mEntries.Clear();
|
||||
mEntryArray.reset();
|
||||
|
||||
mBlobImpls = std::move(aBlobs);
|
||||
|
||||
|
||||
AutoEntryScript aes(GetParentObject(), "SharedMap change event");
|
||||
JSContext* cx = aes.cx();
|
||||
|
||||
RootedDictionary<MozSharedMapChangeEventInit> init(cx);
|
||||
if (!init.mChangedKeys.SetCapacity(aChangedKeys.Length(), fallible)) {
|
||||
NS_WARNING("Failed to dispatch SharedMap change event");
|
||||
return;
|
||||
}
|
||||
for (auto& key : aChangedKeys) {
|
||||
Unused << init.mChangedKeys.AppendElement(NS_ConvertUTF8toUTF16(key),
|
||||
fallible);
|
||||
}
|
||||
|
||||
RefPtr<SharedMapChangeEvent> event =
|
||||
SharedMapChangeEvent::Constructor(this, NS_LITERAL_STRING("change"), init);
|
||||
event->SetTrusted(true);
|
||||
|
||||
DispatchEvent(*event);
|
||||
}
|
||||
|
||||
|
||||
const nsTArray<SharedMap::Entry*>&
|
||||
SharedMap::EntryArray() const
|
||||
{
|
||||
if (mEntryArray.isNothing()) {
|
||||
MaybeRebuild();
|
||||
|
||||
mEntryArray.emplace(mEntries.Count());
|
||||
auto& array = mEntryArray.ref();
|
||||
for (auto& entry : IterHash(mEntries)) {
|
||||
array.AppendElement(entry);
|
||||
}
|
||||
}
|
||||
|
||||
return mEntryArray.ref();
|
||||
}
|
||||
|
||||
const nsString
|
||||
SharedMap::GetKeyAtIndex(uint32_t aIndex) const
|
||||
{
|
||||
return NS_ConvertUTF8toUTF16(EntryArray()[aIndex]->Name());
|
||||
}
|
||||
|
||||
JS::Value
|
||||
SharedMap::GetValueAtIndex(uint32_t aIndex) const
|
||||
{
|
||||
JSObject* wrapper = GetWrapper();
|
||||
MOZ_ASSERT(wrapper,
|
||||
"Should never see GetValueAtIndex on a SharedMap without a live "
|
||||
"wrapper");
|
||||
if (!wrapper) {
|
||||
return JS::NullValue();
|
||||
}
|
||||
|
||||
AutoJSContext cx;
|
||||
|
||||
JSAutoRealm ar(cx, wrapper);
|
||||
|
||||
JS::RootedValue val(cx);
|
||||
EntryArray()[aIndex]->Read(cx, &val, IgnoreErrors());
|
||||
|
||||
return val;
|
||||
}
|
||||
|
||||
void
|
||||
SharedMap::Entry::TakeData(StructuredCloneData&& aHolder)
|
||||
{
|
||||
mData = AsVariant(std::move(aHolder));
|
||||
|
||||
mSize = Holder().Data().Size();
|
||||
mBlobCount = Holder().BlobImpls().Length();
|
||||
}
|
||||
|
||||
void
|
||||
SharedMap::Entry::ExtractData(char* aDestPtr, uint32_t aNewOffset, uint16_t aNewBlobOffset)
|
||||
{
|
||||
if (mData.is<StructuredCloneData>()) {
|
||||
char* ptr = aDestPtr;
|
||||
Holder().Data().ForEachDataChunk([&](const char* aData, size_t aSize) {
|
||||
memcpy(ptr, aData, aSize);
|
||||
ptr += aSize;
|
||||
return true;
|
||||
});
|
||||
MOZ_ASSERT(uint32_t(ptr - aDestPtr) == mSize);
|
||||
} else {
|
||||
memcpy(aDestPtr, Data(), mSize);
|
||||
}
|
||||
|
||||
mData = AsVariant(aNewOffset);
|
||||
mBlobOffset = aNewBlobOffset;
|
||||
}
|
||||
|
||||
Result<Ok, nsresult>
|
||||
SharedMap::MaybeRebuild()
|
||||
{
|
||||
if (!mMapFile) {
|
||||
return Ok();
|
||||
}
|
||||
|
||||
// This function maps a shared memory region created by Serialize() and reads
|
||||
// its header block to build a new mEntries hashtable of its contents.
|
||||
//
|
||||
// The entries created by this function contain a pointer to this SharedMap
|
||||
// instance, and the offsets and sizes of their structured clone data within
|
||||
// its shared memory region. When needed, that structured clone data is
|
||||
// retrieved directly as indexes into the SharedMap's shared memory region.
|
||||
|
||||
MOZ_TRY(mMap.initWithHandle(*mMapFile, mMapSize));
|
||||
mMapFile.reset();
|
||||
|
||||
// We should be able to pass this range as an initializer list or an immediate
|
||||
// param, but gcc currently chokes on that if optimization is enabled, and
|
||||
// initializes everything to 0.
|
||||
Range<uint8_t> range(&mMap.get<uint8_t>()[0], mMap.size());
|
||||
InputBuffer buffer(range);
|
||||
|
||||
uint32_t count;
|
||||
buffer.codeUint32(count);
|
||||
|
||||
for (uint32_t i = 0; i < count; i++) {
|
||||
auto entry = MakeUnique<Entry>(*this);
|
||||
entry->Code(buffer);
|
||||
|
||||
// This buffer was created at runtime, during this session, so any errors
|
||||
// indicate memory corruption, and are fatal.
|
||||
MOZ_RELEASE_ASSERT(!buffer.error());
|
||||
|
||||
// Note: Order of evaluation of function arguments is not guaranteed, so we
|
||||
// can't use entry.release() in place of entry.get() without entry->Name()
|
||||
// sometimes resulting in a null dereference.
|
||||
mEntries.Put(entry->Name(), entry.get());
|
||||
Unused << entry.release();
|
||||
}
|
||||
|
||||
return Ok();
|
||||
}
|
||||
|
||||
void
|
||||
SharedMap::MaybeRebuild() const
|
||||
{
|
||||
Unused << const_cast<SharedMap*>(this)->MaybeRebuild();
|
||||
}
|
||||
|
||||
WritableSharedMap::WritableSharedMap()
|
||||
: SharedMap()
|
||||
{
|
||||
mWritable = true;
|
||||
// Serialize the initial empty contents of the map immediately so that we
|
||||
// always have a file descriptor to send to callers of CloneMapFile().
|
||||
Unused << Serialize();
|
||||
MOZ_RELEASE_ASSERT(mMap.initialized());
|
||||
}
|
||||
|
||||
SharedMap*
|
||||
WritableSharedMap::GetReadOnly()
|
||||
{
|
||||
if (!mReadOnly) {
|
||||
mReadOnly = new SharedMap(ProcessGlobal::Get(), CloneMapFile(),
|
||||
MapSize());
|
||||
}
|
||||
return mReadOnly;
|
||||
}
|
||||
|
||||
Result<Ok, nsresult>
|
||||
WritableSharedMap::Serialize()
|
||||
{
|
||||
// Serializes a new snapshot of the map, initializes a new read-only shared
|
||||
// memory region with its contents, and updates all entries to point to that
|
||||
// new snapshot.
|
||||
//
|
||||
// The layout of the snapshot is as follows:
|
||||
//
|
||||
// - A header containing a uint32 count field containing the number of
|
||||
// entries in the map, followed by that number of serialized entry headers,
|
||||
// as produced by Entry::Code.
|
||||
//
|
||||
// - A data block containing structured clone data for each of the entries'
|
||||
// values. This data is referenced by absolute byte offsets from the start
|
||||
// of the shared memory region, encoded in each of the entry header values.
|
||||
// Each entry's data is aligned to kStructuredCloneAlign, and therefore may
|
||||
// have alignment padding before it.
|
||||
//
|
||||
// This serialization format is decoded by the MaybeRebuild() method of
|
||||
// read-only SharedMap() instances, and used to populate their mEntries
|
||||
// hashtables.
|
||||
//
|
||||
// Writable instances never read the header blocks, but instead directly
|
||||
// update their Entry instances to point to the appropriate offsets in the
|
||||
// shared memory region created by this function.
|
||||
|
||||
uint32_t count = mEntries.Count();
|
||||
|
||||
size_t dataSize = 0;
|
||||
size_t headerSize = sizeof(count);
|
||||
size_t blobCount = 0;
|
||||
|
||||
for (auto& entry : IterHash(mEntries)) {
|
||||
headerSize += entry->HeaderSize();
|
||||
blobCount += entry->BlobCount();
|
||||
|
||||
dataSize += entry->Size();
|
||||
AlignTo(&dataSize, kStructuredCloneAlign);
|
||||
}
|
||||
|
||||
size_t offset = headerSize;
|
||||
AlignTo(&offset, kStructuredCloneAlign);
|
||||
|
||||
OutputBuffer header;
|
||||
header.codeUint32(count);
|
||||
|
||||
MemMapSnapshot mem;
|
||||
MOZ_TRY(mem.Init(offset + dataSize));
|
||||
|
||||
auto ptr = mem.Get<char>();
|
||||
|
||||
// We need to build the new array of blobs before we overwrite the existing
|
||||
// one, since previously-serialized entries will store their blob references
|
||||
// as indexes into our blobs array.
|
||||
nsTArray<RefPtr<BlobImpl>> blobImpls(blobCount);
|
||||
|
||||
for (auto& entry : IterHash(mEntries)) {
|
||||
AlignTo(&offset, kStructuredCloneAlign);
|
||||
|
||||
entry->ExtractData(&ptr[offset], offset, blobImpls.Length());
|
||||
entry->Code(header);
|
||||
|
||||
offset += entry->Size();
|
||||
|
||||
if (entry->BlobCount()) {
|
||||
mBlobImpls.AppendElements(entry->Blobs());
|
||||
}
|
||||
}
|
||||
|
||||
mBlobImpls = std::move(blobImpls);
|
||||
|
||||
// FIXME: We should create a separate OutputBuffer class which can encode to
|
||||
// a static memory region rather than dynamically allocating and then
|
||||
// copying.
|
||||
MOZ_ASSERT(header.cursor() == headerSize);
|
||||
memcpy(ptr.get(), header.Get(), header.cursor());
|
||||
|
||||
// We've already updated offsets at this point. We need this to succeed.
|
||||
mMap.reset();
|
||||
MOZ_RELEASE_ASSERT(mem.Finalize(mMap).isOk());
|
||||
|
||||
return Ok();
|
||||
}
|
||||
|
||||
void
|
||||
WritableSharedMap::BroadcastChanges()
|
||||
{
|
||||
if (mChangedKeys.IsEmpty()) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (!Serialize().isOk()) {
|
||||
return;
|
||||
}
|
||||
|
||||
nsTArray<ContentParent*> parents;
|
||||
ContentParent::GetAll(parents);
|
||||
for (auto& parent : parents) {
|
||||
nsTArray<IPCBlob> blobs(mBlobImpls.Length());
|
||||
|
||||
for (auto& blobImpl : mBlobImpls) {
|
||||
nsresult rv = IPCBlobUtils::Serialize(blobImpl, parent,
|
||||
*blobs.AppendElement());
|
||||
if (NS_WARN_IF(NS_FAILED(rv))) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
Unused << parent->SendUpdateSharedData(CloneMapFile(), mMap.size(),
|
||||
blobs, mChangedKeys);
|
||||
}
|
||||
|
||||
if (mReadOnly) {
|
||||
nsTArray<RefPtr<BlobImpl>> blobImpls(mBlobImpls);
|
||||
mReadOnly->Update(CloneMapFile(), mMap.size(),
|
||||
std::move(blobImpls),
|
||||
std::move(mChangedKeys));
|
||||
}
|
||||
|
||||
mChangedKeys.Clear();
|
||||
}
|
||||
|
||||
void
|
||||
WritableSharedMap::Delete(const nsACString& aName)
|
||||
{
|
||||
if (mEntries.Remove(aName)) {
|
||||
KeyChanged(aName);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
WritableSharedMap::Set(JSContext* aCx,
|
||||
const nsACString& aName,
|
||||
JS::HandleValue aValue,
|
||||
ErrorResult& aRv)
|
||||
{
|
||||
StructuredCloneData holder;
|
||||
|
||||
holder.Write(aCx, aValue, aRv);
|
||||
if (aRv.Failed()) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (!holder.InputStreams().IsEmpty()) {
|
||||
aRv.Throw(NS_ERROR_INVALID_ARG);
|
||||
return;
|
||||
}
|
||||
|
||||
Entry* entry = mEntries.LookupOrAdd(aName, *this, aName);
|
||||
entry->TakeData(std::move(holder));
|
||||
|
||||
KeyChanged(aName);
|
||||
}
|
||||
|
||||
void
|
||||
WritableSharedMap::Flush()
|
||||
{
|
||||
BroadcastChanges();
|
||||
}
|
||||
|
||||
void
|
||||
WritableSharedMap::IdleFlush()
|
||||
{
|
||||
mPendingFlush = false;
|
||||
Flush();
|
||||
}
|
||||
|
||||
nsresult
|
||||
WritableSharedMap::KeyChanged(const nsACString& aName)
|
||||
{
|
||||
if (!mChangedKeys.ContainsSorted(aName)) {
|
||||
mChangedKeys.InsertElementSorted(aName);
|
||||
}
|
||||
mEntryArray.reset();
|
||||
|
||||
if (!mPendingFlush) {
|
||||
MOZ_TRY(NS_IdleDispatchToCurrentThread(
|
||||
NewRunnableMethod("WritableSharedMap::IdleFlush",
|
||||
this,
|
||||
&WritableSharedMap::IdleFlush)));
|
||||
mPendingFlush = true;
|
||||
}
|
||||
return NS_OK;
|
||||
}
|
||||
|
||||
|
||||
JSObject*
|
||||
SharedMap::WrapObject(JSContext* aCx, JS::HandleObject aGivenProto)
|
||||
{
|
||||
return MozSharedMap_Binding::Wrap(aCx, this, aGivenProto);
|
||||
}
|
||||
|
||||
JSObject*
|
||||
WritableSharedMap::WrapObject(JSContext* aCx, JS::HandleObject aGivenProto)
|
||||
{
|
||||
return MozWritableSharedMap_Binding::Wrap(aCx, this, aGivenProto);
|
||||
}
|
||||
|
||||
/* static */ already_AddRefed<SharedMapChangeEvent>
|
||||
SharedMapChangeEvent::Constructor(EventTarget* aEventTarget,
|
||||
const nsAString& aType,
|
||||
const MozSharedMapChangeEventInit& aInit)
|
||||
{
|
||||
RefPtr<SharedMapChangeEvent> event = new SharedMapChangeEvent(aEventTarget);
|
||||
|
||||
bool trusted = event->Init(aEventTarget);
|
||||
event->InitEvent(aType, aInit.mBubbles, aInit.mCancelable);
|
||||
event->SetTrusted(trusted);
|
||||
event->SetComposed(aInit.mComposed);
|
||||
|
||||
event->mChangedKeys = aInit.mChangedKeys;
|
||||
|
||||
return event.forget();
|
||||
}
|
||||
|
||||
} // ipc
|
||||
} // dom
|
||||
} // mozilla
|
393
dom/ipc/SharedMap.h
Normal file
393
dom/ipc/SharedMap.h
Normal file
@ -0,0 +1,393 @@
|
||||
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
|
||||
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#ifndef dom_ipc_SharedMap_h
|
||||
#define dom_ipc_SharedMap_h
|
||||
|
||||
#include "mozilla/dom/MozSharedMapBinding.h"
|
||||
|
||||
#include "mozilla/AutoMemMap.h"
|
||||
#include "mozilla/dom/ipc/StructuredCloneData.h"
|
||||
#include "mozilla/DOMEventTargetHelper.h"
|
||||
#include "mozilla/Maybe.h"
|
||||
#include "mozilla/UniquePtr.h"
|
||||
#include "mozilla/Variant.h"
|
||||
#include "nsClassHashtable.h"
|
||||
#include "nsTArray.h"
|
||||
|
||||
class nsIGlobalObject;
|
||||
|
||||
namespace mozilla {
|
||||
namespace dom {
|
||||
namespace ipc {
|
||||
|
||||
/**
|
||||
* Together, the SharedMap and WritableSharedMap classes allow sharing a
|
||||
* dynamically-updated, shared-memory key-value store across processes.
|
||||
*
|
||||
* The maps may only ever be updated in the parent process, via
|
||||
* WritableSharedMap instances. When that map changes, its entire contents are
|
||||
* serialized into a contiguous shared memory buffer, and broadcast to all child
|
||||
* processes, which in turn update their entire map contents wholesale.
|
||||
*
|
||||
* Keys are arbitrary UTF-8 strings (currently exposed to JavaScript as UTF-16),
|
||||
* and values are structured clone buffers. Values are eagerly encoded whenever
|
||||
* they are updated, and lazily decoded each time they're read.
|
||||
*
|
||||
* Updates are batched. Rather than each key change triggering an immediate
|
||||
* update, combined updates are broadcast after a delay. Changes are flushed
|
||||
* immediately any time a new process is created. Additionally, any time a key
|
||||
* is changed, a flush task is scheduled for the next time the event loop
|
||||
* becomes idle. Changes can be flushed immediately by calling the flush()
|
||||
* method.
|
||||
*
|
||||
*
|
||||
* Whenever a read-only SharedMap is updated, it dispatches a "change" event.
|
||||
* The event contains a "changedKeys" property with a list of all keys which
|
||||
* were changed in the last update batch. Change events are never dispatched to
|
||||
* WritableSharedMap instances.
|
||||
*/
|
||||
class SharedMap : public DOMEventTargetHelper
|
||||
{
|
||||
using FileDescriptor = mozilla::ipc::FileDescriptor;
|
||||
|
||||
public:
|
||||
|
||||
SharedMap();
|
||||
|
||||
SharedMap(nsIGlobalObject* aGlobal, const FileDescriptor&, size_t);
|
||||
|
||||
// Returns true if the map contains the given (UTF-8) key.
|
||||
bool Has(const nsACString& name);
|
||||
|
||||
// If the map contains the given (UTF-8) key, decodes and returns a new copy
|
||||
// of its value. Otherwise returns null.
|
||||
void Get(JSContext* cx, const nsACString& name, JS::MutableHandleValue aRetVal,
|
||||
ErrorResult& aRv);
|
||||
|
||||
|
||||
// Conversion helpers for WebIDL callers
|
||||
bool Has(const nsAString& aName)
|
||||
{
|
||||
return Has(NS_ConvertUTF16toUTF8(aName));
|
||||
}
|
||||
|
||||
void Get(JSContext* aCx, const nsAString& aName, JS::MutableHandleValue aRetVal,
|
||||
ErrorResult& aRv)
|
||||
{
|
||||
return Get(aCx, NS_ConvertUTF16toUTF8(aName), aRetVal, aRv);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* WebIDL iterator glue.
|
||||
*/
|
||||
uint32_t GetIterableLength() const
|
||||
{
|
||||
return EntryArray().Length();
|
||||
}
|
||||
|
||||
/**
|
||||
* These functions return the key or value, respectively, at the given index.
|
||||
* The index *must* be less than the value returned by GetIterableLength(), or
|
||||
* the program will crash.
|
||||
*/
|
||||
const nsString GetKeyAtIndex(uint32_t aIndex) const;
|
||||
// Note: This function should only be called if the instance has a live,
|
||||
// cached wrapper. If it does not, this function will return null, and assert
|
||||
// in debug builds.
|
||||
// The returned value will always be in the same Realm as that wrapper.
|
||||
JS::Value GetValueAtIndex(uint32_t aIndex) const;
|
||||
|
||||
|
||||
/**
|
||||
* Returns a copy of the read-only file descriptor which backs the shared
|
||||
* memory region for this map. The file descriptor may be passed between
|
||||
* processes, and used to update corresponding instances in child processes.
|
||||
*/
|
||||
FileDescriptor CloneMapFile();
|
||||
|
||||
/**
|
||||
* Returns the size of the memory mapped region that backs this map. Must be
|
||||
* passed to the SharedMap() constructor or Update() method along with the
|
||||
* descriptor returned by CloneMapFile() in order to initialize or update a
|
||||
* child SharedMap.
|
||||
*/
|
||||
size_t MapSize() const { return mMap.size(); }
|
||||
|
||||
/**
|
||||
* Updates this instance to reflect the contents of the shared memory region
|
||||
* in the given map file, and broadcasts a change event for the given set of
|
||||
* changed (UTF-8-encoded) keys.
|
||||
*/
|
||||
void Update(const FileDescriptor& aMapFile, size_t aMapSize,
|
||||
nsTArray<RefPtr<BlobImpl>>&& aBlobs,
|
||||
nsTArray<nsCString>&& aChangedKeys);
|
||||
|
||||
|
||||
JSObject* WrapObject(JSContext* aCx, JS::HandleObject aGivenProto) override;
|
||||
|
||||
protected:
|
||||
~SharedMap() override = default;
|
||||
|
||||
class Entry
|
||||
{
|
||||
public:
|
||||
Entry(Entry&&) = delete;
|
||||
|
||||
explicit Entry(SharedMap& aMap, const nsACString& aName = EmptyCString())
|
||||
: mMap(aMap)
|
||||
, mName(aName)
|
||||
, mData(AsVariant(uint32_t(0)))
|
||||
{
|
||||
}
|
||||
|
||||
~Entry() = default;
|
||||
|
||||
/**
|
||||
* Encodes or decodes this entry into or from the given OutputBuffer or
|
||||
* InputBuffer.
|
||||
*/
|
||||
template<typename Buffer>
|
||||
void Code(Buffer& buffer)
|
||||
{
|
||||
DebugOnly<size_t> startOffset = buffer.cursor();
|
||||
|
||||
buffer.codeString(mName);
|
||||
buffer.codeUint32(DataOffset());
|
||||
buffer.codeUint32(mSize);
|
||||
buffer.codeUint16(mBlobOffset);
|
||||
buffer.codeUint16(mBlobCount);
|
||||
|
||||
MOZ_ASSERT(buffer.cursor() == startOffset + HeaderSize());
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the size that this entry will take up in the map header. This
|
||||
* must be equal to the number of bytes encoded by Code().
|
||||
*/
|
||||
size_t HeaderSize() const
|
||||
{
|
||||
return (sizeof(uint16_t) + mName.Length() +
|
||||
sizeof(DataOffset()) +
|
||||
sizeof(mSize) +
|
||||
sizeof(mBlobOffset) +
|
||||
sizeof(mBlobCount));
|
||||
}
|
||||
|
||||
/**
|
||||
* Updates the value of this entry to the given structured clone data, of
|
||||
* which it takes ownership. The passed StructuredCloneData object must not
|
||||
* be used after this call.
|
||||
*/
|
||||
void TakeData(StructuredCloneData&&);
|
||||
|
||||
/**
|
||||
* This is called while building a new snapshot of the SharedMap. aDestPtr
|
||||
* must point to a buffer within the new snapshot with Size() bytes reserved
|
||||
* for it, and `aNewOffset` must be the offset of that buffer from the start
|
||||
* of the snapshot's memory region.
|
||||
*
|
||||
* This function copies the raw structured clone data for the entry's value
|
||||
* to the new buffer, and updates its internal state for use with the new
|
||||
* data. Its offset is updated to aNewOffset, and any StructuredCloneData
|
||||
* object it holds is destroyed.
|
||||
*
|
||||
* After this call, the entry is only valid in reference to the new
|
||||
* snapshot, and must not be accessed again until the SharedMap mMap has been
|
||||
* updated to point to it.
|
||||
*/
|
||||
void ExtractData(char* aDestPtr, uint32_t aNewOffset, uint16_t aNewBlobOffset);
|
||||
|
||||
// Returns the UTF-8-encoded name of the entry, which is used as its key in
|
||||
// the map.
|
||||
const nsCString& Name() const { return mName; }
|
||||
|
||||
// Decodes the entry's value into the current Realm of the given JS context
|
||||
// and puts the result in aRetVal on success.
|
||||
void Read(JSContext* aCx, JS::MutableHandleValue aRetVal,
|
||||
ErrorResult& aRv);
|
||||
|
||||
// Returns the byte size of the entry's raw structured clone data.
|
||||
uint32_t Size() const { return mSize; }
|
||||
|
||||
private:
|
||||
// Returns a pointer to the entry value's structured clone data within the
|
||||
// SharedMap's mapped memory region. This is *only* valid shen mData
|
||||
// contains a uint32_t.
|
||||
const char* Data() const
|
||||
{
|
||||
return mMap.Data() + DataOffset();
|
||||
}
|
||||
|
||||
// Returns the offset of the entry value's structured clone data within the
|
||||
// SharedMap's mapped memory region. This is *only* valid shen mData
|
||||
// contains a uint32_t.
|
||||
uint32_t& DataOffset()
|
||||
{
|
||||
return mData.as<uint32_t>();
|
||||
}
|
||||
const uint32_t& DataOffset() const
|
||||
{
|
||||
return mData.as<uint32_t>();
|
||||
}
|
||||
|
||||
public:
|
||||
uint16_t BlobOffset() const { return mBlobOffset; }
|
||||
uint16_t BlobCount() const { return mBlobCount; }
|
||||
|
||||
Span<const RefPtr<BlobImpl>> Blobs()
|
||||
{
|
||||
if (mData.is<StructuredCloneData>()) {
|
||||
return mData.as<StructuredCloneData>().BlobImpls();
|
||||
}
|
||||
return {&mMap.mBlobImpls[mBlobOffset], BlobCount()};
|
||||
}
|
||||
|
||||
private:
|
||||
// Returns the temporary StructuredCloneData object containing the entry's
|
||||
// value. This is *only* value when mData contains a StructuredCloneDAta
|
||||
// object.
|
||||
const StructuredCloneData& Holder() const
|
||||
{
|
||||
return mData.as<StructuredCloneData>();
|
||||
}
|
||||
|
||||
SharedMap& mMap;
|
||||
|
||||
// The entry's (UTF-8 encoded) name, which serves as its key in the map.
|
||||
nsCString mName;
|
||||
|
||||
/**
|
||||
* This member provides a reference to the entry's structured clone data.
|
||||
* Its type varies depending on the state of the entry:
|
||||
*
|
||||
* - For entries which have been snapshotted into a shared memory region,
|
||||
* this is a uint32_t offset into the parent SharedMap's Data() buffer.
|
||||
*
|
||||
* - For entries which have been changed in a WritableSharedMap instance,
|
||||
* but not serialized to a shared memory snapshot yet, this is a
|
||||
* StructuredCloneData instance, containing a process-local copy of the
|
||||
* data. This will be discarded the next time the map is serialized, and
|
||||
* replaced with a buffer offset, as described above.
|
||||
*/
|
||||
Variant<uint32_t, StructuredCloneData> mData;
|
||||
|
||||
// The size, in bytes, of the entry's structured clone data.
|
||||
uint32_t mSize = 0;
|
||||
|
||||
uint16_t mBlobOffset = 0;
|
||||
uint16_t mBlobCount = 0;
|
||||
};
|
||||
|
||||
const nsTArray<Entry*>& EntryArray() const;
|
||||
|
||||
nsTArray<RefPtr<BlobImpl>> mBlobImpls;
|
||||
|
||||
// Rebuilds the entry hashtable mEntries from the values serialized in the
|
||||
// current snapshot, if necessary. The hashtable is rebuilt lazily after
|
||||
// construction and after every Update() call, so this function must be called
|
||||
// before any attempt to access mEntries.
|
||||
Result<Ok, nsresult> MaybeRebuild();
|
||||
void MaybeRebuild() const;
|
||||
|
||||
// Note: This header is included by WebIDL binding headers, and therefore
|
||||
// can't include "windows.h". Since FileDescriptor.h does include "windows.h"
|
||||
// on Windows, we can only forward declare FileDescriptor, and can't include
|
||||
// it as an inline member.
|
||||
UniquePtr<FileDescriptor> mMapFile;
|
||||
// The size of the memory-mapped region backed by mMapFile, in bytes.
|
||||
size_t mMapSize = 0;
|
||||
|
||||
mutable nsClassHashtable<nsCStringHashKey, Entry> mEntries;
|
||||
mutable Maybe<nsTArray<Entry*>> mEntryArray;
|
||||
|
||||
// Manages the memory mapping of the current snapshot. This is initialized
|
||||
// lazily after each SharedMap construction or updated, based on the values in
|
||||
// mMapFile and mMapSize.
|
||||
loader::AutoMemMap mMap;
|
||||
|
||||
bool mWritable = false;
|
||||
|
||||
// Returns a pointer to the beginning of the memory mapped snapshot. Entry
|
||||
// offsets are relative to this pointer, and Entry objects access their
|
||||
// structured clone data by indexing this pointer.
|
||||
char* Data() { return mMap.get<char>().get(); }
|
||||
};
|
||||
|
||||
class WritableSharedMap final : public SharedMap
|
||||
{
|
||||
public:
|
||||
|
||||
WritableSharedMap();
|
||||
|
||||
// Sets the value of the given (UTF-8 encoded) key to a structured clone
|
||||
// snapshot of the given value.
|
||||
void Set(JSContext* cx, const nsACString& name, JS::HandleValue value, ErrorResult& aRv);
|
||||
|
||||
// Deletes the given (UTF-8 encoded) key from the map.
|
||||
void Delete(const nsACString& name);
|
||||
|
||||
|
||||
// Conversion helpers for WebIDL callers
|
||||
void Set(JSContext* aCx, const nsAString& aName, JS::HandleValue aValue, ErrorResult& aRv)
|
||||
{
|
||||
return Set(aCx, NS_ConvertUTF16toUTF8(aName), aValue, aRv);
|
||||
}
|
||||
|
||||
void Delete(const nsAString& aName)
|
||||
{
|
||||
return Delete(NS_ConvertUTF16toUTF8(aName));
|
||||
}
|
||||
|
||||
|
||||
// Flushes any queued changes to a new snapshot, and broadcasts it to all
|
||||
// child SharedMap instances.
|
||||
void Flush();
|
||||
|
||||
|
||||
/**
|
||||
* Returns the read-only SharedMap instance corresponding to this
|
||||
* WritableSharedMap for use in the parent process.
|
||||
*/
|
||||
SharedMap* GetReadOnly();
|
||||
|
||||
|
||||
JSObject* WrapObject(JSContext* aCx, JS::HandleObject aGivenProto) override;
|
||||
|
||||
protected:
|
||||
~WritableSharedMap() override = default;
|
||||
|
||||
private:
|
||||
// The set of (UTF-8 encoded) keys which have changed, or been deleted, since
|
||||
// the last snapshot.
|
||||
nsTArray<nsCString> mChangedKeys;
|
||||
|
||||
RefPtr<SharedMap> mReadOnly;
|
||||
|
||||
bool mPendingFlush = false;
|
||||
|
||||
// Creates a new snapshot of the map, and updates all Entry instance to
|
||||
// reference its data.
|
||||
Result<Ok, nsresult> Serialize();
|
||||
|
||||
void IdleFlush();
|
||||
|
||||
// If there have been any changes since the last snapshot, creates a new
|
||||
// serialization and broadcasts it to all child SharedMap instances.
|
||||
void BroadcastChanges();
|
||||
|
||||
// Marks the given (UTF-8 encoded) key as having changed. This adds it to
|
||||
// mChangedKeys, if not already present, and schedules a flush for the next
|
||||
// time the event loop is idle.
|
||||
nsresult KeyChanged(const nsACString& aName);
|
||||
};
|
||||
|
||||
} // ipc
|
||||
} // dom
|
||||
} // mozilla
|
||||
|
||||
#endif // dom_ipc_SharedMap_h
|
54
dom/ipc/SharedMapChangeEvent.h
Normal file
54
dom/ipc/SharedMapChangeEvent.h
Normal file
@ -0,0 +1,54 @@
|
||||
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
|
||||
/* vim: set ts=8 sts=4 et sw=4 tw=99: */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#ifndef dom_ipc_SharedMapChangeEvent_h
|
||||
#define dom_ipc_SharedMapChangeEvent_h
|
||||
|
||||
#include "mozilla/dom/MozSharedMapBinding.h"
|
||||
|
||||
#include "mozilla/dom/Event.h"
|
||||
#include "nsTArray.h"
|
||||
|
||||
namespace mozilla {
|
||||
namespace dom {
|
||||
namespace ipc {
|
||||
|
||||
class SharedMapChangeEvent final : public Event
|
||||
{
|
||||
public:
|
||||
NS_INLINE_DECL_REFCOUNTING_INHERITED(SharedMapChangeEvent, Event)
|
||||
|
||||
JSObject* WrapObjectInternal(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) override
|
||||
{
|
||||
return MozSharedMapChangeEvent_Binding::Wrap(aCx, this, aGivenProto);
|
||||
}
|
||||
|
||||
static already_AddRefed<SharedMapChangeEvent>
|
||||
Constructor(EventTarget* aEventTarget,
|
||||
const nsAString& aType,
|
||||
const MozSharedMapChangeEventInit& aInit);
|
||||
|
||||
void GetChangedKeys(nsTArray<nsString>& aChangedKeys) const
|
||||
{
|
||||
aChangedKeys.AppendElements(mChangedKeys);
|
||||
}
|
||||
|
||||
protected:
|
||||
~SharedMapChangeEvent() override = default;
|
||||
|
||||
private:
|
||||
explicit SharedMapChangeEvent(EventTarget* aEventTarget)
|
||||
: Event(aEventTarget, nullptr, nullptr)
|
||||
{}
|
||||
|
||||
nsTArray<nsString> mChangedKeys;
|
||||
};
|
||||
|
||||
} // ipc
|
||||
} // dom
|
||||
} // mozilla
|
||||
|
||||
#endif // dom_ipc_SharedMapChangeEvent_h
|
@ -29,6 +29,11 @@ namespace mozilla {
|
||||
namespace dom {
|
||||
namespace ipc {
|
||||
|
||||
using mozilla::ipc::AutoIPCStream;
|
||||
using mozilla::ipc::IPCStream;
|
||||
using mozilla::ipc::PBackgroundChild;
|
||||
using mozilla::ipc::PBackgroundParent;
|
||||
|
||||
StructuredCloneData::StructuredCloneData()
|
||||
: StructuredCloneData(StructuredCloneHolder::TransferringSupported)
|
||||
{}
|
||||
|
@ -15,6 +15,8 @@ XPIDL_MODULE = 'dom'
|
||||
|
||||
EXPORTS.mozilla.dom.ipc += [
|
||||
'IdType.h',
|
||||
'SharedMap.h',
|
||||
'SharedMapChangeEvent.h',
|
||||
'SharedStringMap.h',
|
||||
'StructuredCloneData.h',
|
||||
]
|
||||
@ -69,6 +71,7 @@ UNIFIED_SOURCES += [
|
||||
'PermissionMessageUtils.cpp',
|
||||
'PreallocatedProcessManager.cpp',
|
||||
'ProcessPriorityManager.cpp',
|
||||
'SharedMap.cpp',
|
||||
'SharedStringMap.cpp',
|
||||
'StructuredCloneData.cpp',
|
||||
'TabChild.cpp',
|
||||
@ -172,6 +175,7 @@ JAR_MANIFESTS += ['jar.mn']
|
||||
BROWSER_CHROME_MANIFESTS += ['tests/browser.ini']
|
||||
MOCHITEST_CHROME_MANIFESTS += ['tests/chrome.ini']
|
||||
MOCHITEST_MANIFESTS += ['tests/mochitest.ini']
|
||||
XPCSHELL_TESTS_MANIFESTS += ['tests/xpcshell.ini']
|
||||
|
||||
CXXFLAGS += CONFIG['TK_CFLAGS']
|
||||
|
||||
|
162
dom/ipc/tests/test_sharedMap.js
Normal file
162
dom/ipc/tests/test_sharedMap.js
Normal file
@ -0,0 +1,162 @@
|
||||
"use strict";
|
||||
|
||||
ChromeUtils.import("resource://gre/modules/AppConstants.jsm");
|
||||
ChromeUtils.import("resource://gre/modules/Services.jsm");
|
||||
ChromeUtils.import("resource://testing-common/ExtensionXPCShellUtils.jsm");
|
||||
|
||||
const remote = AppConstants.platform !== "android";
|
||||
|
||||
ExtensionTestUtils.init(this);
|
||||
|
||||
let contentPage;
|
||||
|
||||
function getContents(sharedMap = Services.cpmm.sharedData) {
|
||||
return {
|
||||
keys: Array.from(sharedMap.keys()),
|
||||
values: Array.from(sharedMap.values()),
|
||||
entries: Array.from(sharedMap.entries()),
|
||||
getValues: Array.from(sharedMap.keys(),
|
||||
key => sharedMap.get(key)),
|
||||
};
|
||||
}
|
||||
|
||||
function checkMap(contents, expected) {
|
||||
expected = Array.from(expected);
|
||||
|
||||
equal(contents.keys.length, expected.length,
|
||||
"Got correct number of keys");
|
||||
equal(contents.values.length, expected.length,
|
||||
"Got correct number of values");
|
||||
equal(contents.entries.length, expected.length,
|
||||
"Got correct number of entries");
|
||||
|
||||
for (let [i, [key, val]] of contents.entries.entries()) {
|
||||
equal(key, contents.keys[i], `keys()[${i}] matches entries()[${i}]`);
|
||||
deepEqual(val, contents.values[i], `values()[${i}] matches entries()[${i}]`);
|
||||
}
|
||||
|
||||
expected.sort(([a], [b]) => a.localeCompare(b));
|
||||
contents.entries.sort(([a], [b]) => a.localeCompare(b));
|
||||
|
||||
for (let [i, [key, val]] of contents.entries.entries()) {
|
||||
equal(key, expected[i][0], `expected[${i}].key matches entries()[${i}].key`);
|
||||
deepEqual(val, expected[i][1], `expected[${i}].value matches entries()[${i}].value`);
|
||||
}
|
||||
}
|
||||
|
||||
function checkParentMap(expected) {
|
||||
info("Checking parent map");
|
||||
checkMap(getContents(Services.ppmm.sharedData), expected);
|
||||
}
|
||||
|
||||
async function checkContentMaps(expected, parentOnly = false) {
|
||||
info("Checking in-process content map");
|
||||
checkMap(getContents(Services.cpmm.sharedData), expected);
|
||||
|
||||
if (!parentOnly) {
|
||||
info("Checking out-of-process content map");
|
||||
let contents = await contentPage.spawn(undefined, getContents);
|
||||
checkMap(contents, expected);
|
||||
}
|
||||
}
|
||||
|
||||
add_task(async function setup() {
|
||||
contentPage = await ExtensionTestUtils.loadContentPage("about:blank", {remote});
|
||||
registerCleanupFunction(() => contentPage.close());
|
||||
});
|
||||
|
||||
add_task(async function test_sharedMap() {
|
||||
let {sharedData} = Services.ppmm;
|
||||
|
||||
info("Check that parent and child maps are both initially empty");
|
||||
|
||||
checkParentMap([]);
|
||||
await checkContentMaps([]);
|
||||
|
||||
let expected = [
|
||||
["foo-a", {"foo": "a"}],
|
||||
["foo-b", {"foo": "b"}],
|
||||
["bar-c", null],
|
||||
["bar-d", 42],
|
||||
];
|
||||
|
||||
function setKey(key, val) {
|
||||
sharedData.set(key, val);
|
||||
expected = expected.filter(([k]) => k != key);
|
||||
expected.push([key, val]);
|
||||
}
|
||||
function deleteKey(key) {
|
||||
sharedData.delete(key);
|
||||
expected = expected.filter(([k]) => k != key);
|
||||
}
|
||||
|
||||
for (let [key, val] of expected) {
|
||||
sharedData.set(key, val);
|
||||
}
|
||||
|
||||
info("Add some entries, test that they are initially only available in the parent");
|
||||
|
||||
checkParentMap(expected);
|
||||
await checkContentMaps([]);
|
||||
|
||||
info("Flush. Check that changes are visible in both parent and children");
|
||||
|
||||
sharedData.flush();
|
||||
|
||||
checkParentMap(expected);
|
||||
await checkContentMaps(expected);
|
||||
|
||||
info("Add another entry. Check that it is initially only available in the parent");
|
||||
|
||||
let oldExpected = Array.from(expected);
|
||||
|
||||
setKey("baz-a", {meh: "meh"});
|
||||
|
||||
// When we do several checks in a row, we can't check the values in
|
||||
// the content process, since the async checks may allow the idle
|
||||
// flush task to run, and update it before we're ready.
|
||||
|
||||
checkParentMap(expected);
|
||||
checkContentMaps(oldExpected, true);
|
||||
|
||||
info("Add another entry. Check that both new entries are only available in the parent");
|
||||
|
||||
setKey("baz-a", {meh: 12});
|
||||
|
||||
checkParentMap(expected);
|
||||
checkContentMaps(oldExpected, true);
|
||||
|
||||
info("Delete an entry. Check that all changes are only visible in the parent");
|
||||
|
||||
deleteKey("foo-b");
|
||||
|
||||
checkParentMap(expected);
|
||||
checkContentMaps(oldExpected, true);
|
||||
|
||||
info("Flush. Check that all entries are available in both parent and children");
|
||||
|
||||
sharedData.flush();
|
||||
|
||||
checkParentMap(expected);
|
||||
await checkContentMaps(expected);
|
||||
|
||||
|
||||
info("Test that entries are automatically flushed on idle:");
|
||||
|
||||
info("Add a new entry. Check that it is initially only available in the parent");
|
||||
|
||||
// Test the idle flush task.
|
||||
oldExpected = Array.from(expected);
|
||||
|
||||
setKey("thing", "stuff");
|
||||
|
||||
checkParentMap(expected);
|
||||
checkContentMaps(oldExpected, true);
|
||||
|
||||
info("Wait for an idle timeout. Check that changes are now visible in all children");
|
||||
|
||||
await new Promise(resolve => ChromeUtils.idleDispatch(resolve));
|
||||
|
||||
checkParentMap(expected);
|
||||
await checkContentMaps(expected);
|
||||
});
|
2
dom/ipc/tests/xpcshell.ini
Normal file
2
dom/ipc/tests/xpcshell.ini
Normal file
@ -0,0 +1,2 @@
|
||||
|
||||
[test_sharedMap.js]
|
@ -171,7 +171,7 @@ ConnectionWorker::Create(WorkerPrivate* aWorkerPrivate, ErrorResult& aRv)
|
||||
RefPtr<InitializeRunnable> runnable =
|
||||
new InitializeRunnable(aWorkerPrivate, c->mProxy, networkInfo);
|
||||
|
||||
runnable->Dispatch(Terminating, aRv);
|
||||
runnable->Dispatch(Canceling, aRv);
|
||||
if (NS_WARN_IF(aRv.Failed())) {
|
||||
return nullptr;
|
||||
}
|
||||
|
@ -1833,7 +1833,7 @@ Notification::GetPermission(nsIGlobalObject* aGlobal, ErrorResult& aRv)
|
||||
MOZ_ASSERT(worker);
|
||||
RefPtr<GetPermissionRunnable> r =
|
||||
new GetPermissionRunnable(worker);
|
||||
r->Dispatch(Terminating, aRv);
|
||||
r->Dispatch(Canceling, aRv);
|
||||
if (aRv.Failed()) {
|
||||
return NotificationPermission::Denied;
|
||||
}
|
||||
@ -2589,7 +2589,7 @@ Notification::ShowPersistentNotification(JSContext* aCx,
|
||||
worker->AssertIsOnWorkerThread();
|
||||
RefPtr<CheckLoadRunnable> loadChecker =
|
||||
new CheckLoadRunnable(worker, NS_ConvertUTF16toUTF8(aScope));
|
||||
loadChecker->Dispatch(Terminating, aRv);
|
||||
loadChecker->Dispatch(Canceling, aRv);
|
||||
if (aRv.Failed()) {
|
||||
return nullptr;
|
||||
}
|
||||
|
@ -358,7 +358,7 @@ ExecuteOpOnMainOrWorkerThread(nsIGlobalObject* aGlobal,
|
||||
RefPtr<EstimateWorkerMainThreadRunnable> runnnable =
|
||||
new EstimateWorkerMainThreadRunnable(promiseProxy->GetWorkerPrivate(),
|
||||
promiseProxy);
|
||||
runnnable->Dispatch(Terminating, aRv);
|
||||
runnnable->Dispatch(Canceling, aRv);
|
||||
|
||||
break;
|
||||
}
|
||||
@ -367,7 +367,7 @@ ExecuteOpOnMainOrWorkerThread(nsIGlobalObject* aGlobal,
|
||||
RefPtr<PersistedWorkerMainThreadRunnable> runnnable =
|
||||
new PersistedWorkerMainThreadRunnable(promiseProxy->GetWorkerPrivate(),
|
||||
promiseProxy);
|
||||
runnnable->Dispatch(Terminating, aRv);
|
||||
runnnable->Dispatch(Canceling, aRv);
|
||||
|
||||
break;
|
||||
}
|
||||
|
@ -198,7 +198,7 @@ RemoteServiceWorkerContainerImpl::RemoteServiceWorkerContainerImpl()
|
||||
MOZ_DIAGNOSTIC_ASSERT(workerPrivate);
|
||||
|
||||
workerHolderToken =
|
||||
WorkerHolderToken::Create(workerPrivate, Terminating,
|
||||
WorkerHolderToken::Create(workerPrivate, Canceling,
|
||||
WorkerHolderToken::AllowIdleShutdownStart);
|
||||
|
||||
if (NS_WARN_IF(!workerHolderToken)) {
|
||||
|
@ -102,7 +102,7 @@ RemoteServiceWorkerImpl::RemoteServiceWorkerImpl(const ServiceWorkerDescriptor&
|
||||
MOZ_DIAGNOSTIC_ASSERT(workerPrivate);
|
||||
|
||||
workerHolderToken =
|
||||
WorkerHolderToken::Create(workerPrivate, Terminating,
|
||||
WorkerHolderToken::Create(workerPrivate, Canceling,
|
||||
WorkerHolderToken::AllowIdleShutdownStart);
|
||||
|
||||
if (NS_WARN_IF(!workerHolderToken)) {
|
||||
|
@ -123,7 +123,7 @@ RemoteServiceWorkerRegistrationImpl::RemoteServiceWorkerRegistrationImpl(const S
|
||||
MOZ_DIAGNOSTIC_ASSERT(workerPrivate);
|
||||
|
||||
workerHolderToken =
|
||||
WorkerHolderToken::Create(workerPrivate, Terminating,
|
||||
WorkerHolderToken::Create(workerPrivate, Canceling,
|
||||
WorkerHolderToken::AllowIdleShutdownStart);
|
||||
|
||||
if (NS_WARN_IF(!workerHolderToken)) {
|
||||
|
@ -284,7 +284,7 @@ public:
|
||||
MOZ_ASSERT(mWorkerPrivate);
|
||||
mWorkerPrivate->AssertIsOnWorkerThread();
|
||||
MOZ_ASSERT(!mWorkerHolderAdded);
|
||||
mWorkerHolderAdded = HoldWorker(mWorkerPrivate, Terminating);
|
||||
mWorkerHolderAdded = HoldWorker(mWorkerPrivate, Canceling);
|
||||
return mWorkerHolderAdded;
|
||||
}
|
||||
|
||||
@ -323,7 +323,7 @@ public:
|
||||
{
|
||||
MOZ_ASSERT(mWorkerPrivate);
|
||||
mWorkerPrivate->AssertIsOnWorkerThread();
|
||||
if (aStatus < Terminating) {
|
||||
if (aStatus < Canceling) {
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -719,7 +719,7 @@ public:
|
||||
// case the registration/update promise will be rejected
|
||||
// 2. A new service worker is registered which will terminate the current
|
||||
// installing worker.
|
||||
if (NS_WARN_IF(!HoldWorker(mWorkerPrivate, Terminating))) {
|
||||
if (NS_WARN_IF(!HoldWorker(mWorkerPrivate, Canceling))) {
|
||||
NS_WARNING("LifeCycleEventWatcher failed to add feature.");
|
||||
ReportResult(false);
|
||||
return false;
|
||||
@ -731,7 +731,7 @@ public:
|
||||
bool
|
||||
Notify(WorkerStatus aStatus) override
|
||||
{
|
||||
if (aStatus < Terminating) {
|
||||
if (aStatus < Canceling) {
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -1974,7 +1974,7 @@ ServiceWorkerPrivate::TerminateWorker()
|
||||
}
|
||||
}
|
||||
|
||||
Unused << NS_WARN_IF(!mWorkerPrivate->Terminate());
|
||||
Unused << NS_WARN_IF(!mWorkerPrivate->Cancel());
|
||||
mWorkerPrivate = nullptr;
|
||||
mSupportsArray.Clear();
|
||||
|
||||
|
@ -271,7 +271,7 @@ public:
|
||||
void
|
||||
Dispatch(ErrorResult& aRv)
|
||||
{
|
||||
WorkerMainThreadRunnable::Dispatch(Terminating, aRv);
|
||||
WorkerMainThreadRunnable::Dispatch(Canceling, aRv);
|
||||
}
|
||||
|
||||
private:
|
||||
@ -325,7 +325,7 @@ public:
|
||||
void
|
||||
Dispatch(ErrorResult& aRv)
|
||||
{
|
||||
WorkerMainThreadRunnable::Dispatch(Terminating, aRv);
|
||||
WorkerMainThreadRunnable::Dispatch(Canceling, aRv);
|
||||
}
|
||||
|
||||
nsIURI*
|
||||
@ -381,7 +381,7 @@ URLWorker::CreateObjectURL(const GlobalObject& aGlobal, Blob& aBlob,
|
||||
RefPtr<CreateURLRunnable> runnable =
|
||||
new CreateURLRunnable(workerPrivate, blobImpl, aResult);
|
||||
|
||||
runnable->Dispatch(Terminating, aRv);
|
||||
runnable->Dispatch(Canceling, aRv);
|
||||
if (NS_WARN_IF(aRv.Failed())) {
|
||||
return;
|
||||
}
|
||||
@ -404,7 +404,7 @@ URLWorker::RevokeObjectURL(const GlobalObject& aGlobal, const nsAString& aUrl,
|
||||
RefPtr<RevokeURLRunnable> runnable =
|
||||
new RevokeURLRunnable(workerPrivate, aUrl);
|
||||
|
||||
runnable->Dispatch(Terminating, aRv);
|
||||
runnable->Dispatch(Canceling, aRv);
|
||||
if (NS_WARN_IF(aRv.Failed())) {
|
||||
return;
|
||||
}
|
||||
@ -427,7 +427,7 @@ URLWorker::IsValidURL(const GlobalObject& aGlobal, const nsAString& aUrl,
|
||||
RefPtr<IsValidURLRunnable> runnable =
|
||||
new IsValidURLRunnable(workerPrivate, aUrl);
|
||||
|
||||
runnable->Dispatch(Terminating, aRv);
|
||||
runnable->Dispatch(Canceling, aRv);
|
||||
if (NS_WARN_IF(aRv.Failed())) {
|
||||
return false;
|
||||
}
|
||||
@ -462,7 +462,7 @@ URLWorker::Init(const nsAString& aURL, const Optional<nsAString>& aBase,
|
||||
// create url proxy
|
||||
RefPtr<ConstructorRunnable> runnable =
|
||||
new ConstructorRunnable(mWorkerPrivate, aURL, aBase);
|
||||
runnable->Dispatch(Terminating, aRv);
|
||||
runnable->Dispatch(Canceling, aRv);
|
||||
if (NS_WARN_IF(aRv.Failed())) {
|
||||
return;
|
||||
}
|
||||
@ -489,7 +489,7 @@ URLWorker::SetHref(const nsAString& aHref, ErrorResult& aRv)
|
||||
|
||||
RefPtr<ConstructorRunnable> runnable =
|
||||
new ConstructorRunnable(mWorkerPrivate, aHref, Optional<nsAString>());
|
||||
runnable->Dispatch(Terminating, aRv);
|
||||
runnable->Dispatch(Canceling, aRv);
|
||||
if (NS_WARN_IF(aRv.Failed())) {
|
||||
return;
|
||||
}
|
||||
|
@ -1398,7 +1398,7 @@ WebSocket::ConstructorCommon(const GlobalObject& aGlobal,
|
||||
new InitRunnable(workerPrivate, webSocketImpl, !!aTransportProvider, aUrl,
|
||||
protocolArray, nsDependentCString(file.get()), lineno,
|
||||
column);
|
||||
runnable->Dispatch(Terminating, aRv);
|
||||
runnable->Dispatch(Canceling, aRv);
|
||||
if (NS_WARN_IF(aRv.Failed())) {
|
||||
return nullptr;
|
||||
}
|
||||
@ -1501,7 +1501,7 @@ WebSocket::ConstructorCommon(const GlobalObject& aGlobal,
|
||||
"not yet implemented");
|
||||
RefPtr<AsyncOpenRunnable> runnable =
|
||||
new AsyncOpenRunnable(webSocket->mImpl);
|
||||
runnable->Dispatch(Terminating, aRv);
|
||||
runnable->Dispatch(Canceling, aRv);
|
||||
if (NS_WARN_IF(aRv.Failed())) {
|
||||
return nullptr;
|
||||
}
|
||||
|
@ -2214,7 +2214,7 @@ LoadAllScripts(WorkerPrivate* aWorkerPrivate,
|
||||
aWorkerPrivate->AssertIsOnWorkerThread();
|
||||
NS_ASSERTION(!aLoadInfos.IsEmpty(), "Bad arguments!");
|
||||
|
||||
AutoSyncLoopHolder syncLoop(aWorkerPrivate, Terminating);
|
||||
AutoSyncLoopHolder syncLoop(aWorkerPrivate, Canceling);
|
||||
nsCOMPtr<nsIEventTarget> syncLoopTarget = syncLoop.GetEventTarget();
|
||||
if (!syncLoopTarget) {
|
||||
aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
|
||||
@ -2298,7 +2298,7 @@ ChannelFromScriptURLWorkerThread(JSContext* aCx,
|
||||
new ChannelGetterRunnable(aParent, aScriptURL, aLoadInfo);
|
||||
|
||||
ErrorResult rv;
|
||||
getter->Dispatch(Terminating, rv);
|
||||
getter->Dispatch(Canceling, rv);
|
||||
if (rv.Failed()) {
|
||||
NS_ERROR("Failed to dispatch!");
|
||||
return rv.StealNSResult();
|
||||
|
@ -130,7 +130,7 @@ Worker::Terminate()
|
||||
NS_ASSERT_OWNINGTHREAD(Worker);
|
||||
|
||||
if (mWorkerPrivate) {
|
||||
mWorkerPrivate->Terminate();
|
||||
mWorkerPrivate->Cancel();
|
||||
mWorkerPrivate = nullptr;
|
||||
}
|
||||
}
|
||||
|
@ -43,7 +43,7 @@ WorkerHolder::HoldWorker(WorkerPrivate* aWorkerPrivate,
|
||||
{
|
||||
AssertOnOwningThread(mThread);
|
||||
MOZ_ASSERT(aWorkerPrivate);
|
||||
MOZ_ASSERT(aFailStatus >= Terminating);
|
||||
MOZ_ASSERT(aFailStatus >= Canceling);
|
||||
|
||||
aWorkerPrivate->AssertIsOnWorkerThread();
|
||||
|
||||
|
@ -25,8 +25,6 @@ class WorkerPrivate;
|
||||
* +=============+=============+=================+==========+
|
||||
* | Closing | yes | no | no |
|
||||
* +-------------+-------------+-----------------+----------+
|
||||
* | Terminating | yes | yes | yes |
|
||||
* +-------------+-------------+-----------------+----------+
|
||||
* | Canceling | yes | yes | yes |
|
||||
* +-------------+-------------+-----------------+----------+
|
||||
* | Killing | yes | yes | yes |
|
||||
@ -48,11 +46,6 @@ enum WorkerStatus
|
||||
// during this status yet.
|
||||
Closing,
|
||||
|
||||
// Outer script called terminate() on the worker or the worker object was
|
||||
// garbage collected in its outer script. Setting this status causes the
|
||||
// worker to abort immediately and clear its queue of events.
|
||||
Terminating,
|
||||
|
||||
// Either the user navigated away from the owning page or the owning page fell
|
||||
// out of bfcache. Setting this status causes the worker to abort immediately.
|
||||
// Since the page has gone away the worker may not post any messages.
|
||||
|
@ -92,8 +92,8 @@ WorkerHolderToken::Notify(WorkerStatus aStatus)
|
||||
{
|
||||
NS_ASSERT_OWNINGTHREAD(WorkerHolderToken);
|
||||
|
||||
// When the service worker thread is stopped we will get Terminating,
|
||||
// but nothing higher than that. We must shut things down at Terminating.
|
||||
// When the service worker thread is stopped we will get Canceling,
|
||||
// but nothing higher than that. We must shut things down at Canceling.
|
||||
if (aStatus < mShutdownStatus || mShuttingDown) {
|
||||
return true;
|
||||
}
|
||||
|
@ -176,7 +176,7 @@ WorkerNavigator::GetUserAgent(nsString& aUserAgent, CallerType aCallerType,
|
||||
RefPtr<GetUserAgentRunnable> runnable =
|
||||
new GetUserAgentRunnable(workerPrivate, aUserAgent);
|
||||
|
||||
runnable->Dispatch(Terminating, aRv);
|
||||
runnable->Dispatch(Canceling, aRv);
|
||||
}
|
||||
|
||||
uint64_t
|
||||
|
@ -507,8 +507,8 @@ public:
|
||||
: WorkerControlRunnable(aWorkerPrivate, WorkerThreadUnchangedBusyCount),
|
||||
mStatus(aStatus)
|
||||
{
|
||||
MOZ_ASSERT(aStatus == Closing || aStatus == Terminating ||
|
||||
aStatus == Canceling || aStatus == Killing);
|
||||
MOZ_ASSERT(aStatus == Closing || aStatus == Canceling ||
|
||||
aStatus == Killing);
|
||||
}
|
||||
|
||||
private:
|
||||
@ -1834,7 +1834,7 @@ WorkerPrivate::Notify(WorkerStatus aStatus)
|
||||
return true;
|
||||
}
|
||||
|
||||
NS_ASSERTION(aStatus != Terminating || mQueuedRunnables.IsEmpty(),
|
||||
NS_ASSERTION(aStatus != Canceling || mQueuedRunnables.IsEmpty(),
|
||||
"Shouldn't have anything queued!");
|
||||
|
||||
// Anything queued will be discarded.
|
||||
@ -1889,7 +1889,7 @@ WorkerPrivate::Freeze(nsPIDOMWindowInner* aWindow)
|
||||
{
|
||||
MutexAutoLock lock(mMutex);
|
||||
|
||||
if (mParentStatus >= Terminating) {
|
||||
if (mParentStatus >= Canceling) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
@ -1946,7 +1946,7 @@ WorkerPrivate::Thaw(nsPIDOMWindowInner* aWindow)
|
||||
{
|
||||
MutexAutoLock lock(mMutex);
|
||||
|
||||
if (mParentStatus >= Terminating) {
|
||||
if (mParentStatus >= Canceling) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
@ -1997,7 +1997,7 @@ WorkerPrivate::ParentWindowResumed()
|
||||
{
|
||||
MutexAutoLock lock(mMutex);
|
||||
|
||||
if (mParentStatus >= Terminating) {
|
||||
if (mParentStatus >= Canceling) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
@ -2024,7 +2024,7 @@ WorkerPrivate::PropagateFirstPartyStorageAccessGranted()
|
||||
{
|
||||
MutexAutoLock lock(mMutex);
|
||||
|
||||
if (mParentStatus >= Terminating) {
|
||||
if (mParentStatus >= Canceling) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
@ -2062,7 +2062,7 @@ WorkerPrivate::ModifyBusyCount(bool aIncrease)
|
||||
bool shouldCancel;
|
||||
{
|
||||
MutexAutoLock lock(mMutex);
|
||||
shouldCancel = mParentStatus == Terminating;
|
||||
shouldCancel = mParentStatus == Canceling;
|
||||
}
|
||||
|
||||
if (shouldCancel && !Cancel()) {
|
||||
@ -3255,13 +3255,12 @@ WorkerPrivate::DoRunLoop(JSContext* aCx)
|
||||
Maybe<JSAutoRealm> workerCompartment;
|
||||
|
||||
for (;;) {
|
||||
WorkerStatus currentStatus, previousStatus;
|
||||
WorkerStatus currentStatus;
|
||||
bool debuggerRunnablesPending = false;
|
||||
bool normalRunnablesPending = false;
|
||||
|
||||
{
|
||||
MutexAutoLock lock(mMutex);
|
||||
previousStatus = mStatus;
|
||||
|
||||
while (mControlQueue.IsEmpty() &&
|
||||
!(debuggerRunnablesPending = !mDebuggerQueue.IsEmpty()) &&
|
||||
@ -3285,8 +3284,8 @@ WorkerPrivate::DoRunLoop(JSContext* aCx)
|
||||
// if all holders are done then we can kill this thread.
|
||||
if (currentStatus != Running && !HasActiveHolders()) {
|
||||
|
||||
// If we just changed status, we must schedule the current runnables.
|
||||
if (previousStatus != Running && currentStatus != Killing) {
|
||||
// Now we are ready to kill the worker thread.
|
||||
if (currentStatus == Canceling) {
|
||||
NotifyInternal(Killing);
|
||||
|
||||
#ifdef DEBUG
|
||||
@ -3303,7 +3302,7 @@ WorkerPrivate::DoRunLoop(JSContext* aCx)
|
||||
// If we're supposed to die then we should exit the loop.
|
||||
if (currentStatus == Killing) {
|
||||
// The ClientSource should be cleared in NotifyInternal() when we reach
|
||||
// or pass Terminating.
|
||||
// or pass Canceling.
|
||||
MOZ_DIAGNOSTIC_ASSERT(!mClientSource);
|
||||
|
||||
// Flush uncaught rejections immediately, without
|
||||
@ -3543,7 +3542,7 @@ WorkerPrivate::GetClientInfo() const
|
||||
AssertIsOnWorkerThread();
|
||||
Maybe<ClientInfo> clientInfo;
|
||||
if (!mClientSource) {
|
||||
MOZ_DIAGNOSTIC_ASSERT(mStatus >= Terminating);
|
||||
MOZ_DIAGNOSTIC_ASSERT(mStatus >= Canceling);
|
||||
return clientInfo;
|
||||
}
|
||||
clientInfo.emplace(mClientSource->Info());
|
||||
@ -3566,7 +3565,7 @@ WorkerPrivate::GetController()
|
||||
AssertIsOnWorkerThread();
|
||||
{
|
||||
MutexAutoLock lock(mMutex);
|
||||
if (mStatus >= Terminating) {
|
||||
if (mStatus >= Canceling) {
|
||||
return Maybe<ServiceWorkerDescriptor>();
|
||||
}
|
||||
}
|
||||
@ -3582,7 +3581,7 @@ WorkerPrivate::Control(const ServiceWorkerDescriptor& aServiceWorker)
|
||||
MOZ_DIAGNOSTIC_ASSERT(Type() != WorkerTypeService);
|
||||
{
|
||||
MutexAutoLock lock(mMutex);
|
||||
if (mStatus >= Terminating) {
|
||||
if (mStatus >= Canceling) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
@ -3605,7 +3604,7 @@ WorkerPrivate::ExecutionReady()
|
||||
AssertIsOnWorkerThread();
|
||||
{
|
||||
MutexAutoLock lock(mMutex);
|
||||
if (mStatus >= Terminating) {
|
||||
if (mStatus >= Canceling) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
@ -4195,7 +4194,7 @@ already_AddRefed<nsIEventTarget>
|
||||
WorkerPrivate::CreateNewSyncLoop(WorkerStatus aFailStatus)
|
||||
{
|
||||
AssertIsOnWorkerThread();
|
||||
MOZ_ASSERT(aFailStatus >= Terminating,
|
||||
MOZ_ASSERT(aFailStatus >= Canceling,
|
||||
"Sync loops can be created when the worker is in Running/Closing state!");
|
||||
|
||||
{
|
||||
@ -4592,7 +4591,7 @@ WorkerPrivate::NotifyInternal(WorkerStatus aStatus)
|
||||
return true;
|
||||
}
|
||||
|
||||
if (aStatus >= Terminating) {
|
||||
if (aStatus >= Canceling) {
|
||||
MutexAutoUnlock unlock(mMutex);
|
||||
mClientSource.reset();
|
||||
if (mScope) {
|
||||
@ -4680,9 +4679,7 @@ WorkerPrivate::NotifyInternal(WorkerStatus aStatus)
|
||||
return true;
|
||||
}
|
||||
|
||||
MOZ_ASSERT(aStatus == Terminating ||
|
||||
aStatus == Canceling ||
|
||||
aStatus == Killing);
|
||||
MOZ_ASSERT(aStatus == Canceling || aStatus == Killing);
|
||||
|
||||
// Always abort the script.
|
||||
return false;
|
||||
@ -5076,7 +5073,7 @@ WorkerPrivate::StartCancelingTimer()
|
||||
// This is not needed if we are already in an advanced shutdown state.
|
||||
{
|
||||
MutexAutoLock lock(mMutex);
|
||||
if (ParentStatus() >= Terminating) {
|
||||
if (ParentStatus() >= Canceling) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
@ -173,12 +173,6 @@ public:
|
||||
return Notify(Killing);
|
||||
}
|
||||
|
||||
bool
|
||||
Terminate()
|
||||
{
|
||||
return Notify(Terminating);
|
||||
}
|
||||
|
||||
bool
|
||||
Close();
|
||||
|
||||
@ -590,7 +584,7 @@ public:
|
||||
AssertIsOnParentThread();
|
||||
|
||||
MutexAutoLock lock(mMutex);
|
||||
return mParentStatus < Terminating;
|
||||
return mParentStatus < Canceling;
|
||||
}
|
||||
|
||||
WorkerStatus
|
||||
@ -1226,7 +1220,7 @@ private:
|
||||
status = mStatus;
|
||||
}
|
||||
|
||||
if (status < Terminating) {
|
||||
if (status < Canceling) {
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -1342,9 +1336,8 @@ private:
|
||||
// 1. GC/CC - When the worker is in idle state (busycount == 0), it allows to
|
||||
// traverse the 'hidden' mParentEventTargetRef pointer. This is the exposed
|
||||
// Worker webidl object. Doing this, CC will be able to detect a cycle and
|
||||
// Unlink is called. In Unlink, Worker calls Terminate().
|
||||
// 2. Worker::Terminate() is called - the shutdown procedure starts
|
||||
// immediately.
|
||||
// Unlink is called. In Unlink, Worker calls Cancel().
|
||||
// 2. Worker::Cancel() is called - the shutdown procedure starts immediately.
|
||||
// 3. WorkerScope::Close() is called - Similar to point 2.
|
||||
// 4. xpcom-shutdown notification - We call Kill().
|
||||
RefPtr<Worker> mParentEventTargetRef;
|
||||
|
@ -409,7 +409,7 @@ protected:
|
||||
public:
|
||||
// Dispatch the runnable to the main thread. If dispatch to main thread
|
||||
// fails, or if the worker is in a state equal or greater of aFailStatus, an
|
||||
// error will be reported on aRv. Normally you want to use 'Terminating' for
|
||||
// error will be reported on aRv. Normally you want to use 'Canceling' for
|
||||
// aFailStatus, except if you want an infallible runnable. In this case, use
|
||||
// 'Killing'.
|
||||
// In that case the error MUST be propagated out to script.
|
||||
|
@ -1804,7 +1804,7 @@ XMLHttpRequestWorker::SendInternal(SendRunnable* aRunnable,
|
||||
nsCOMPtr<nsIEventTarget> syncLoopTarget;
|
||||
bool isSyncXHR = mProxy->mIsSyncXHR;
|
||||
if (isSyncXHR) {
|
||||
autoSyncLoop.emplace(mWorkerPrivate, Terminating);
|
||||
autoSyncLoop.emplace(mWorkerPrivate, Canceling);
|
||||
syncLoopTarget = autoSyncLoop->GetEventTarget();
|
||||
if (!syncLoopTarget) {
|
||||
aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
|
||||
@ -1819,7 +1819,7 @@ XMLHttpRequestWorker::SendInternal(SendRunnable* aRunnable,
|
||||
|
||||
mStateData.mFlagSend = true;
|
||||
|
||||
aRunnable->Dispatch(Terminating, aRv);
|
||||
aRunnable->Dispatch(Canceling, aRv);
|
||||
if (aRv.Failed()) {
|
||||
// Dispatch() may have spun the event loop and we may have already unrooted.
|
||||
// If so we don't want autoUnpin to try again.
|
||||
@ -1888,7 +1888,7 @@ XMLHttpRequestWorker::Open(const nsACString& aMethod,
|
||||
mTimeout, mResponseType);
|
||||
|
||||
++mProxy->mOpenCount;
|
||||
runnable->Dispatch(Terminating, aRv);
|
||||
runnable->Dispatch(Canceling, aRv);
|
||||
if (aRv.Failed()) {
|
||||
if (mProxy && !--mProxy->mOpenCount) {
|
||||
ReleaseProxy();
|
||||
@ -1925,7 +1925,7 @@ XMLHttpRequestWorker::SetRequestHeader(const nsACString& aHeader,
|
||||
|
||||
RefPtr<SetRequestHeaderRunnable> runnable =
|
||||
new SetRequestHeaderRunnable(mWorkerPrivate, mProxy, aHeader, aValue);
|
||||
runnable->Dispatch(Terminating, aRv);
|
||||
runnable->Dispatch(Canceling, aRv);
|
||||
}
|
||||
|
||||
void
|
||||
@ -1948,7 +1948,7 @@ XMLHttpRequestWorker::SetTimeout(uint32_t aTimeout, ErrorResult& aRv)
|
||||
|
||||
RefPtr<SetTimeoutRunnable> runnable =
|
||||
new SetTimeoutRunnable(mWorkerPrivate, mProxy, aTimeout);
|
||||
runnable->Dispatch(Terminating, aRv);
|
||||
runnable->Dispatch(Canceling, aRv);
|
||||
}
|
||||
|
||||
void
|
||||
@ -1971,7 +1971,7 @@ XMLHttpRequestWorker::SetWithCredentials(bool aWithCredentials, ErrorResult& aRv
|
||||
|
||||
RefPtr<SetWithCredentialsRunnable> runnable =
|
||||
new SetWithCredentialsRunnable(mWorkerPrivate, mProxy, aWithCredentials);
|
||||
runnable->Dispatch(Terminating, aRv);
|
||||
runnable->Dispatch(Canceling, aRv);
|
||||
}
|
||||
|
||||
void
|
||||
@ -1996,7 +1996,7 @@ XMLHttpRequestWorker::SetMozBackgroundRequest(bool aBackgroundRequest,
|
||||
RefPtr<SetBackgroundRequestRunnable> runnable =
|
||||
new SetBackgroundRequestRunnable(mWorkerPrivate, mProxy,
|
||||
aBackgroundRequest);
|
||||
runnable->Dispatch(Terminating, aRv);
|
||||
runnable->Dispatch(Canceling, aRv);
|
||||
}
|
||||
|
||||
XMLHttpRequestUpload*
|
||||
@ -2189,7 +2189,7 @@ XMLHttpRequestWorker::Abort(ErrorResult& aRv)
|
||||
mProxy->mOuterEventStreamId++;
|
||||
|
||||
RefPtr<AbortRunnable> runnable = new AbortRunnable(mWorkerPrivate, mProxy);
|
||||
runnable->Dispatch(Terminating, aRv);
|
||||
runnable->Dispatch(Canceling, aRv);
|
||||
}
|
||||
|
||||
void
|
||||
@ -2212,7 +2212,7 @@ XMLHttpRequestWorker::GetResponseHeader(const nsACString& aHeader,
|
||||
RefPtr<GetResponseHeaderRunnable> runnable =
|
||||
new GetResponseHeaderRunnable(mWorkerPrivate, mProxy, aHeader,
|
||||
responseHeader);
|
||||
runnable->Dispatch(Terminating, aRv);
|
||||
runnable->Dispatch(Canceling, aRv);
|
||||
if (aRv.Failed()) {
|
||||
return;
|
||||
}
|
||||
@ -2238,7 +2238,7 @@ XMLHttpRequestWorker::GetAllResponseHeaders(nsACString& aResponseHeaders,
|
||||
nsCString responseHeaders;
|
||||
RefPtr<GetAllResponseHeadersRunnable> runnable =
|
||||
new GetAllResponseHeadersRunnable(mWorkerPrivate, mProxy, responseHeaders);
|
||||
runnable->Dispatch(Terminating, aRv);
|
||||
runnable->Dispatch(Canceling, aRv);
|
||||
if (aRv.Failed()) {
|
||||
return;
|
||||
}
|
||||
@ -2270,7 +2270,7 @@ XMLHttpRequestWorker::OverrideMimeType(const nsAString& aMimeType, ErrorResult&
|
||||
|
||||
RefPtr<OverrideMimeTypeRunnable> runnable =
|
||||
new OverrideMimeTypeRunnable(mWorkerPrivate, mProxy, aMimeType);
|
||||
runnable->Dispatch(Terminating, aRv);
|
||||
runnable->Dispatch(Canceling, aRv);
|
||||
}
|
||||
|
||||
void
|
||||
@ -2306,7 +2306,7 @@ XMLHttpRequestWorker::SetResponseType(XMLHttpRequestResponseType aResponseType,
|
||||
|
||||
RefPtr<SetResponseTypeRunnable> runnable =
|
||||
new SetResponseTypeRunnable(mWorkerPrivate, mProxy, aResponseType);
|
||||
runnable->Dispatch(Terminating, aRv);
|
||||
runnable->Dispatch(Canceling, aRv);
|
||||
if (aRv.Failed()) {
|
||||
return;
|
||||
}
|
||||
|
@ -5,7 +5,6 @@
|
||||
|
||||
#include "txXPathTreeWalker.h"
|
||||
#include "nsAtom.h"
|
||||
#include "nsIAttribute.h"
|
||||
#include "nsINode.h"
|
||||
#include "nsPrintfCString.h"
|
||||
#include "nsReadableUtils.h"
|
||||
@ -638,12 +637,10 @@ txXPathNativeNode::createXPathNode(nsINode* aNode, bool aKeepRootAlive)
|
||||
{
|
||||
uint16_t nodeType = aNode->NodeType();
|
||||
if (nodeType == nsINode::ATTRIBUTE_NODE) {
|
||||
nsCOMPtr<nsIAttribute> attr = do_QueryInterface(aNode);
|
||||
NS_ASSERTION(attr, "doesn't implement nsIAttribute");
|
||||
auto* attr = static_cast<Attr*>(aNode);
|
||||
|
||||
mozilla::dom::NodeInfo *nodeInfo = attr->NodeInfo();
|
||||
mozilla::dom::Element* parent =
|
||||
static_cast<Attr*>(attr.get())->GetElement();
|
||||
NodeInfo* nodeInfo = attr->NodeInfo();
|
||||
Element* parent = attr->GetElement();
|
||||
if (!parent) {
|
||||
return nullptr;
|
||||
}
|
||||
|
@ -1462,7 +1462,7 @@ gfxUtils::ThreadSafeGetFeatureStatus(const nsCOMPtr<nsIGfxInfo>& gfxInfo,
|
||||
status);
|
||||
|
||||
ErrorResult rv;
|
||||
runnable->Dispatch(dom::WorkerStatus::Terminating, rv);
|
||||
runnable->Dispatch(dom::WorkerStatus::Canceling, rv);
|
||||
if (rv.Failed()) {
|
||||
// XXXbz This is totally broken, since we're supposed to just abort
|
||||
// everything up the callstack but the callers basically eat the
|
||||
|
@ -268,6 +268,7 @@ struct VRDisplayState
|
||||
VRFieldOfView mEyeFOV[VRDisplayState::NumEyes];
|
||||
Point3D_POD mEyeTranslation[VRDisplayState::NumEyes];
|
||||
IntSize_POD mEyeResolution;
|
||||
bool mSuppressFrames;
|
||||
bool mIsConnected;
|
||||
bool mIsMounted;
|
||||
FloatSize_POD mStageSize;
|
||||
|
@ -235,8 +235,8 @@ VRDisplayExternal::SubmitFrame(const layers::SurfaceDescriptor& aTexture,
|
||||
memset(&displayState, 0, sizeof(VRDisplayState));
|
||||
while (displayState.mLastSubmittedFrameId < aFrameId) {
|
||||
if (manager->PullState(&displayState, &mLastSensorState, mDisplayInfo.mControllerState)) {
|
||||
if (!displayState.mIsConnected) {
|
||||
// Service has shut down or hardware has been disconnected
|
||||
if (displayState.mSuppressFrames || !displayState.mIsConnected) {
|
||||
// External implementation wants to supress frames, service has shut down or hardware has been disconnected.
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
@ -44,6 +44,7 @@ struct ParamTraits<mozilla::gfx::VRDisplayState>
|
||||
WriteParam(aMsg, aParam.mCapabilityFlags);
|
||||
WriteParam(aMsg, aParam.mEyeResolution.width);
|
||||
WriteParam(aMsg, aParam.mEyeResolution.height);
|
||||
WriteParam(aMsg, aParam.mSuppressFrames);
|
||||
WriteParam(aMsg, aParam.mIsConnected);
|
||||
WriteParam(aMsg, aParam.mIsMounted);
|
||||
WriteParam(aMsg, aParam.mStageSize.width);
|
||||
@ -71,6 +72,7 @@ struct ParamTraits<mozilla::gfx::VRDisplayState>
|
||||
!ReadParam(aMsg, aIter, &(aResult->mCapabilityFlags)) ||
|
||||
!ReadParam(aMsg, aIter, &(aResult->mEyeResolution.width)) ||
|
||||
!ReadParam(aMsg, aIter, &(aResult->mEyeResolution.height)) ||
|
||||
!ReadParam(aMsg, aIter, &(aResult->mSuppressFrames)) ||
|
||||
!ReadParam(aMsg, aIter, &(aResult->mIsConnected)) ||
|
||||
!ReadParam(aMsg, aIter, &(aResult->mIsMounted)) ||
|
||||
!ReadParam(aMsg, aIter, &(aResult->mStageSize.width)) ||
|
||||
|
@ -920,7 +920,7 @@ class SpecializedRegSet<Accessors, RegisterSet> : public Accessors
|
||||
takeUnchecked(reg);
|
||||
}
|
||||
void take(FloatRegister reg) {
|
||||
MOZ_ASSERT(has(reg));
|
||||
MOZ_ASSERT(this->has(reg));
|
||||
takeUnchecked(reg);
|
||||
}
|
||||
void take(AnyRegister reg) {
|
||||
|
@ -0,0 +1,76 @@
|
||||
// Any copyright is dedicated to the Public Domain.
|
||||
// http://creativecommons.org/licenses/publicdomain/
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
var gTestfile = "for-in-with-gc-and-unvisited-deletion.js";
|
||||
var BUGNUMBER = 1462939;
|
||||
var summary =
|
||||
"Don't mishandle deletion of a property from the internal iterator " +
|
||||
"created for a for-in loop, when a gc occurs just after it";
|
||||
|
||||
print(BUGNUMBER + ": " + summary);
|
||||
|
||||
/**************
|
||||
* BEGIN TEST *
|
||||
**************/
|
||||
|
||||
function testOneDeletion()
|
||||
{
|
||||
var o = {
|
||||
p: 1,
|
||||
r: 3,
|
||||
s: 4,
|
||||
};
|
||||
|
||||
for (var i in o)
|
||||
{
|
||||
gc();
|
||||
delete o.s;
|
||||
}
|
||||
}
|
||||
testOneDeletion();
|
||||
|
||||
function testTwoDeletions()
|
||||
{
|
||||
var o = {
|
||||
p: 1,
|
||||
r: 3,
|
||||
s: 4,
|
||||
t: 5,
|
||||
};
|
||||
|
||||
for (var i in o)
|
||||
{
|
||||
gc();
|
||||
delete o.t;
|
||||
delete o.s;
|
||||
}
|
||||
}
|
||||
testTwoDeletions();
|
||||
|
||||
function testThreeDeletions()
|
||||
{
|
||||
var o = {
|
||||
p: 1,
|
||||
r: 3,
|
||||
s: 4,
|
||||
t: 5,
|
||||
x: 7,
|
||||
};
|
||||
|
||||
for (var i in o)
|
||||
{
|
||||
gc();
|
||||
delete o.x;
|
||||
delete o.t;
|
||||
delete o.s;
|
||||
}
|
||||
}
|
||||
testThreeDeletions();
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
if (typeof reportCompare === "function")
|
||||
reportCompare(true, true);
|
||||
|
||||
print("Tests complete");
|
@ -0,0 +1,30 @@
|
||||
// Any copyright is dedicated to the Public Domain.
|
||||
// http://creativecommons.org/licenses/publicdomain/
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
var gTestfile = "for-in-with-gc-during-iterator-init.js";
|
||||
var BUGNUMBER = 1464472;
|
||||
var summary =
|
||||
"Properly trace NativeIterator when a GC occurs during its initialization";
|
||||
|
||||
print(BUGNUMBER + ": " + summary);
|
||||
|
||||
/**************
|
||||
* BEGIN TEST *
|
||||
**************/
|
||||
|
||||
gczeal(17, 1);
|
||||
for (var i = 0; i < 100; ++i)
|
||||
{
|
||||
Object.prototype[1012] = "value";
|
||||
imports = {};
|
||||
for (dmod in imports)
|
||||
continue; // gc occurs here converting 1012 to string
|
||||
}
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
if (typeof reportCompare === "function")
|
||||
reportCompare(true, true);
|
||||
|
||||
print("Tests complete");
|
@ -56,6 +56,9 @@ typedef Rooted<PropertyIteratorObject*> RootedPropertyIteratorObject;
|
||||
|
||||
static const gc::AllocKind ITERATOR_FINALIZE_KIND = gc::AllocKind::OBJECT2_BACKGROUND;
|
||||
|
||||
// Beware! This function may have to trace incompletely-initialized
|
||||
// |NativeIterator| allocations if the |IdToString| in that constructor recurs
|
||||
// into this code.
|
||||
void
|
||||
NativeIterator::trace(JSTracer* trc)
|
||||
{
|
||||
@ -66,11 +69,24 @@ NativeIterator::trace(JSTracer* trc)
|
||||
if (iterObj_)
|
||||
TraceManuallyBarrieredEdge(trc, &iterObj_, "iterObj");
|
||||
|
||||
// The limits below are correct at every instant of |NativeIterator|
|
||||
// initialization, with the end-pointer incremented as each new guard is
|
||||
// created, so they're safe to use here.
|
||||
std::for_each(guardsBegin(), guardsEnd(),
|
||||
[trc](HeapReceiverGuard& guard) {
|
||||
guard.trace(trc);
|
||||
});
|
||||
|
||||
// But as properties must be created *before* guards, |propertiesBegin()|
|
||||
// that depends on |guardsEnd()| having its final value can't safely be
|
||||
// used. Until this is fully initialized, use |propertyCursor_| instead,
|
||||
// which points at the start of properties even in partially initialized
|
||||
// |NativeIterator|s. (|propertiesEnd()| is safe at all times with respect
|
||||
// to the properly-chosen beginning.)
|
||||
//
|
||||
// Note that we must trace all properties (not just those not yet visited,
|
||||
// or just visited, due to |NativeIterator::previousPropertyWas|) for
|
||||
// |NativeIterator|s to be reusable.
|
||||
GCPtrFlatString* begin = MOZ_LIKELY(isInitialized()) ? propertiesBegin() : propertyCursor_;
|
||||
std::for_each(begin, propertiesEnd(),
|
||||
[trc](GCPtrFlatString& prop) {
|
||||
@ -674,7 +690,7 @@ NativeIterator::NativeIterator(JSContext* cx, Handle<PropertyIteratorObject*> pr
|
||||
propertyCursor_(reinterpret_cast<GCPtrFlatString*>(guardsBegin() + numGuards)),
|
||||
propertiesEnd_(propertyCursor_),
|
||||
guardKey_(guardKey),
|
||||
flags_(0)
|
||||
flags_(0) // note: no Flags::Initialized
|
||||
{
|
||||
MOZ_ASSERT(!*hadError);
|
||||
|
||||
@ -740,6 +756,8 @@ NativeIterator::NativeIterator(JSContext* cx, Handle<PropertyIteratorObject*> pr
|
||||
MOZ_ASSERT(i == numGuards);
|
||||
}
|
||||
|
||||
// |guardsEnd_| is now guaranteed to point at the start of properties, so
|
||||
// we can mark this initialized.
|
||||
MOZ_ASSERT(static_cast<void*>(guardsEnd_) == propertyCursor_);
|
||||
markInitialized();
|
||||
|
||||
|
@ -58,9 +58,33 @@ struct NativeIterator
|
||||
// active. Not serialized by XDR.
|
||||
struct Flags
|
||||
{
|
||||
// This flag is set when all guards and properties associated with this
|
||||
// NativeIterator have been initialized, such that |guardsEnd_|, in
|
||||
// addition to being the end of guards, is also the beginning of
|
||||
// properties.
|
||||
//
|
||||
// This flag is only *not* set when a NativeIterator is in the process
|
||||
// of being constructed. At such time |guardsEnd_| accounts only for
|
||||
// guards that have been initialized -- potentially none of them.
|
||||
// Instead, |propertyCursor_| is initialized to the ultimate/actual
|
||||
// start of properties and must be used instead of |propertiesBegin()|,
|
||||
// which asserts that this flag is present to guard against misuse.
|
||||
static constexpr uint32_t Initialized = 0x1;
|
||||
|
||||
// This flag indicates that this NativeIterator is currently being used
|
||||
// to enumerate an object's properties and has not yet been closed.
|
||||
static constexpr uint32_t Active = 0x2;
|
||||
|
||||
// This flag indicates that the object being enumerated by this
|
||||
// |NativeIterator| had a property deleted from it before it was
|
||||
// visited, forcing the properties array in this to be mutated to
|
||||
// remove it.
|
||||
static constexpr uint32_t HasUnvisitedPropertyDeletion = 0x4;
|
||||
|
||||
// If any of these bits are set on a |NativeIterator|, it isn't
|
||||
// currently reusable. (An active |NativeIterator| can't be stolen
|
||||
// *right now*; a |NativeIterator| that's had its properties mutated
|
||||
// can never be reused, because it would give incorrect results.)
|
||||
static constexpr uint32_t NotReusable = Active | HasUnvisitedPropertyDeletion;
|
||||
};
|
||||
|
||||
@ -248,6 +272,12 @@ struct NativeIterator
|
||||
|
||||
bool isReusable() const {
|
||||
MOZ_ASSERT(isInitialized());
|
||||
|
||||
// Cached NativeIterators are reusable if they're not currently active
|
||||
// and their properties array hasn't been mutated, i.e. if only
|
||||
// |Flags::Initialized| is set. Using |Flags::NotReusable| to test
|
||||
// would also work, but this formulation is safer against memory
|
||||
// corruption.
|
||||
return flags_ == Flags::Initialized;
|
||||
}
|
||||
|
||||
|
@ -8,6 +8,7 @@
|
||||
#include "ScriptPreloader-inl.h"
|
||||
|
||||
#include "mozilla/Unused.h"
|
||||
#include "mozilla/ipc/FileDescriptor.h"
|
||||
#include "nsIFile.h"
|
||||
|
||||
#include <private/pprio.h>
|
||||
@ -142,12 +143,11 @@ AutoMemMap::cloneHandle() const
|
||||
void
|
||||
AutoMemMap::reset()
|
||||
{
|
||||
if (addr && !persistent_) {
|
||||
Unused << NS_WARN_IF(PR_MemUnmap(addr, size()) != PR_SUCCESS);
|
||||
addr = nullptr;
|
||||
}
|
||||
if (fileMap) {
|
||||
if (addr && !persistent_) {
|
||||
Unused << NS_WARN_IF(PR_MemUnmap(addr, size()) != PR_SUCCESS);
|
||||
addr = nullptr;
|
||||
}
|
||||
|
||||
Unused << NS_WARN_IF(PR_CloseFileMap(fileMap) != PR_SUCCESS);
|
||||
fileMap = nullptr;
|
||||
}
|
||||
|
@ -10,7 +10,6 @@
|
||||
#include "mozilla/MemoryReporting.h"
|
||||
#include "mozilla/RangedPtr.h"
|
||||
#include "mozilla/Result.h"
|
||||
#include "mozilla/ipc/FileDescriptor.h"
|
||||
#include "nsIMemoryReporter.h"
|
||||
|
||||
#include <prio.h>
|
||||
@ -18,6 +17,10 @@
|
||||
class nsIFile;
|
||||
|
||||
namespace mozilla {
|
||||
namespace ipc {
|
||||
class FileDescriptor;
|
||||
}
|
||||
|
||||
namespace loader {
|
||||
|
||||
using mozilla::ipc::FileDescriptor;
|
||||
|
@ -99,6 +99,7 @@ template<> struct IsIntegralHelper<unsigned long long> : TrueType {};
|
||||
template<> struct IsIntegralHelper<bool> : TrueType {};
|
||||
template<> struct IsIntegralHelper<wchar_t> : TrueType {};
|
||||
template<> struct IsIntegralHelper<char16_t> : TrueType {};
|
||||
template<> struct IsIntegralHelper<char32_t> : TrueType {};
|
||||
|
||||
} /* namespace detail */
|
||||
|
||||
|
@ -12,6 +12,7 @@ transforms:
|
||||
|
||||
kind-dependencies:
|
||||
- repackage-signing
|
||||
- repackage-signing-l10n
|
||||
- partials-signing
|
||||
|
||||
only-for-build-platforms:
|
||||
|
@ -2,8 +2,9 @@
|
||||
# License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
loader: taskgraph.loader.single_dep:loader
|
||||
loader: taskgraph.loader.multi_dep:loader
|
||||
|
||||
group-by: platform
|
||||
|
||||
transforms:
|
||||
- taskgraph.transforms.l10n:transforms
|
||||
@ -13,6 +14,9 @@ transforms:
|
||||
|
||||
kind-dependencies:
|
||||
- build
|
||||
- build-signing
|
||||
- repackage
|
||||
- repackage-signing
|
||||
- toolchain
|
||||
|
||||
only-for-build-platforms:
|
||||
@ -99,27 +103,27 @@ job-template:
|
||||
linux.*: # linux64 and 32 get same treatment here
|
||||
EN_US_PACKAGE_NAME: target.tar.bz2
|
||||
EN_US_BINARY_URL:
|
||||
task-reference: https://queue.taskcluster.net/v1/task/<signed-build>/artifacts/{artifact_prefix}
|
||||
task-reference: https://queue.taskcluster.net/v1/task/<build-signing>/artifacts/{artifact_prefix}
|
||||
MAR_TOOLS_URL:
|
||||
task-reference: https://queue.taskcluster.net/v1/task/<unsigned-build>/artifacts/{artifact_prefix}/host/bin
|
||||
task-reference: https://queue.taskcluster.net/v1/task/<build>/artifacts/{artifact_prefix}/host/bin
|
||||
macosx64:
|
||||
EN_US_PACKAGE_NAME: target.dmg
|
||||
EN_US_BINARY_URL:
|
||||
task-reference: https://queue.taskcluster.net/v1/task/<repackage>/artifacts/{artifact_prefix}
|
||||
MAR_TOOLS_URL:
|
||||
task-reference: https://queue.taskcluster.net/v1/task/<unsigned-build>/artifacts/{artifact_prefix}/host/bin
|
||||
task-reference: https://queue.taskcluster.net/v1/task/<build>/artifacts/{artifact_prefix}/host/bin
|
||||
win.*:
|
||||
EN_US_PACKAGE_NAME: target.zip
|
||||
EN_US_BINARY_URL:
|
||||
task-reference: https://queue.taskcluster.net/v1/task/<signed-build>/artifacts/{artifact_prefix}
|
||||
task-reference: https://queue.taskcluster.net/v1/task/<build-signing>/artifacts/{artifact_prefix}
|
||||
EN_US_INSTALLER_BINARY_URL:
|
||||
task-reference: https://queue.taskcluster.net/v1/task/<repackage-signed>/artifacts/{artifact_prefix}
|
||||
task-reference: https://queue.taskcluster.net/v1/task/<repackage-signing>/artifacts/{artifact_prefix}
|
||||
MAR_TOOLS_URL:
|
||||
task-reference: https://queue.taskcluster.net/v1/task/<unsigned-build>/artifacts/{artifact_prefix}/host/bin
|
||||
task-reference: https://queue.taskcluster.net/v1/task/<build>/artifacts/{artifact_prefix}/host/bin
|
||||
android-api-16:
|
||||
EN_US_PACKAGE_NAME: target.apk
|
||||
EN_US_BINARY_URL:
|
||||
task-reference: https://queue.taskcluster.net/v1/task/<unsigned-build>/artifacts/{artifact_prefix}
|
||||
task-reference: https://queue.taskcluster.net/v1/task/<build>/artifacts/{artifact_prefix}
|
||||
mozharness:
|
||||
config:
|
||||
by-build-platform:
|
||||
|
@ -2,7 +2,9 @@
|
||||
# License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
loader: taskgraph.loader.single_dep:loader
|
||||
loader: taskgraph.loader.multi_dep:loader
|
||||
|
||||
group-by: platform
|
||||
|
||||
transforms:
|
||||
- taskgraph.transforms.l10n:transforms
|
||||
@ -12,6 +14,9 @@ transforms:
|
||||
|
||||
kind-dependencies:
|
||||
- build
|
||||
- build-signing
|
||||
- repackage
|
||||
- repackage-signing
|
||||
- toolchain
|
||||
|
||||
only-for-build-platforms:
|
||||
@ -123,27 +128,27 @@ job-template:
|
||||
linux.*: # linux64 and 32 get same treatment here
|
||||
EN_US_PACKAGE_NAME: target.tar.bz2
|
||||
EN_US_BINARY_URL:
|
||||
task-reference: https://queue.taskcluster.net/v1/task/<signed-build>/artifacts/{artifact_prefix}
|
||||
task-reference: https://queue.taskcluster.net/v1/task/<build-signing>/artifacts/{artifact_prefix}
|
||||
MAR_TOOLS_URL:
|
||||
task-reference: https://queue.taskcluster.net/v1/task/<unsigned-build>/artifacts/{artifact_prefix}/host/bin
|
||||
task-reference: https://queue.taskcluster.net/v1/task/<build>/artifacts/{artifact_prefix}/host/bin
|
||||
macosx64.*:
|
||||
EN_US_PACKAGE_NAME: target.dmg
|
||||
EN_US_BINARY_URL:
|
||||
task-reference: https://queue.taskcluster.net/v1/task/<repackage>/artifacts/{artifact_prefix}
|
||||
MAR_TOOLS_URL:
|
||||
task-reference: https://queue.taskcluster.net/v1/task/<unsigned-build>/artifacts/{artifact_prefix}/host/bin
|
||||
task-reference: https://queue.taskcluster.net/v1/task/<build>/artifacts/{artifact_prefix}/host/bin
|
||||
win.*:
|
||||
EN_US_PACKAGE_NAME: target.zip
|
||||
EN_US_BINARY_URL:
|
||||
task-reference: https://queue.taskcluster.net/v1/task/<signed-build>/artifacts/{artifact_prefix}
|
||||
task-reference: https://queue.taskcluster.net/v1/task/<build-signing>/artifacts/{artifact_prefix}
|
||||
EN_US_INSTALLER_BINARY_URL:
|
||||
task-reference: https://queue.taskcluster.net/v1/task/<repackage-signed>/artifacts/{artifact_prefix}
|
||||
task-reference: https://queue.taskcluster.net/v1/task/<repackage-signing>/artifacts/{artifact_prefix}
|
||||
MAR_TOOLS_URL:
|
||||
task-reference: https://queue.taskcluster.net/v1/task/<unsigned-build>/artifacts/{artifact_prefix}/host/bin
|
||||
task-reference: https://queue.taskcluster.net/v1/task/<build>/artifacts/{artifact_prefix}/host/bin
|
||||
android-api-16-nightly:
|
||||
EN_US_PACKAGE_NAME: target.apk
|
||||
EN_US_BINARY_URL:
|
||||
task-reference: https://queue.taskcluster.net/v1/task/<unsigned-build>/artifacts/{artifact_prefix}/en-US
|
||||
task-reference: https://queue.taskcluster.net/v1/task/<build>/artifacts/{artifact_prefix}/en-US
|
||||
mozharness:
|
||||
config:
|
||||
by-build-platform:
|
||||
|
@ -11,6 +11,7 @@ transforms:
|
||||
|
||||
kind-dependencies:
|
||||
- repackage-signing
|
||||
- repackage-signing-l10n
|
||||
|
||||
only-for-attributes:
|
||||
- nightly
|
||||
|
29
taskcluster/ci/repackage-signing-l10n/kind.yml
Normal file
29
taskcluster/ci/repackage-signing-l10n/kind.yml
Normal file
@ -0,0 +1,29 @@
|
||||
# This Source Code Form is subject to the terms of the Mozilla Public
|
||||
# License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
loader: taskgraph.loader.single_dep:loader
|
||||
|
||||
transforms:
|
||||
- taskgraph.transforms.name_sanity:transforms
|
||||
- taskgraph.transforms.repackage_signing:transforms
|
||||
- taskgraph.transforms.repackage_routes:transforms
|
||||
- taskgraph.transforms.task:transforms
|
||||
|
||||
kind-dependencies:
|
||||
- repackage-l10n
|
||||
|
||||
only-for-build-platforms:
|
||||
- linux-nightly/opt
|
||||
- linux-devedition-nightly/opt
|
||||
- linux64-nightly/opt
|
||||
- linux64-devedition-nightly/opt
|
||||
- linux64-asan-reporter-nightly/opt
|
||||
- macosx64-nightly/opt
|
||||
- macosx64-devedition-nightly/opt
|
||||
- win32-nightly/opt
|
||||
- win32-devedition-nightly/opt
|
||||
- win32/opt
|
||||
- win64-nightly/opt
|
||||
- win64-devedition-nightly/opt
|
||||
- win64/opt
|
@ -12,7 +12,6 @@ transforms:
|
||||
|
||||
kind-dependencies:
|
||||
- repackage
|
||||
- repackage-l10n
|
||||
|
||||
only-for-build-platforms:
|
||||
- linux-nightly/opt
|
||||
|
@ -438,6 +438,11 @@ repackage-signing
|
||||
Repackage-signing take the repackaged installers (windows) and update packaging (with
|
||||
the signed internal bits) and signs them.
|
||||
|
||||
repackage-signing-l10n
|
||||
----------------------
|
||||
Repackage-signing take the repackaged installers (windows) and update packaging (with
|
||||
the signed internal bits) and signs them for localized versions.
|
||||
|
||||
repo-update
|
||||
-----------
|
||||
Repo-Update tasks are tasks that perform some action on the project repo itself,
|
||||
|
99
taskcluster/taskgraph/loader/multi_dep.py
Normal file
99
taskcluster/taskgraph/loader/multi_dep.py
Normal file
@ -0,0 +1,99 @@
|
||||
# This Source Code Form is subject to the terms of the Mozilla Public
|
||||
# License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
from __future__ import absolute_import, print_function, unicode_literals
|
||||
|
||||
import copy
|
||||
|
||||
|
||||
# Define a collection of group_by functions
|
||||
GROUP_BY_MAP = {}
|
||||
|
||||
|
||||
def group_by(name):
|
||||
def wrapper(func):
|
||||
GROUP_BY_MAP[name] = func
|
||||
return func
|
||||
return wrapper
|
||||
|
||||
|
||||
def loader(kind, path, config, params, loaded_tasks):
|
||||
"""
|
||||
Load tasks based on the jobs dependant kinds, designed for use as
|
||||
multiple-dependent needs.
|
||||
|
||||
Required ``group-by-fn`` is used to define how we coalesce the
|
||||
multiple deps together to pass to transforms, e.g. all kinds specified get
|
||||
collapsed by platform with `platform`
|
||||
|
||||
The `only-for-build-platforms` kind configuration, if specified, will limit
|
||||
the build platforms for which a job will be created. Alternatively there is
|
||||
'not-for-build-platforms' kind configuration which will be consulted only after
|
||||
'only-for-build-platforms' is checked (if present), and omit any jobs where the
|
||||
build platform matches.
|
||||
|
||||
Optional ``job-template`` kind configuration value, if specified, will be used to
|
||||
pass configuration down to the specified transforms used.
|
||||
"""
|
||||
job_template = config.get('job-template')
|
||||
|
||||
for dep_tasks in group_tasks(config, loaded_tasks):
|
||||
job = {'dependent-tasks': dep_tasks}
|
||||
if job_template:
|
||||
job.update(copy.deepcopy(job_template))
|
||||
# copy shipping_product from upstream
|
||||
product = dep_tasks[dep_tasks.keys()[0]].attributes.get(
|
||||
'shipping_product',
|
||||
dep_tasks[dep_tasks.keys()[0]].task.get('shipping-product')
|
||||
)
|
||||
if product:
|
||||
job.setdefault('shipping-product', product)
|
||||
|
||||
yield job
|
||||
|
||||
|
||||
def group_tasks(config, tasks):
    """Group ``tasks`` using the configured group-by function and yield,
    per group, a dict of deep-copied tasks keyed by kind.

    Asserts that each group contains at most one task per kind.
    """
    grouping_fn = GROUP_BY_MAP[config['group-by']]
    grouped = grouping_fn(config, tasks)

    for tasks_in_group in grouped.itervalues():
        assert_unique_members(
            [task.kind for task in tasks_in_group],
            error_msg="Multi_dep.py should have filtered down to one task per kind")
        yield {task.kind: copy.deepcopy(task) for task in tasks_in_group}
|
||||
|
||||
|
||||
@group_by('platform')
def platform_grouping(config, tasks):
    """Group tasks by (build platform, build type, shipping product),
    honouring the kind's only-/not-for-build-platforms restrictions."""
    only_platforms = config.get('only-for-build-platforms')
    not_platforms = config.get('not-for-build-platforms')
    dep_kinds = config.get('kind-dependencies', [])

    groups = {}
    for task in tasks:
        # Only consider tasks of the kinds this kind depends on.
        if task.kind not in dep_kinds:
            continue
        platform = task.attributes.get('build_platform')
        build_type = task.attributes.get('build_type')
        product = task.attributes.get(
            'shipping_product', task.task.get('shipping-product'))

        # Skip only_ and not_ platforms that don't match.
        if only_platforms or not_platforms:
            if not (platform and build_type):
                continue
            combined_platform = "{}/{}".format(platform, build_type)
            if only_platforms and combined_platform not in only_platforms:
                continue
            elif not_platforms and combined_platform in not_platforms:
                continue

        groups.setdefault((platform, build_type, product), []).append(task)
    return groups
|
||||
|
||||
|
||||
def assert_unique_members(kinds, error_msg=None):
    """Raise an Exception if ``kinds`` contains duplicate entries.

    :param kinds: list of kind names to check for uniqueness.
    :param error_msg: optional message for the raised Exception. When
        omitted, a descriptive default naming the duplicated members is
        used instead of the previous unhelpful ``Exception(None)``.
    """
    if len(kinds) != len(set(kinds)):
        if error_msg is None:
            duplicates = sorted(set(k for k in kinds if kinds.count(k) > 1))
            error_msg = "duplicate members found: {}".format(
                ", ".join(str(d) for d in duplicates))
        raise Exception(error_msg)
|
@ -219,8 +219,11 @@ def make_task_description(config, jobs):
|
||||
dependencies.update(repackage_dependencies)
|
||||
|
||||
# If this isn't a direct dependency, it won't be in there.
|
||||
if 'repackage-signing' not in dependencies:
|
||||
if 'repackage-signing' not in dependencies and \
|
||||
'repackage-signing-l10n' not in dependencies:
|
||||
repackage_signing_name = "repackage-signing"
|
||||
if job.get('locale'):
|
||||
repackage_signing_name = "repackage-signing-l10n"
|
||||
repackage_signing_deps = {"repackage-signing":
|
||||
dep_job.dependencies[repackage_signing_name]
|
||||
}
|
||||
|
@ -101,6 +101,7 @@ def make_task_description(config, jobs):
|
||||
if not build_platform:
|
||||
raise Exception("Cannot find build platform!")
|
||||
|
||||
label = dep_job.label.replace("repackage-signing-l10n", "beetmover-")
|
||||
label = dep_job.label.replace("repackage-signing-", "beetmover-")
|
||||
label = label.replace("repackage-", "beetmover-")
|
||||
label = label.replace("chunking-dummy-", "beetmover-")
|
||||
|
@ -97,8 +97,8 @@ l10n_description_schema = Schema({
|
||||
|
||||
Optional('run-on-projects'): job_description_schema['run-on-projects'],
|
||||
|
||||
# task object of the dependent task
|
||||
Required('dependent-task'): object,
|
||||
# dictionary of dependent task objects, keyed by kind.
|
||||
Required('dependent-tasks'): {basestring: object},
|
||||
|
||||
# worker-type to utilize
|
||||
Required('worker-type'): _by_platform(basestring),
|
||||
@ -197,7 +197,7 @@ def _remove_locales(locales, to_remove=None):
|
||||
@transforms.add
|
||||
def setup_name(config, jobs):
|
||||
for job in jobs:
|
||||
dep = job['dependent-task']
|
||||
dep = job['dependent-tasks']['build']
|
||||
# Set the name to the same as the dep task, without kind name.
|
||||
# Label will get set automatically with this kinds name.
|
||||
job['name'] = job.get('name',
|
||||
@ -208,7 +208,7 @@ def setup_name(config, jobs):
|
||||
@transforms.add
|
||||
def copy_in_useful_magic(config, jobs):
|
||||
for job in jobs:
|
||||
dep = job['dependent-task']
|
||||
dep = job['dependent-tasks']['build']
|
||||
attributes = copy_attributes_from_dependent_job(dep)
|
||||
attributes.update(job.get('attributes', {}))
|
||||
# build-platform is needed on `job` for by-build-platform
|
||||
@ -229,20 +229,19 @@ def validate_early(config, jobs):
|
||||
def setup_nightly_dependency(config, jobs):
|
||||
""" Sets up a task dependency to the signing job this relates to """
|
||||
for job in jobs:
|
||||
job['dependencies'] = {'unsigned-build': job['dependent-task'].label}
|
||||
job['dependencies'] = {'build': job['dependent-tasks']['build'].label}
|
||||
if job['attributes']['build_platform'].startswith('win') or \
|
||||
job['attributes']['build_platform'].startswith('linux'):
|
||||
# Weave these in and just assume they will be there in the resulting graph
|
||||
job['dependencies'].update({
|
||||
'signed-build': 'build-signing-{}'.format(job['name']),
|
||||
'build-signing': job['dependent-tasks']['build-signing'].label,
|
||||
})
|
||||
if job['attributes']['build_platform'].startswith('macosx'):
|
||||
job['dependencies'].update({
|
||||
'repackage': 'repackage-{}'.format(job['name'])
|
||||
'repackage': job['dependent-tasks']['repackage'].label
|
||||
})
|
||||
if job['attributes']['build_platform'].startswith('win'):
|
||||
job['dependencies'].update({
|
||||
'repackage-signed': 'repackage-signing-{}'.format(job['name'])
|
||||
'repackage-signing': job['dependent-tasks']['repackage-signing'].label
|
||||
})
|
||||
yield job
|
||||
|
||||
|
@ -81,6 +81,9 @@ def make_task_description(config, jobs):
|
||||
|
||||
signing_task = None
|
||||
for dependency in sorted(dependencies.keys()):
|
||||
if 'repackage-signing-l10n' in dependency:
|
||||
signing_task = dependency
|
||||
break
|
||||
if 'repackage-signing' in dependency:
|
||||
signing_task = dependency
|
||||
break
|
||||
|
@ -108,6 +108,8 @@ XPCOMUtils.defineLazyGetter(this, "console", ExtensionCommon.getConsole);
|
||||
|
||||
XPCOMUtils.defineLazyGetter(this, "LocaleData", () => ExtensionCommon.LocaleData);
|
||||
|
||||
const {sharedData} = Services.ppmm;
|
||||
|
||||
// The userContextID reserved for the extension storage (its purpose is ensuring that the IndexedDB
|
||||
// storage used by the browser.storage.local API is not directly accessible from the extension code).
|
||||
XPCOMUtils.defineLazyGetter(this, "WEBEXT_STORAGE_USER_CONTEXT_ID", () => {
|
||||
@ -1245,6 +1247,8 @@ class LangpackBootstrapScope {
|
||||
}
|
||||
}
|
||||
|
||||
let activeExtensionIDs = new Set();
|
||||
|
||||
/**
|
||||
* This class is the main representation of an active WebExtension
|
||||
* in the main process.
|
||||
@ -1254,6 +1258,8 @@ class Extension extends ExtensionData {
|
||||
constructor(addonData, startupReason) {
|
||||
super(addonData.resourceURI);
|
||||
|
||||
this.sharedDataKeys = new Set();
|
||||
|
||||
this.uuid = UUIDMap.get(addonData.id);
|
||||
this.instanceId = getUniqueId();
|
||||
|
||||
@ -1307,6 +1313,8 @@ class Extension extends ExtensionData {
|
||||
this._optionalOrigins = null;
|
||||
this.webAccessibleResources = null;
|
||||
|
||||
this.registeredContentScripts = new Map();
|
||||
|
||||
this.emitter = new EventEmitter();
|
||||
|
||||
/* eslint-disable mozilla/balanced-listeners */
|
||||
@ -1513,6 +1521,15 @@ class Extension extends ExtensionData {
|
||||
return manifest;
|
||||
}
|
||||
|
||||
get contentSecurityPolicy() {
|
||||
return this.manifest.content_security_policy;
|
||||
}
|
||||
|
||||
get backgroundScripts() {
|
||||
return (this.manifest.background &&
|
||||
this.manifest.background.scripts);
|
||||
}
|
||||
|
||||
// Representation of the extension to send to content
|
||||
// processes. This should include anything the content process might
|
||||
// need.
|
||||
@ -1521,20 +1538,24 @@ class Extension extends ExtensionData {
|
||||
id: this.id,
|
||||
uuid: this.uuid,
|
||||
name: this.name,
|
||||
contentSecurityPolicy: this.contentSecurityPolicy,
|
||||
instanceId: this.instanceId,
|
||||
manifest: this.manifest,
|
||||
resourceURL: this.resourceURL,
|
||||
baseURL: this.baseURI.spec,
|
||||
contentScripts: this.contentScripts,
|
||||
registeredContentScripts: new Map(),
|
||||
webAccessibleResources: this.webAccessibleResources.map(res => res.glob),
|
||||
whiteListedHosts: this.whiteListedHosts.patterns.map(pat => pat.pattern),
|
||||
localeData: this.localeData.serialize(),
|
||||
permissions: this.permissions,
|
||||
optionalPermissions: this.manifest.optional_permissions,
|
||||
};
|
||||
}
|
||||
|
||||
// Extended serialized data which is only needed in the extensions process,
|
||||
// and is never deserialized in web content processes.
|
||||
serializeExtended() {
|
||||
return {
|
||||
backgroundScripts: this.backgroundScripts,
|
||||
childModules: this.modules && this.modules.child,
|
||||
dependencies: this.dependencies,
|
||||
permissions: this.permissions,
|
||||
principal: this.principal,
|
||||
optionalPermissions: this.manifest.optional_permissions,
|
||||
schemaURLs: this.schemaURLs,
|
||||
};
|
||||
}
|
||||
@ -1577,6 +1598,30 @@ class Extension extends ExtensionData {
|
||||
});
|
||||
}
|
||||
|
||||
setSharedData(key, value) {
|
||||
key = `extension/${this.id}/${key}`;
|
||||
this.sharedDataKeys.add(key);
|
||||
|
||||
sharedData.set(key, value);
|
||||
}
|
||||
|
||||
getSharedData(key, value) {
|
||||
key = `extension/${this.id}/${key}`;
|
||||
return sharedData.get(key);
|
||||
}
|
||||
|
||||
initSharedData() {
|
||||
this.setSharedData("", this.serialize());
|
||||
this.setSharedData("extendedData", this.serializeExtended());
|
||||
this.setSharedData("locales", this.localeData.serialize());
|
||||
this.setSharedData("manifest", this.manifest);
|
||||
this.updateContentScripts();
|
||||
}
|
||||
|
||||
updateContentScripts() {
|
||||
this.setSharedData("contentScripts", this.registeredContentScripts);
|
||||
}
|
||||
|
||||
runManifest(manifest) {
|
||||
let promises = [];
|
||||
for (let directive in manifest) {
|
||||
@ -1587,22 +1632,11 @@ class Extension extends ExtensionData {
|
||||
}
|
||||
}
|
||||
|
||||
let data = Services.ppmm.initialProcessData;
|
||||
if (!data["Extension:Extensions"]) {
|
||||
data["Extension:Extensions"] = [];
|
||||
}
|
||||
activeExtensionIDs.add(this.id);
|
||||
sharedData.set("extensions/activeIDs", activeExtensionIDs);
|
||||
|
||||
let serial = this.serialize();
|
||||
|
||||
// Map of the programmatically registered content script definitions
|
||||
// (by string scriptId), used in ext-contentScripts.js to propagate
|
||||
// the registered content scripts to the child content processes
|
||||
// (e.g. when a new content process starts after a content process crash).
|
||||
this.registeredContentScripts = serial.registeredContentScripts;
|
||||
|
||||
data["Extension:Extensions"].push(serial);
|
||||
|
||||
return this.broadcast("Extension:Startup", serial).then(() => {
|
||||
Services.ppmm.sharedData.flush();
|
||||
return this.broadcast("Extension:Startup", this.id).then(() => {
|
||||
return Promise.all(promises);
|
||||
});
|
||||
}
|
||||
@ -1727,6 +1761,8 @@ class Extension extends ExtensionData {
|
||||
|
||||
GlobalManager.init(this);
|
||||
|
||||
this.initSharedData();
|
||||
|
||||
this.policy.active = false;
|
||||
this.policy = processScript.initExtension(this);
|
||||
this.policy.extension = this;
|
||||
@ -1798,8 +1834,12 @@ class Extension extends ExtensionData {
|
||||
StartupCache.clearAddonData(this.id);
|
||||
}
|
||||
|
||||
let data = Services.ppmm.initialProcessData;
|
||||
data["Extension:Extensions"] = data["Extension:Extensions"].filter(e => e.id !== this.id);
|
||||
activeExtensionIDs.delete(this.id);
|
||||
sharedData.set("extensions/activeIDs", activeExtensionIDs);
|
||||
|
||||
for (let key of this.sharedDataKeys) {
|
||||
sharedData.delete(key);
|
||||
}
|
||||
|
||||
Services.ppmm.removeMessageListener(this.MESSAGE_EMIT_EVENT, this);
|
||||
|
||||
|
@ -59,6 +59,8 @@ const {
|
||||
withHandlingUserInput,
|
||||
} = ExtensionCommon;
|
||||
|
||||
const {sharedData} = Services.cpmm;
|
||||
|
||||
const isContentProcess = Services.appinfo.processType == Services.appinfo.PROCESS_TYPE_CONTENT;
|
||||
|
||||
// Copy an API object from |source| into the scope |dest|.
|
||||
@ -602,9 +604,9 @@ class BrowserExtensionContent extends EventEmitter {
|
||||
this.uuid = data.uuid;
|
||||
this.instanceId = data.instanceId;
|
||||
|
||||
this.childModules = data.childModules;
|
||||
this.dependencies = data.dependencies;
|
||||
this.schemaURLs = data.schemaURLs;
|
||||
if (WebExtensionPolicy.isExtensionProcess) {
|
||||
Object.assign(this, this.getSharedData("extendedData"));
|
||||
}
|
||||
|
||||
this.MESSAGE_EMIT_EVENT = `Extension:EmitEvent:${this.instanceId}`;
|
||||
Services.cpmm.addMessageListener(this.MESSAGE_EMIT_EVENT, this);
|
||||
@ -616,7 +618,6 @@ class BrowserExtensionContent extends EventEmitter {
|
||||
this.webAccessibleResources = data.webAccessibleResources.map(res => new MatchGlob(res));
|
||||
this.permissions = data.permissions;
|
||||
this.optionalPermissions = data.optionalPermissions;
|
||||
this.principal = data.principal;
|
||||
|
||||
let restrictSchemes = !this.hasPermission("mozillaAddons");
|
||||
|
||||
@ -624,11 +625,14 @@ class BrowserExtensionContent extends EventEmitter {
|
||||
|
||||
this.apiManager = this.getAPIManager();
|
||||
|
||||
this.localeData = new LocaleData(data.localeData);
|
||||
this._manifest = null;
|
||||
this._localeData = null;
|
||||
|
||||
this.manifest = data.manifest;
|
||||
this.baseURL = data.baseURL;
|
||||
this.baseURI = Services.io.newURI(data.baseURL);
|
||||
this.baseURI = Services.io.newURI(`moz-extension://${this.uuid}/`);
|
||||
this.baseURL = this.baseURI.spec;
|
||||
|
||||
this.principal = Services.scriptSecurityManager.createCodebasePrincipal(
|
||||
this.baseURI, {});
|
||||
|
||||
// Only used in addon processes.
|
||||
this.views = new Set();
|
||||
@ -683,13 +687,33 @@ class BrowserExtensionContent extends EventEmitter {
|
||||
ExtensionManager.extensions.set(this.id, this);
|
||||
}
|
||||
|
||||
getSharedData(key, value) {
|
||||
return sharedData.get(`extension/${this.id}/${key}`);
|
||||
}
|
||||
|
||||
get localeData() {
|
||||
if (!this._localeData) {
|
||||
this._localeData = new LocaleData(this.getSharedData("locales"));
|
||||
}
|
||||
return this._localeData;
|
||||
}
|
||||
|
||||
get manifest() {
|
||||
if (!this._manifest) {
|
||||
this._manifest = this.getSharedData("manifest");
|
||||
}
|
||||
return this._manifest;
|
||||
}
|
||||
|
||||
getAPIManager() {
|
||||
let apiManagers = [ExtensionPageChild.apiManager];
|
||||
|
||||
for (let id of this.dependencies) {
|
||||
let extension = processScript.getExtensionChild(id);
|
||||
if (extension) {
|
||||
apiManagers.push(extension.experimentAPIManager);
|
||||
if (this.dependencies) {
|
||||
for (let id of this.dependencies) {
|
||||
let extension = processScript.getExtensionChild(id);
|
||||
if (extension) {
|
||||
apiManagers.push(extension.experimentAPIManager);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -140,7 +140,7 @@ let apiManager = new class extends SchemaAPIManager {
|
||||
|
||||
// Load order matters here. The base manifest defines types which are
|
||||
// extended by other schemas, so needs to be loaded first.
|
||||
return Schemas.load(BASE_SCHEMA, AppConstants.DEBUG).then(() => {
|
||||
return Schemas.load(BASE_SCHEMA).then(() => {
|
||||
let promises = [];
|
||||
for (let [/* name */, url] of XPCOMUtils.enumerateCategoryEntries(CATEGORY_EXTENSION_SCHEMAS)) {
|
||||
promises.push(Schemas.load(url));
|
||||
@ -151,7 +151,9 @@ let apiManager = new class extends SchemaAPIManager {
|
||||
for (let url of schemaURLs) {
|
||||
promises.push(Schemas.load(url));
|
||||
}
|
||||
return Promise.all(promises);
|
||||
return Promise.all(promises).then(() => {
|
||||
Schemas.updateSharedSchemas();
|
||||
});
|
||||
});
|
||||
})();
|
||||
|
||||
@ -749,19 +751,14 @@ class DevToolsExtensionPageContextParent extends ExtensionPageContextParent {
|
||||
ParentAPIManager = {
|
||||
proxyContexts: new Map(),
|
||||
|
||||
parentMessageManagers: new Set(),
|
||||
|
||||
init() {
|
||||
Services.obs.addObserver(this, "message-manager-close");
|
||||
Services.obs.addObserver(this, "ipc:content-created");
|
||||
|
||||
Services.mm.addMessageListener("API:CreateProxyContext", this);
|
||||
Services.mm.addMessageListener("API:CloseProxyContext", this, true);
|
||||
Services.mm.addMessageListener("API:Call", this);
|
||||
Services.mm.addMessageListener("API:AddListener", this);
|
||||
Services.mm.addMessageListener("API:RemoveListener", this);
|
||||
|
||||
this.schemaHook = this.schemaHook.bind(this);
|
||||
},
|
||||
|
||||
attachMessageManager(extension, processMessageManager) {
|
||||
@ -783,23 +780,6 @@ ParentAPIManager = {
|
||||
extension.parentMessageManager = null;
|
||||
}
|
||||
}
|
||||
|
||||
this.parentMessageManagers.delete(mm);
|
||||
} else if (topic === "ipc:content-created") {
|
||||
let mm = subject.QueryInterface(Ci.nsIInterfaceRequestor)
|
||||
.getInterface(Ci.nsIMessageSender);
|
||||
if (mm.remoteType === E10SUtils.EXTENSION_REMOTE_TYPE) {
|
||||
this.parentMessageManagers.add(mm);
|
||||
mm.sendAsyncMessage("Schema:Add", Schemas.schemaJSON);
|
||||
|
||||
Schemas.schemaHook = this.schemaHook;
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
schemaHook(schemas) {
|
||||
for (let mm of this.parentMessageManagers) {
|
||||
mm.sendAsyncMessage("Schema:Add", schemas);
|
||||
}
|
||||
},
|
||||
|
||||
|
@ -32,6 +32,9 @@ XPCOMUtils.defineLazyGetter(this, "StartupCache", () => ExtensionParent.StartupC
|
||||
|
||||
var EXPORTED_SYMBOLS = ["SchemaRoot", "Schemas"];
|
||||
|
||||
const KEY_CONTENT_SCHEMAS = "extensions-framework/schemas/content";
|
||||
const KEY_PRIVILEGED_SCHEMAS = "extensions-framework/schemas/privileged";
|
||||
|
||||
const {DEBUG} = AppConstants;
|
||||
|
||||
const isParentProcess = Services.appinfo.processType === Services.appinfo.PROCESS_TYPE_DEFAULT;
|
||||
@ -3057,10 +3060,13 @@ this.Schemas = {
|
||||
// is useful for sending the JSON across processes.
|
||||
schemaJSON: new Map(),
|
||||
|
||||
// A separate map of schema JSON which should be available in all
|
||||
// content processes.
|
||||
|
||||
// A map of schema JSON which should be available in all content processes.
|
||||
contentSchemaJSON: new Map(),
|
||||
|
||||
// A map of schema JSON which should only be available to extension processes.
|
||||
privilegedSchemaJSON: new Map(),
|
||||
|
||||
_rootSchema: null,
|
||||
|
||||
get rootSchema() {
|
||||
@ -3085,35 +3091,20 @@ this.Schemas = {
|
||||
this.initialized = true;
|
||||
|
||||
if (Services.appinfo.processType == Services.appinfo.PROCESS_TYPE_CONTENT) {
|
||||
let data = Services.cpmm.initialProcessData;
|
||||
let schemas = data["Extension:Schemas"];
|
||||
if (schemas) {
|
||||
this.schemaJSON = schemas;
|
||||
let addSchemas = schemas => {
|
||||
for (let [key, value] of schemas.entries()) {
|
||||
this.schemaJSON.set(key, value);
|
||||
}
|
||||
};
|
||||
|
||||
if (WebExtensionPolicy.isExtensionProcess || DEBUG) {
|
||||
addSchemas(Services.cpmm.sharedData.get(KEY_PRIVILEGED_SCHEMAS));
|
||||
}
|
||||
|
||||
Services.cpmm.addMessageListener("Schema:Add", this);
|
||||
}
|
||||
},
|
||||
|
||||
receiveMessage(msg) {
|
||||
let {data} = msg;
|
||||
switch (msg.name) {
|
||||
case "Schema:Add":
|
||||
// If we're given a Map, the ordering of the initial items
|
||||
// matters, so swap with our current data to make sure its
|
||||
// entries appear first.
|
||||
if (typeof data.get === "function") {
|
||||
// Create a new Map so we're sure it's in the same compartment.
|
||||
[this.schemaJSON, data] = [new Map(data), this.schemaJSON];
|
||||
}
|
||||
|
||||
for (let [url, schema] of data) {
|
||||
this.schemaJSON.set(url, schema);
|
||||
}
|
||||
if (this._rootSchema) {
|
||||
throw new Error("Schema loaded after root schema populated");
|
||||
}
|
||||
break;
|
||||
let schemas = Services.cpmm.sharedData.get(KEY_CONTENT_SCHEMAS);
|
||||
if (schemas) {
|
||||
addSchemas(schemas);
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
@ -3133,13 +3124,8 @@ this.Schemas = {
|
||||
|
||||
if (content) {
|
||||
this.contentSchemaJSON.set(url, schema);
|
||||
|
||||
let data = Services.ppmm.initialProcessData;
|
||||
data["Extension:Schemas"] = this.contentSchemaJSON;
|
||||
|
||||
Services.ppmm.broadcastAsyncMessage("Schema:Add", [[url, schema]]);
|
||||
} else if (this.schemaHook) {
|
||||
this.schemaHook([[url, schema]]);
|
||||
} else {
|
||||
this.privilegedSchemaJSON.set(url, schema);
|
||||
}
|
||||
|
||||
if (this._rootSchema) {
|
||||
@ -3147,6 +3133,13 @@ this.Schemas = {
|
||||
}
|
||||
},
|
||||
|
||||
updateSharedSchemas() {
|
||||
let {sharedData} = Services.ppmm;
|
||||
|
||||
sharedData.set(KEY_CONTENT_SCHEMAS, this.contentSchemaJSON);
|
||||
sharedData.set(KEY_PRIVILEGED_SCHEMAS, this.privilegedSchemaJSON);
|
||||
},
|
||||
|
||||
fetch(url) {
|
||||
return readJSONAndBlobbify(url);
|
||||
},
|
||||
|
@ -30,6 +30,12 @@ const {
|
||||
getInnerWindowID,
|
||||
} = ExtensionUtils;
|
||||
|
||||
const {sharedData} = Services.cpmm;
|
||||
|
||||
function getData(extension, key = "") {
|
||||
return sharedData.get(`extension/${extension.id}/${key}`);
|
||||
}
|
||||
|
||||
// We need to avoid touching Services.appinfo here in order to prevent
|
||||
// the wrong version from being cached during xpcshell test startup.
|
||||
// eslint-disable-next-line mozilla/use-services
|
||||
@ -301,20 +307,8 @@ ExtensionManager = {
|
||||
Services.cpmm.addMessageListener("Extension:RegisterContentScript", this);
|
||||
Services.cpmm.addMessageListener("Extension:UnregisterContentScripts", this);
|
||||
|
||||
let procData = Services.cpmm.initialProcessData || {};
|
||||
|
||||
for (let data of procData["Extension:Extensions"] || []) {
|
||||
this.initExtension(data);
|
||||
}
|
||||
|
||||
if (isContentProcess) {
|
||||
// Make sure we handle new schema data until Schemas.jsm is loaded.
|
||||
if (!procData["Extension:Schemas"]) {
|
||||
procData["Extension:Schemas"] = new Map();
|
||||
}
|
||||
this.schemaJSON = procData["Extension:Schemas"];
|
||||
|
||||
Services.cpmm.addMessageListener("Schema:Add", this);
|
||||
for (let id of sharedData.get("extensions/activeIDs") || []) {
|
||||
this.initExtension(getData({id}));
|
||||
}
|
||||
},
|
||||
|
||||
@ -336,6 +330,11 @@ ExtensionManager = {
|
||||
webAccessibleResources = extension.webAccessibleResources.map(host => new MatchGlob(host));
|
||||
}
|
||||
|
||||
let {backgroundScripts} = extension;
|
||||
if (!backgroundScripts && WebExtensionPolicy.isExtensionProcess) {
|
||||
({backgroundScripts} = getData(extension, "extendedData") || {});
|
||||
}
|
||||
|
||||
policy = new WebExtensionPolicy({
|
||||
id: extension.id,
|
||||
mozExtensionHostname: extension.uuid,
|
||||
@ -346,12 +345,11 @@ ExtensionManager = {
|
||||
allowedOrigins,
|
||||
webAccessibleResources,
|
||||
|
||||
contentSecurityPolicy: extension.manifest.content_security_policy,
|
||||
contentSecurityPolicy: extension.contentSecurityPolicy,
|
||||
|
||||
localizeCallback,
|
||||
|
||||
backgroundScripts: (extension.manifest.background &&
|
||||
extension.manifest.background.scripts),
|
||||
backgroundScripts,
|
||||
|
||||
contentScripts: extension.contentScripts.map(script => parseScriptOptions(script, restrictSchemes)),
|
||||
});
|
||||
@ -363,13 +361,11 @@ ExtensionManager = {
|
||||
// a content process that crashed and it has been recreated).
|
||||
const registeredContentScripts = this.registeredContentScripts.get(policy);
|
||||
|
||||
if (extension.registeredContentScripts) {
|
||||
for (let [scriptId, options] of extension.registeredContentScripts) {
|
||||
const parsedOptions = parseScriptOptions(options, restrictSchemes);
|
||||
const script = new WebExtensionContentScript(policy, parsedOptions);
|
||||
policy.registerContentScript(script);
|
||||
registeredContentScripts.set(scriptId, script);
|
||||
}
|
||||
for (let [scriptId, options] of getData(extension, "contentScripts") || []) {
|
||||
const parsedOptions = parseScriptOptions(options, restrictSchemes);
|
||||
const script = new WebExtensionContentScript(policy, parsedOptions);
|
||||
policy.registerContentScript(script);
|
||||
registeredContentScripts.set(scriptId, script);
|
||||
}
|
||||
|
||||
policy.active = true;
|
||||
@ -379,6 +375,9 @@ ExtensionManager = {
|
||||
},
|
||||
|
||||
initExtension(data) {
|
||||
if (typeof data === "string") {
|
||||
data = getData({id: data});
|
||||
}
|
||||
let policy = this.initExtensionPolicy(data);
|
||||
|
||||
DocumentManager.initExtension(policy);
|
||||
@ -415,23 +414,6 @@ ExtensionManager = {
|
||||
break;
|
||||
}
|
||||
|
||||
case "Schema:Add": {
|
||||
// If we're given a Map, the ordering of the initial items
|
||||
// matters, so swap with our current data to make sure its
|
||||
// entries appear first.
|
||||
if (typeof data.get === "function") {
|
||||
[this.schemaJSON, data] = [data, this.schemaJSON];
|
||||
|
||||
Services.cpmm.initialProcessData["Extension:Schemas"] =
|
||||
this.schemaJSON;
|
||||
}
|
||||
|
||||
for (let [url, schema] of data) {
|
||||
this.schemaJSON.set(url, schema);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
case "Extension:RegisterContentScript": {
|
||||
let policy = WebExtensionPolicy.getByID(data.id);
|
||||
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user