mirror of
https://github.com/mozilla/gecko-dev.git
synced 2025-02-27 04:38:02 +00:00
merge autoland to mozilla-central a=merge
This commit is contained in:
commit
eed5d11870
@ -1361,7 +1361,11 @@ pref("media.gmp.trial-create.enabled", true);
|
||||
|
||||
#ifdef MOZ_ADOBE_EME
|
||||
pref("media.gmp-eme-adobe.visible", true);
|
||||
pref("media.gmp-eme-adobe.enabled", true);
|
||||
// When Adobe EME is enabled in the build system, we don't actually enable
|
||||
// the plugin by default, so that it doesn't download and install by default.
|
||||
// When Adobe EME is first used, Firefox will prompt the user to enable it,
|
||||
// and then download the CDM.
|
||||
pref("media.gmp-eme-adobe.enabled", false);
|
||||
#endif
|
||||
|
||||
#ifdef MOZ_WIDEVINE_EME
|
||||
|
@ -345,13 +345,6 @@ var gFxAccounts = {
|
||||
},
|
||||
|
||||
openAccountsPage: function (action, urlParams={}) {
|
||||
// An entrypoint param is used for server-side metrics. If the current tab
|
||||
// is UITour, assume that it initiated the call to this method and override
|
||||
// the entrypoint accordingly.
|
||||
if (UITour.tourBrowsersByWindow.get(window) &&
|
||||
UITour.tourBrowsersByWindow.get(window).has(gBrowser.selectedBrowser)) {
|
||||
urlParams.entrypoint = "uitour";
|
||||
}
|
||||
let params = new URLSearchParams();
|
||||
if (action) {
|
||||
params.set("action", action);
|
||||
|
@ -290,11 +290,6 @@ var gSyncUI = {
|
||||
|
||||
openSetup: function SUI_openSetup(wizardType, entryPoint = "syncbutton") {
|
||||
if (this.weaveService.fxAccountsEnabled) {
|
||||
// If the user is also in an uitour, set the entrypoint to `uitour`
|
||||
if (UITour.tourBrowsersByWindow.get(window) &&
|
||||
UITour.tourBrowsersByWindow.get(window).has(gBrowser.selectedBrowser)) {
|
||||
entryPoint = "uitour";
|
||||
}
|
||||
this.openPrefs(entryPoint);
|
||||
} else {
|
||||
let win = Services.wm.getMostRecentWindow("Weave:AccountSetup");
|
||||
|
@ -119,9 +119,6 @@ function* asyncCleanup() {
|
||||
// When Sync is not setup.
|
||||
add_task(() => openPrefsFromMenuPanel("PanelUI-remotetabs-setupsync", "synced-tabs"));
|
||||
add_task(asyncCleanup);
|
||||
// Test that uitour is in progress, the entrypoint is `uitour` and not `menupanel`
|
||||
add_task(() => openPrefsFromMenuPanel("PanelUI-remotetabs-setupsync", "uitour"));
|
||||
add_task(asyncCleanup);
|
||||
|
||||
// When Sync is configured in a "needs reauthentication" state.
|
||||
add_task(function* () {
|
||||
|
@ -13,4 +13,5 @@ EXTRA_COMPONENTS += [
|
||||
DIRS += ['schemas']
|
||||
|
||||
BROWSER_CHROME_MANIFESTS += ['test/browser/browser.ini']
|
||||
MOCHITEST_MANIFESTS += ['test/mochitest/mochitest.ini']
|
||||
XPCSHELL_TESTS_MANIFESTS += ['test/xpcshell/xpcshell.ini']
|
||||
|
@ -82,6 +82,7 @@
|
||||
{
|
||||
"namespace": "commands",
|
||||
"description": "Use the commands API to add keyboard shortcuts that trigger actions in your extension, for example, an action to open the browser action or send a command to the xtension.",
|
||||
"permissions": ["manifest:commands"],
|
||||
"types": [
|
||||
{
|
||||
"id": "Command",
|
||||
|
@ -0,0 +1,6 @@
|
||||
[DEFAULT]
|
||||
support-files =
|
||||
../../../../../toolkit/components/extensions/test/mochitest/test_ext_all_apis.js
|
||||
tags = webextensions
|
||||
|
||||
[test_ext_all_apis.html]
|
@ -0,0 +1,75 @@
|
||||
<!DOCTYPE HTML>
|
||||
<html>
|
||||
<head>
|
||||
<title>WebExtension test</title>
|
||||
<meta charset="utf-8">
|
||||
<script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
|
||||
<script type="text/javascript" src="/tests/SimpleTest/SpawnTask.js"></script>
|
||||
<script type="text/javascript" src="/tests/SimpleTest/ExtensionTestUtils.js"></script>
|
||||
<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css">
|
||||
</head>
|
||||
<body>
|
||||
<script>
|
||||
"use strict";
|
||||
/* exported expectedContentApisTargetSpecific, expectedBackgroundApisTargetSpecific */
|
||||
let expectedContentApisTargetSpecific = [
|
||||
];
|
||||
|
||||
let expectedBackgroundApisTargetSpecific = [
|
||||
"tabs.MutedInfoReason",
|
||||
"tabs.TAB_ID_NONE",
|
||||
"tabs.TabStatus",
|
||||
"tabs.WindowType",
|
||||
"tabs.ZoomSettingsMode",
|
||||
"tabs.ZoomSettingsScope",
|
||||
"tabs.connect",
|
||||
"tabs.create",
|
||||
"tabs.detectLanguage",
|
||||
"tabs.duplicate",
|
||||
"tabs.executeScript",
|
||||
"tabs.get",
|
||||
"tabs.getCurrent",
|
||||
"tabs.getZoom",
|
||||
"tabs.getZoomSettings",
|
||||
"tabs.highlight",
|
||||
"tabs.insertCSS",
|
||||
"tabs.move",
|
||||
"tabs.onActivated",
|
||||
"tabs.onAttached",
|
||||
"tabs.onCreated",
|
||||
"tabs.onDetached",
|
||||
"tabs.onHighlighted",
|
||||
"tabs.onMoved",
|
||||
"tabs.onRemoved",
|
||||
"tabs.onReplaced",
|
||||
"tabs.onUpdated",
|
||||
"tabs.onZoomChange",
|
||||
"tabs.query",
|
||||
"tabs.reload",
|
||||
"tabs.remove",
|
||||
"tabs.removeCSS",
|
||||
"tabs.sendMessage",
|
||||
"tabs.setZoom",
|
||||
"tabs.setZoomSettings",
|
||||
"tabs.update",
|
||||
"windows.CreateType",
|
||||
"windows.WINDOW_ID_CURRENT",
|
||||
"windows.WINDOW_ID_NONE",
|
||||
"windows.WindowState",
|
||||
"windows.WindowType",
|
||||
"windows.create",
|
||||
"windows.get",
|
||||
"windows.getAll",
|
||||
"windows.getCurrent",
|
||||
"windows.getLastFocused",
|
||||
"windows.onCreated",
|
||||
"windows.onFocusChanged",
|
||||
"windows.onRemoved",
|
||||
"windows.remove",
|
||||
"windows.update",
|
||||
];
|
||||
</script>
|
||||
<script src="test_ext_all_apis.js"></script>
|
||||
|
||||
</body>
|
||||
</html>
|
@ -353,6 +353,10 @@ this.UnsubmittedCrashHandler = {
|
||||
Services.prefs.getBranch("browser.crashReports.unsubmittedCheck.");
|
||||
},
|
||||
|
||||
get enabled() {
|
||||
return this.prefs.getBoolPref("enabled");
|
||||
},
|
||||
|
||||
// showingNotification is set to true once a notification
|
||||
// is successfully shown, and then set back to false if
|
||||
// the notification is dismissed by an action by the user.
|
||||
@ -371,9 +375,12 @@ this.UnsubmittedCrashHandler = {
|
||||
|
||||
this.initialized = true;
|
||||
|
||||
let shouldCheck = this.prefs.getBoolPref("enabled");
|
||||
|
||||
if (shouldCheck) {
|
||||
// UnsubmittedCrashHandler can be initialized but still be disabled.
|
||||
// This is intentional, as this makes simulating UnsubmittedCrashHandler's
|
||||
// reactions to browser startup and shutdown easier in test automation.
|
||||
//
|
||||
// UnsubmittedCrashHandler, when initialized but not enabled, is inert.
|
||||
if (this.enabled) {
|
||||
if (this.prefs.prefHasUserValue("suppressUntilDate")) {
|
||||
if (this.prefs.getCharPref("suppressUntilDate") > this.dateString()) {
|
||||
// We'll be suppressing any notifications until after suppressedDate,
|
||||
@ -400,6 +407,10 @@ this.UnsubmittedCrashHandler = {
|
||||
|
||||
this.initialized = false;
|
||||
|
||||
if (!this.enabled) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (this.suppressed) {
|
||||
this.suppressed = false;
|
||||
// No need to do any more clean-up, since we were suppressed.
|
||||
|
@ -198,11 +198,20 @@ add_task(function* setup() {
|
||||
let oldServerURL = env.get("MOZ_CRASHREPORTER_URL");
|
||||
env.set("MOZ_CRASHREPORTER_URL", SERVER_URL);
|
||||
|
||||
// nsBrowserGlue starts up UnsubmittedCrashHandler automatically
|
||||
// so at this point, it is initialized. It's possible that it
|
||||
// was initialized, but is preffed off, so it's inert, so we
|
||||
// shut it down, make sure it's preffed on, and then restart it.
|
||||
// Note that making the component initialize even when it's
|
||||
// disabled is an intentional choice, as this allows for easier
|
||||
// simulation of startup and shutdown.
|
||||
UnsubmittedCrashHandler.uninit();
|
||||
yield SpecialPowers.pushPrefEnv({
|
||||
set: [
|
||||
["browser.crashReports.unsubmittedCheck.enabled", true],
|
||||
],
|
||||
});
|
||||
UnsubmittedCrashHandler.init();
|
||||
|
||||
registerCleanupFunction(function() {
|
||||
gNotificationBox = null;
|
||||
|
@ -33,7 +33,7 @@ buildscript {
|
||||
}
|
||||
|
||||
dependencies {
|
||||
classpath 'com.android.tools.build:gradle:2.1.0'
|
||||
classpath 'com.android.tools.build:gradle:2.1.3'
|
||||
classpath('com.stanfy.spoon:spoon-gradle-plugin:1.0.4') {
|
||||
// Without these, we get errors linting.
|
||||
exclude module: 'guava'
|
||||
|
@ -423,6 +423,16 @@ InspectorPanel.prototype = {
|
||||
return this._InspectorTabPanel;
|
||||
},
|
||||
|
||||
/**
|
||||
* Check if the inspector should use the landscape mode.
|
||||
*
|
||||
* @return {Boolean} true if the inspector should be in landscape mode.
|
||||
*/
|
||||
useLandscapeMode: function () {
|
||||
let { clientWidth } = this.panelDoc.getElementById("inspector-splitter-box");
|
||||
return clientWidth > PORTRAIT_MODE_WIDTH;
|
||||
},
|
||||
|
||||
/**
|
||||
* Build Splitter located between the main and side area of
|
||||
* the Inspector panel.
|
||||
@ -445,7 +455,8 @@ InspectorPanel.prototype = {
|
||||
}),
|
||||
endPanel: this.InspectorTabPanel({
|
||||
id: "inspector-sidebar-container"
|
||||
})
|
||||
}),
|
||||
vert: this.useLandscapeMode(),
|
||||
});
|
||||
|
||||
this._splitter = this.ReactDOM.render(splitter,
|
||||
@ -473,9 +484,8 @@ InspectorPanel.prototype = {
|
||||
* to `horizontal` to support portrait view.
|
||||
*/
|
||||
onPanelWindowResize: function () {
|
||||
let box = this.panelDoc.getElementById("inspector-splitter-box");
|
||||
this._splitter.setState({
|
||||
vert: (box.clientWidth > PORTRAIT_MODE_WIDTH)
|
||||
vert: this.useLandscapeMode(),
|
||||
});
|
||||
},
|
||||
|
||||
|
@ -134,6 +134,7 @@ subsuite = clipboard
|
||||
skip-if = os == "mac" # Full keyboard navigation on OSX only works if Full Keyboard Access setting is set to All Control in System Keyboard
|
||||
[browser_inspector_picker-stop-on-destroy.js]
|
||||
[browser_inspector_picker-stop-on-tool-change.js]
|
||||
[browser_inspector_portrait_mode.js]
|
||||
[browser_inspector_pseudoclass-lock.js]
|
||||
[browser_inspector_pseudoclass-menu.js]
|
||||
[browser_inspector_reload-01.js]
|
||||
|
@ -0,0 +1,79 @@
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
"use strict";
|
||||
|
||||
// Test that the inspector splitter is properly initialized in horizontal mode if the
|
||||
// inspector starts in portrait mode.
|
||||
|
||||
add_task(function* () {
|
||||
let { inspector, toolbox } = yield openInspectorForURL(
|
||||
"data:text/html;charset=utf-8,<h1>foo</h1><span>bar</span>", "window");
|
||||
|
||||
let hostWindow = toolbox._host._window;
|
||||
let originalWidth = hostWindow.outerWidth;
|
||||
let originalHeight = hostWindow.outerHeight;
|
||||
|
||||
let splitter = inspector.panelDoc.querySelector(".inspector-sidebar-splitter");
|
||||
|
||||
// If the inspector is not already in landscape mode.
|
||||
if (!splitter.classList.contains("vert")) {
|
||||
info("Resize toolbox window to force inspector to landscape mode");
|
||||
let onClassnameMutation = waitForClassMutation(splitter);
|
||||
hostWindow.resizeTo(800, 500);
|
||||
yield onClassnameMutation;
|
||||
|
||||
ok(splitter.classList.contains("vert"), "Splitter is in vertical mode");
|
||||
}
|
||||
|
||||
info("Resize toolbox window to force inspector to portrait mode");
|
||||
let onClassnameMutation = waitForClassMutation(splitter);
|
||||
hostWindow.resizeTo(500, 500);
|
||||
yield onClassnameMutation;
|
||||
|
||||
ok(splitter.classList.contains("horz"), "Splitter is in horizontal mode");
|
||||
|
||||
info("Close the inspector");
|
||||
let target = TargetFactory.forTab(gBrowser.selectedTab);
|
||||
yield gDevTools.closeToolbox(target);
|
||||
|
||||
info("Reopen inspector");
|
||||
({ inspector, toolbox } = yield openInspector("window"));
|
||||
|
||||
// Devtools window should still be 500px * 500px, inspector should still be in portrait.
|
||||
splitter = inspector.panelDoc.querySelector(".inspector-sidebar-splitter");
|
||||
ok(splitter.classList.contains("horz"), "Splitter is in horizontal mode");
|
||||
|
||||
info("Restore original window size");
|
||||
toolbox._host._window.resizeTo(originalWidth, originalHeight);
|
||||
});
|
||||
|
||||
/**
|
||||
* Helper waiting for a class attribute mutation on the provided target. Returns a
|
||||
* promise.
|
||||
*
|
||||
* @param {Node} target
|
||||
* Node to observe
|
||||
* @return {Promise} promise that will resolve upon receiving a mutation for the class
|
||||
* attribute on the target.
|
||||
*/
|
||||
function waitForClassMutation(target) {
|
||||
return new Promise(resolve => {
|
||||
let observer = new MutationObserver((mutations) => {
|
||||
for (let mutation of mutations) {
|
||||
if (mutation.attributeName === "class") {
|
||||
observer.disconnect();
|
||||
resolve();
|
||||
return;
|
||||
}
|
||||
}
|
||||
});
|
||||
observer.observe(target, { attributes: true });
|
||||
});
|
||||
}
|
||||
|
||||
registerCleanupFunction(function () {
|
||||
// Restore the host type for other tests.
|
||||
Services.prefs.clearUserPref("devtools.toolbox.host");
|
||||
});
|
@ -454,6 +454,25 @@ const TEST_DATA = [
|
||||
index: 0, enabled: false},
|
||||
expected: "/*! content: '*\\/'; */"
|
||||
},
|
||||
|
||||
{
|
||||
desc: "delete disabled property",
|
||||
input: "\n a:b;\n /* color:#f0c; */\n e:f;",
|
||||
instruction: {type: "remove", name: "color", index: 1},
|
||||
expected: "\n a:b;\n e:f;",
|
||||
},
|
||||
{
|
||||
desc: "delete heuristic-disabled property",
|
||||
input: "\n a:b;\n /*! c:d; */\n e:f;",
|
||||
instruction: {type: "remove", name: "c", index: 1},
|
||||
expected: "\n a:b;\n e:f;",
|
||||
},
|
||||
{
|
||||
desc: "delete disabled property leaving other disabled property",
|
||||
input: "\n a:b;\n /* color:#f0c; background-color: seagreen; */\n e:f;",
|
||||
instruction: {type: "remove", name: "color", index: 1},
|
||||
expected: "\n a:b;\n /* background-color: seagreen; */\n e:f;",
|
||||
},
|
||||
];
|
||||
|
||||
function rewriteDeclarations(inputString, instruction, defaultIndentation) {
|
||||
|
@ -197,7 +197,8 @@ iframe {
|
||||
|
||||
#markup-box {
|
||||
width: 100%;
|
||||
flex: 1 1 auto;
|
||||
flex: 1;
|
||||
min-height: 0;
|
||||
}
|
||||
|
||||
#markup-box > iframe {
|
||||
|
@ -3298,6 +3298,9 @@ WebConsoleConnectionProxy.prototype = {
|
||||
messages.sort((a, b) => a.timeStamp - b.timeStamp);
|
||||
|
||||
if (this.webConsoleFrame.NEW_CONSOLE_OUTPUT_ENABLED) {
|
||||
// Filter out CSS page errors.
|
||||
messages = messages.filter(message => !(message._type == "PageError"
|
||||
&& Utils.categoryForScriptError(message) === CATEGORY_CSS));
|
||||
for (let packet of messages) {
|
||||
this.dispatchMessageAdd(packet);
|
||||
}
|
||||
|
@ -486,18 +486,12 @@ function parseDeclarations(isCssPropertyKnown, inputString,
|
||||
*/
|
||||
function RuleRewriter(isCssPropertyKnown, rule, inputString) {
|
||||
this.rule = rule;
|
||||
this.inputString = inputString;
|
||||
// Whether there are any newlines in the input text.
|
||||
this.hasNewLine = /[\r\n]/.test(this.inputString);
|
||||
this.isCssPropertyKnown = isCssPropertyKnown;
|
||||
|
||||
// Keep track of which any declarations we had to rewrite while
|
||||
// performing the requested action.
|
||||
this.changedDeclarations = {};
|
||||
// The declarations.
|
||||
this.declarations = parseDeclarations(isCssPropertyKnown, this.inputString,
|
||||
true);
|
||||
|
||||
this.decl = null;
|
||||
this.result = null;
|
||||
// If not null, a promise that must be wait upon before |apply| can
|
||||
// do its work.
|
||||
this.editPromise = null;
|
||||
@ -507,9 +501,28 @@ function RuleRewriter(isCssPropertyKnown, rule, inputString) {
|
||||
// indentation based on the style sheet's text. This override
|
||||
// facility is for testing.
|
||||
this.defaultIndentation = null;
|
||||
|
||||
this.startInitialization(inputString);
|
||||
}
|
||||
|
||||
RuleRewriter.prototype = {
|
||||
/**
|
||||
* An internal function to initialize the rewriter with a given
|
||||
* input string.
|
||||
*
|
||||
* @param {String} inputString the input to use
|
||||
*/
|
||||
startInitialization: function (inputString) {
|
||||
this.inputString = inputString;
|
||||
// Whether there are any newlines in the input text.
|
||||
this.hasNewLine = /[\r\n]/.test(this.inputString);
|
||||
// The declarations.
|
||||
this.declarations = parseDeclarations(this.isCssPropertyKnown, this.inputString,
|
||||
true);
|
||||
this.decl = null;
|
||||
this.result = null;
|
||||
},
|
||||
|
||||
/**
|
||||
* An internal function to complete initialization and set some
|
||||
* properties for further processing.
|
||||
@ -924,6 +937,17 @@ RuleRewriter.prototype = {
|
||||
return;
|
||||
}
|
||||
|
||||
// If the property is disabled, then first enable it, and then
|
||||
// delete it. We take this approach because we want to remove the
|
||||
// entire comment if possible; but the logic for dealing with
|
||||
// comments is hairy and already implemented in
|
||||
// setPropertyEnabled.
|
||||
if (this.decl.commentOffsets) {
|
||||
this.setPropertyEnabled(index, name, true);
|
||||
this.startInitialization(this.result);
|
||||
this.completeInitialization(index);
|
||||
}
|
||||
|
||||
let copyOffset = this.decl.offsets[1];
|
||||
// Maybe removing this rule left us with a completely blank
|
||||
// line. In this case, we'll delete the whole thing. We only
|
||||
|
@ -6,6 +6,7 @@
|
||||
|
||||
#include "AnimationUtils.h"
|
||||
|
||||
#include "nsContentUtils.h" // For nsContentUtils::IsCallerChrome
|
||||
#include "nsDebug.h"
|
||||
#include "nsIAtom.h"
|
||||
#include "nsIContent.h"
|
||||
@ -63,7 +64,7 @@ AnimationUtils::IsOffscreenThrottlingEnabled()
|
||||
}
|
||||
|
||||
/* static */ bool
|
||||
AnimationUtils::IsCoreAPIEnabled()
|
||||
AnimationUtils::IsCoreAPIEnabledForCaller()
|
||||
{
|
||||
static bool sCoreAPIEnabled;
|
||||
static bool sPrefCached = false;
|
||||
@ -74,7 +75,7 @@ AnimationUtils::IsCoreAPIEnabled()
|
||||
"dom.animations-api.core.enabled");
|
||||
}
|
||||
|
||||
return sCoreAPIEnabled;
|
||||
return sCoreAPIEnabled || nsContentUtils::IsCallerChrome();
|
||||
}
|
||||
|
||||
} // namespace mozilla
|
||||
|
@ -63,10 +63,10 @@ public:
|
||||
|
||||
/**
|
||||
* Returns true if the preference to enable the core Web Animations API is
|
||||
* true.
|
||||
* true or the caller is chrome.
|
||||
*/
|
||||
static bool
|
||||
IsCoreAPIEnabled();
|
||||
IsCoreAPIEnabledForCaller();
|
||||
};
|
||||
|
||||
} // namespace mozilla
|
||||
|
@ -132,7 +132,7 @@ KeyframeEffect::SetIterationComposite(
|
||||
{
|
||||
// Ignore iterationComposite if the Web Animations API is not enabled,
|
||||
// then the default value 'Replace' will be used.
|
||||
if (!AnimationUtils::IsCoreAPIEnabled()) {
|
||||
if (!AnimationUtils::IsCoreAPIEnabledForCaller()) {
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -115,7 +115,7 @@ KeyframeEffectParams::ParseSpacing(const nsAString& aSpacing,
|
||||
|
||||
// Ignore spacing if the core API is not enabled since it is not yet ready to
|
||||
// ship.
|
||||
if (!AnimationUtils::IsCoreAPIEnabled()) {
|
||||
if (!AnimationUtils::IsCoreAPIEnabledForCaller()) {
|
||||
aSpacingMode = SpacingMode::distribute;
|
||||
return;
|
||||
}
|
||||
|
@ -512,7 +512,7 @@ KeyframeEffectParamsFromUnion(const OptionsType& aOptions,
|
||||
aRv);
|
||||
// Ignore iterationComposite if the Web Animations API is not enabled,
|
||||
// then the default value 'Replace' will be used.
|
||||
if (AnimationUtils::IsCoreAPIEnabled()) {
|
||||
if (AnimationUtils::IsCoreAPIEnabledForCaller()) {
|
||||
result.mIterationComposite = options.mIterationComposite;
|
||||
}
|
||||
}
|
||||
|
13
dom/base/crashtests/1304437.html
Normal file
13
dom/base/crashtests/1304437.html
Normal file
@ -0,0 +1,13 @@
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<script>
|
||||
window.onload=function(){
|
||||
var e=document.createElement("q");
|
||||
document.documentElement.appendChild(e);
|
||||
e.style="mask-image:url(data:image/gif;base64,R0lGODlhAQABAIABAP///wAAACwAAAAAAQABAAACAkQBADs=),url(data:image/gif;base64,R0lGODlhAQABAAAAACH5BAEKAAEALAAAAAABAAEAAAICTAEAOw==)";
|
||||
setTimeout(function(){
|
||||
e.style="mask-image:url(data:image/gif;base64,R0lGODlhAQABAIABAP///wAAACwAAAAAAQABAAACAkQBADs=)";
|
||||
},0);
|
||||
};
|
||||
</script>
|
||||
</html>
|
@ -207,3 +207,4 @@ load xhr_empty_datauri.html
|
||||
load xhr_html_nullresponse.html
|
||||
load 1230422.html
|
||||
load 1251361.html
|
||||
load 1304437.html
|
||||
|
@ -525,7 +525,6 @@ MediaDecoder::MediaDecoder(MediaDecoderOwner* aOwner)
|
||||
, INIT_MIRROR(mPlaybackPosition, 0)
|
||||
, INIT_MIRROR(mIsAudioDataAudible, false)
|
||||
, INIT_CANONICAL(mVolume, 0.0)
|
||||
, INIT_CANONICAL(mPlaybackRate, 1.0)
|
||||
, INIT_CANONICAL(mPreservesPitch, true)
|
||||
, INIT_CANONICAL(mEstimatedDuration, NullableTimeUnit())
|
||||
, INIT_CANONICAL(mExplicitDuration, Maybe<double>())
|
||||
@ -744,6 +743,9 @@ MediaDecoder::SetStateMachineParameters()
|
||||
if (mMinimizePreroll) {
|
||||
mDecoderStateMachine->DispatchMinimizePrerollUntilPlaybackStarts();
|
||||
}
|
||||
if (mPlaybackRate != 1 && mPlaybackRate != 0) {
|
||||
mDecoderStateMachine->DispatchSetPlaybackRate(mPlaybackRate);
|
||||
}
|
||||
mTimedMetadataListener = mDecoderStateMachine->TimedMetadataEvent().Connect(
|
||||
AbstractThread::MainThread(), this, &MediaDecoder::OnMetadataUpdate);
|
||||
mMetadataLoadedListener = mDecoderStateMachine->MetadataLoadedEvent().Connect(
|
||||
@ -1510,7 +1512,10 @@ MediaDecoder::SetPlaybackRate(double aPlaybackRate)
|
||||
if (mPlaybackRate == 0.0) {
|
||||
mPausedForPlaybackRateNull = true;
|
||||
Pause();
|
||||
} else if (mPausedForPlaybackRateNull) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (mPausedForPlaybackRateNull) {
|
||||
// Play() uses mPausedForPlaybackRateNull value, so must reset it first
|
||||
mPausedForPlaybackRateNull = false;
|
||||
// If the playbackRate is no longer null, restart the playback, iff the
|
||||
@ -1519,6 +1524,10 @@ MediaDecoder::SetPlaybackRate(double aPlaybackRate)
|
||||
Play();
|
||||
}
|
||||
}
|
||||
|
||||
if (mDecoderStateMachine) {
|
||||
mDecoderStateMachine->DispatchSetPlaybackRate(aPlaybackRate);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
|
@ -764,7 +764,7 @@ protected:
|
||||
Canonical<double> mVolume;
|
||||
|
||||
// PlaybackRate and pitch preservation status we should start at.
|
||||
Canonical<double> mPlaybackRate;
|
||||
double mPlaybackRate = 1;
|
||||
|
||||
Canonical<bool> mPreservesPitch;
|
||||
|
||||
@ -827,9 +827,6 @@ public:
|
||||
AbstractCanonical<double>* CanonicalVolume() {
|
||||
return &mVolume;
|
||||
}
|
||||
AbstractCanonical<double>* CanonicalPlaybackRate() {
|
||||
return &mPlaybackRate;
|
||||
}
|
||||
AbstractCanonical<bool>* CanonicalPreservesPitch() {
|
||||
return &mPreservesPitch;
|
||||
}
|
||||
|
@ -745,7 +745,6 @@ MediaDecoderStateMachine::MediaDecoderStateMachine(MediaDecoder* aDecoder,
|
||||
INIT_MIRROR(mPlayState, MediaDecoder::PLAY_STATE_LOADING),
|
||||
INIT_MIRROR(mNextPlayState, MediaDecoder::PLAY_STATE_PAUSED),
|
||||
INIT_MIRROR(mVolume, 1.0),
|
||||
INIT_MIRROR(mLogicalPlaybackRate, 1.0),
|
||||
INIT_MIRROR(mPreservesPitch, true),
|
||||
INIT_MIRROR(mSameOriginMedia, false),
|
||||
INIT_MIRROR(mMediaPrincipalHandle, PRINCIPAL_HANDLE_NONE),
|
||||
@ -807,7 +806,6 @@ MediaDecoderStateMachine::InitializationTask(MediaDecoder* aDecoder)
|
||||
mPlayState.Connect(aDecoder->CanonicalPlayState());
|
||||
mNextPlayState.Connect(aDecoder->CanonicalNextPlayState());
|
||||
mVolume.Connect(aDecoder->CanonicalVolume());
|
||||
mLogicalPlaybackRate.Connect(aDecoder->CanonicalPlaybackRate());
|
||||
mPreservesPitch.Connect(aDecoder->CanonicalPreservesPitch());
|
||||
mSameOriginMedia.Connect(aDecoder->CanonicalSameOriginMedia());
|
||||
mMediaPrincipalHandle.Connect(aDecoder->CanonicalMediaPrincipalHandle());
|
||||
@ -824,7 +822,6 @@ MediaDecoderStateMachine::InitializationTask(MediaDecoder* aDecoder)
|
||||
mWatchManager.Watch(mAudioCompleted, &MediaDecoderStateMachine::UpdateNextFrameStatus);
|
||||
mWatchManager.Watch(mVideoCompleted, &MediaDecoderStateMachine::UpdateNextFrameStatus);
|
||||
mWatchManager.Watch(mVolume, &MediaDecoderStateMachine::VolumeChanged);
|
||||
mWatchManager.Watch(mLogicalPlaybackRate, &MediaDecoderStateMachine::LogicalPlaybackRateChanged);
|
||||
mWatchManager.Watch(mPreservesPitch, &MediaDecoderStateMachine::PreservesPitchChanged);
|
||||
mWatchManager.Watch(mEstimatedDuration, &MediaDecoderStateMachine::RecomputeDuration);
|
||||
mWatchManager.Watch(mExplicitDuration, &MediaDecoderStateMachine::RecomputeDuration);
|
||||
@ -1494,8 +1491,7 @@ MediaDecoderStateMachine::MaybeStartBuffering()
|
||||
|
||||
bool shouldBuffer;
|
||||
if (mReader->UseBufferingHeuristics()) {
|
||||
shouldBuffer = HasLowDecodedData(EXHAUSTED_DATA_MARGIN_USECS) &&
|
||||
HasLowBufferedData();
|
||||
shouldBuffer = HasLowDecodedData() && HasLowBufferedData();
|
||||
} else {
|
||||
MOZ_ASSERT(mReader->IsWaitForDataSupported());
|
||||
shouldBuffer = (OutOfDecodedAudio() && mReader->IsWaitingAudioData()) ||
|
||||
@ -1710,7 +1706,6 @@ MediaDecoderStateMachine::Shutdown()
|
||||
mPlayState.DisconnectIfConnected();
|
||||
mNextPlayState.DisconnectIfConnected();
|
||||
mVolume.DisconnectIfConnected();
|
||||
mLogicalPlaybackRate.DisconnectIfConnected();
|
||||
mPreservesPitch.DisconnectIfConnected();
|
||||
mSameOriginMedia.DisconnectIfConnected();
|
||||
mMediaPrincipalHandle.DisconnectIfConnected();
|
||||
@ -2318,17 +2313,28 @@ MediaDecoderStateMachine::StartMediaSink()
|
||||
}
|
||||
}
|
||||
|
||||
bool MediaDecoderStateMachine::HasLowDecodedData(int64_t aAudioUsecs)
|
||||
bool
|
||||
MediaDecoderStateMachine::HasLowDecodedAudio()
|
||||
{
|
||||
MOZ_ASSERT(OnTaskQueue());
|
||||
return IsAudioDecoding() &&
|
||||
GetDecodedAudioDuration() < EXHAUSTED_DATA_MARGIN_USECS * mPlaybackRate;
|
||||
}
|
||||
|
||||
bool
|
||||
MediaDecoderStateMachine::HasLowDecodedVideo()
|
||||
{
|
||||
MOZ_ASSERT(OnTaskQueue());
|
||||
return IsVideoDecoding() &&
|
||||
VideoQueue().GetSize() < LOW_VIDEO_FRAMES * mPlaybackRate;
|
||||
}
|
||||
|
||||
bool
|
||||
MediaDecoderStateMachine::HasLowDecodedData()
|
||||
{
|
||||
MOZ_ASSERT(OnTaskQueue());
|
||||
MOZ_ASSERT(mReader->UseBufferingHeuristics());
|
||||
// We consider ourselves low on decoded data if we're low on audio,
|
||||
// provided we've not decoded to the end of the audio stream, or
|
||||
// if we're low on video frames, provided
|
||||
// we've not decoded to the end of the video stream.
|
||||
return ((IsAudioDecoding() && GetDecodedAudioDuration() < aAudioUsecs) ||
|
||||
(IsVideoDecoding() &&
|
||||
static_cast<uint32_t>(VideoQueue().GetSize()) < LOW_VIDEO_FRAMES));
|
||||
return HasLowDecodedAudio() || HasLowDecodedVideo();
|
||||
}
|
||||
|
||||
bool MediaDecoderStateMachine::OutOfDecodedAudio()
|
||||
@ -2782,16 +2788,12 @@ bool MediaDecoderStateMachine::IsStateMachineScheduled() const
|
||||
}
|
||||
|
||||
void
|
||||
MediaDecoderStateMachine::LogicalPlaybackRateChanged()
|
||||
MediaDecoderStateMachine::SetPlaybackRate(double aPlaybackRate)
|
||||
{
|
||||
MOZ_ASSERT(OnTaskQueue());
|
||||
MOZ_ASSERT(aPlaybackRate != 0, "Should be handled by MediaDecoder::Pause()");
|
||||
|
||||
if (mLogicalPlaybackRate == 0) {
|
||||
// This case is handled in MediaDecoder by pausing playback.
|
||||
return;
|
||||
}
|
||||
|
||||
mPlaybackRate = mLogicalPlaybackRate;
|
||||
mPlaybackRate = aPlaybackRate;
|
||||
mMediaSink->SetPlaybackRate(mPlaybackRate);
|
||||
|
||||
if (mIsAudioPrerolling && DonePrerollingAudio()) {
|
||||
|
@ -174,6 +174,12 @@ public:
|
||||
// Seeks to the decoder to aTarget asynchronously.
|
||||
RefPtr<MediaDecoder::SeekPromise> InvokeSeek(SeekTarget aTarget);
|
||||
|
||||
void DispatchSetPlaybackRate(double aPlaybackRate)
|
||||
{
|
||||
OwnerThread()->DispatchStateChange(NewRunnableMethod<double>(
|
||||
this, &MediaDecoderStateMachine::SetPlaybackRate, aPlaybackRate));
|
||||
}
|
||||
|
||||
// Set/Unset dormant state.
|
||||
void DispatchSetDormant(bool aDormant);
|
||||
|
||||
@ -367,7 +373,7 @@ protected:
|
||||
void AudioAudibleChanged(bool aAudible);
|
||||
|
||||
void VolumeChanged();
|
||||
void LogicalPlaybackRateChanged();
|
||||
void SetPlaybackRate(double aPlaybackRate);
|
||||
void PreservesPitchChanged();
|
||||
|
||||
MediaQueue<MediaData>& AudioQueue() { return mAudioQueue; }
|
||||
@ -381,11 +387,13 @@ protected:
|
||||
// decode more.
|
||||
bool NeedToDecodeVideo();
|
||||
|
||||
// Returns true if we've got less than aAudioUsecs microseconds of decoded
|
||||
// and playable data. The decoder monitor must be held.
|
||||
//
|
||||
// True if we are low in decoded audio/video data.
|
||||
// May not be invoked when mReader->UseBufferingHeuristics() is false.
|
||||
bool HasLowDecodedData(int64_t aAudioUsecs);
|
||||
bool HasLowDecodedData();
|
||||
|
||||
bool HasLowDecodedAudio();
|
||||
|
||||
bool HasLowDecodedVideo();
|
||||
|
||||
bool OutOfDecodedAudio();
|
||||
|
||||
@ -886,11 +894,6 @@ private:
|
||||
// Volume of playback. 0.0 = muted. 1.0 = full volume.
|
||||
Mirror<double> mVolume;
|
||||
|
||||
// TODO: The separation between mPlaybackRate and mLogicalPlaybackRate is a
|
||||
// kludge to preserve existing fragile logic while converting this setup to
|
||||
// state-mirroring. Some hero should clean this up.
|
||||
Mirror<double> mLogicalPlaybackRate;
|
||||
|
||||
// Pitch preservation for the playback rate.
|
||||
Mirror<bool> mPreservesPitch;
|
||||
|
||||
|
@ -30,8 +30,9 @@ TextTrackList::TextTrackList(nsPIDOMWindowInner* aOwnerWindow)
|
||||
|
||||
TextTrackList::TextTrackList(nsPIDOMWindowInner* aOwnerWindow,
|
||||
TextTrackManager* aTextTrackManager)
|
||||
: DOMEventTargetHelper(aOwnerWindow)
|
||||
, mTextTrackManager(aTextTrackManager)
|
||||
: DOMEventTargetHelper(aOwnerWindow)
|
||||
, mPendingTextTrackChange(false)
|
||||
, mTextTrackManager(aTextTrackManager)
|
||||
{
|
||||
}
|
||||
|
||||
@ -131,7 +132,7 @@ TextTrackList::DidSeek()
|
||||
}
|
||||
}
|
||||
|
||||
class TrackEventRunner final: public Runnable
|
||||
class TrackEventRunner : public Runnable
|
||||
{
|
||||
public:
|
||||
TrackEventRunner(TextTrackList* aList, nsIDOMEvent* aEvent)
|
||||
@ -144,11 +145,25 @@ public:
|
||||
return mList->DispatchTrackEvent(mEvent);
|
||||
}
|
||||
|
||||
private:
|
||||
RefPtr<TextTrackList> mList;
|
||||
private:
|
||||
RefPtr<nsIDOMEvent> mEvent;
|
||||
};
|
||||
|
||||
class ChangeEventRunner final : public TrackEventRunner
|
||||
{
|
||||
public:
|
||||
ChangeEventRunner(TextTrackList* aList, nsIDOMEvent* aEvent)
|
||||
: TrackEventRunner(aList, aEvent)
|
||||
{}
|
||||
|
||||
NS_IMETHOD Run() override
|
||||
{
|
||||
mList->mPendingTextTrackChange = false;
|
||||
return TrackEventRunner::Run();
|
||||
}
|
||||
};
|
||||
|
||||
nsresult
|
||||
TextTrackList::DispatchTrackEvent(nsIDOMEvent* aEvent)
|
||||
{
|
||||
@ -158,13 +173,17 @@ TextTrackList::DispatchTrackEvent(nsIDOMEvent* aEvent)
|
||||
void
|
||||
TextTrackList::CreateAndDispatchChangeEvent()
|
||||
{
|
||||
RefPtr<Event> event = NS_NewDOMEvent(this, nullptr, nullptr);
|
||||
MOZ_ASSERT(NS_IsMainThread());
|
||||
if (!mPendingTextTrackChange) {
|
||||
mPendingTextTrackChange = true;
|
||||
RefPtr<Event> event = NS_NewDOMEvent(this, nullptr, nullptr);
|
||||
|
||||
event->InitEvent(NS_LITERAL_STRING("change"), false, false);
|
||||
event->SetTrusted(true);
|
||||
event->InitEvent(NS_LITERAL_STRING("change"), false, false);
|
||||
event->SetTrusted(true);
|
||||
|
||||
nsCOMPtr<nsIRunnable> eventRunner = new TrackEventRunner(this, event);
|
||||
NS_DispatchToMainThread(eventRunner);
|
||||
nsCOMPtr<nsIRunnable> eventRunner = new ChangeEventRunner(this, event);
|
||||
NS_DispatchToMainThread(eventRunner);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
|
@ -67,6 +67,8 @@ public:
|
||||
IMPL_EVENT_HANDLER(addtrack)
|
||||
IMPL_EVENT_HANDLER(removetrack)
|
||||
|
||||
bool mPendingTextTrackChange;
|
||||
|
||||
private:
|
||||
~TextTrackList();
|
||||
|
||||
|
@ -61,10 +61,15 @@ class VideoPuppeteer(object):
|
||||
'var video = arguments[0];'
|
||||
'var currentTime = video.wrappedJSObject.currentTime;'
|
||||
'var duration = video.wrappedJSObject.duration;'
|
||||
'var buffered = video.wrappedJSObject.buffered;'
|
||||
'var bufferedRanges = [];'
|
||||
'for (var i = 0; i < buffered.length; i++) {'
|
||||
'bufferedRanges.push([buffered.start(i), buffered.end(i)]);'
|
||||
'}'
|
||||
'var played = video.wrappedJSObject.played;'
|
||||
'var timeRanges = [];'
|
||||
'var playedRanges = [];'
|
||||
'for (var i = 0; i < played.length; i++) {'
|
||||
'timeRanges.push([played.start(i), played.end(i)]);'
|
||||
'playedRanges.push([played.start(i), played.end(i)]);'
|
||||
'}'
|
||||
'var totalFrames = '
|
||||
'video.getVideoPlaybackQuality()["totalVideoFrames"];'
|
||||
@ -248,6 +253,11 @@ class VideoPuppeteer(object):
|
||||
|
||||
current_time: The current time of the wrapped element.
|
||||
duration: the duration of the wrapped element.
|
||||
buffered: the buffered ranges of the wrapped element. In its raw form
|
||||
this is as a list where the first element is the length and the second
|
||||
element is a list of 2 item lists, where each two items are a buffered
|
||||
range. Once assigned to the tuple this data should be wrapped in the
|
||||
TimeRanges class.
|
||||
played: the played ranges of the wrapped element. In its raw form this
|
||||
is as a list where the first element is the length and the second
|
||||
element is a list of 2 item lists, where each two items are a played
|
||||
@ -267,6 +277,7 @@ class VideoPuppeteer(object):
|
||||
['current_time',
|
||||
'duration',
|
||||
'remaining_time',
|
||||
'buffered',
|
||||
'played',
|
||||
'lag',
|
||||
'total_frames',
|
||||
@ -279,23 +290,28 @@ class VideoPuppeteer(object):
|
||||
"""
|
||||
Create an instance of the video_state_info named tuple. This function
|
||||
expects a dictionary populated with the following keys: current_time,
|
||||
duration, raw_time_ranges, total_frames, dropped_frames, and
|
||||
duration, raw_played_ranges, total_frames, dropped_frames, and
|
||||
corrupted_frames.
|
||||
|
||||
Aside from raw_time_ranges, see `_video_state_named_tuple` for more
|
||||
information on the above keys and values. For raw_time_ranges a
|
||||
Aside from raw_played_ranges, see `_video_state_named_tuple` for more
|
||||
information on the above keys and values. For raw_played_ranges a
|
||||
list is expected that can be consumed to make a TimeRanges object.
|
||||
|
||||
:return: A named tuple 'video_state_info' derived from arguments and
|
||||
state information from the puppeteer.
|
||||
"""
|
||||
raw_time_ranges = video_state_info_kwargs['raw_time_ranges']
|
||||
# Remove raw ranges from dict as it is not used in the final named
|
||||
raw_buffered_ranges = video_state_info_kwargs['raw_buffered_ranges']
|
||||
raw_played_ranges = video_state_info_kwargs['raw_played_ranges']
|
||||
# Remove raw ranges from dict as they are not used in the final named
|
||||
# tuple and will provide an unexpected kwarg if kept.
|
||||
del video_state_info_kwargs['raw_time_ranges']
|
||||
del video_state_info_kwargs['raw_buffered_ranges']
|
||||
del video_state_info_kwargs['raw_played_ranges']
|
||||
# Create buffered ranges
|
||||
video_state_info_kwargs['buffered'] = (
|
||||
TimeRanges(raw_buffered_ranges[0], raw_buffered_ranges[1]))
|
||||
# Create played ranges
|
||||
video_state_info_kwargs['played'] = (
|
||||
TimeRanges(raw_time_ranges[0], raw_time_ranges[1]))
|
||||
TimeRanges(raw_played_ranges[0], raw_played_ranges[1]))
|
||||
# Calculate elapsed times
|
||||
elapsed_current_time = (video_state_info_kwargs['current_time'] -
|
||||
self._first_seen_time)
|
||||
@ -327,7 +343,8 @@ class VideoPuppeteer(object):
|
||||
'return ['
|
||||
'currentTime,'
|
||||
'duration,'
|
||||
'[played.length, timeRanges],'
|
||||
'[buffered.length, bufferedRanges],'
|
||||
'[played.length, playedRanges],'
|
||||
'totalFrames,'
|
||||
'droppedFrames,'
|
||||
'corruptedFrames];')
|
||||
@ -342,8 +359,9 @@ class VideoPuppeteer(object):
|
||||
information, such as lag. This is stored in the last seen state to
|
||||
stress that it's based on the snapshot.
|
||||
"""
|
||||
keys = ['current_time', 'duration', 'raw_time_ranges', 'total_frames',
|
||||
'dropped_frames', 'corrupted_frames']
|
||||
keys = ['current_time', 'duration', 'raw_buffered_ranges',
|
||||
'raw_played_ranges', 'total_frames', 'dropped_frames',
|
||||
'corrupted_frames']
|
||||
values = self._execute_video_script(self._fetch_state_script)
|
||||
self._last_seen_video_state = (
|
||||
self._create_video_state_info(**dict(zip(keys, values))))
|
||||
|
@ -346,7 +346,8 @@ class YouTubePuppeteer(VideoPuppeteer):
|
||||
'return ['
|
||||
'currentTime,'
|
||||
'duration,'
|
||||
'[played.length, timeRanges],'
|
||||
'[buffered.length, bufferedRanges],'
|
||||
'[played.length, playedRanges],'
|
||||
'totalFrames,'
|
||||
'droppedFrames,'
|
||||
'corruptedFrames,'
|
||||
@ -371,8 +372,9 @@ class YouTubePuppeteer(VideoPuppeteer):
|
||||
stress that it's based on the snapshot.
|
||||
"""
|
||||
values = self._execute_yt_script(self._fetch_state_script)
|
||||
video_keys = ['current_time', 'duration', 'raw_time_ranges',
|
||||
'total_frames', 'dropped_frames', 'corrupted_frames']
|
||||
video_keys = ['current_time', 'duration', 'raw_buffered_ranges',
|
||||
'raw_played_ranges', 'total_frames', 'dropped_frames',
|
||||
'corrupted_frames']
|
||||
player_keys = ['player_duration', 'player_current_time',
|
||||
'player_playback_quality', 'player_movie_id',
|
||||
'player_movie_title', 'player_url', 'player_state',
|
||||
|
@ -28,15 +28,22 @@ video.textTracks.addEventListener("change", changed);
|
||||
|
||||
is(track.mode, "hidden", "New TextTrack's mode should be hidden.");
|
||||
track.mode = "showing";
|
||||
// Bug882674: change the mode again to see if we receive only one
|
||||
// change event.
|
||||
track.mode = "hidden";
|
||||
|
||||
var eventCount = 0;
|
||||
function changed(event) {
|
||||
eventCount++;
|
||||
is(eventCount, 1, "change event dispatched multiple times.");
|
||||
is(event.target, video.textTracks, "change event's target should be video.textTracks.");
|
||||
ok(event instanceof window.Event, "change event should be a simple event.");
|
||||
ok(!event.bubbles, "change event should not bubble.");
|
||||
ok(event.isTrusted, "change event should be trusted.");
|
||||
ok(!event.cancelable, "change event should not be cancelable.");
|
||||
|
||||
SimpleTest.finish();
|
||||
// Delay the finish function call for testing the change event count.
|
||||
setTimeout(SimpleTest.finish, 0);
|
||||
}
|
||||
</script>
|
||||
</pre>
|
||||
|
@ -186,7 +186,6 @@ SRICheck::VerifyIntegrity(const SRIMetadata& aMetadata,
|
||||
NS_ENSURE_ARG_POINTER(aLoader);
|
||||
NS_ENSURE_ARG_POINTER(aReporter);
|
||||
|
||||
NS_ConvertUTF16toUTF8 utf8Hash(aString);
|
||||
nsCOMPtr<nsIChannel> channel;
|
||||
aLoader->GetChannel(getter_AddRefs(channel));
|
||||
|
||||
@ -203,7 +202,10 @@ SRICheck::VerifyIntegrity(const SRIMetadata& aMetadata,
|
||||
|
||||
SRICheckDataVerifier verifier(aMetadata, aSourceFileURI, aReporter);
|
||||
nsresult rv;
|
||||
rv = verifier.Update(utf8Hash.Length(), (uint8_t*)utf8Hash.get());
|
||||
nsDependentCString rawBuffer;
|
||||
rv = aLoader->GetRawBuffer(rawBuffer);
|
||||
NS_ENSURE_SUCCESS(rv, rv);
|
||||
rv = verifier.Update(rawBuffer.Length(), (const uint8_t*)rawBuffer.get());
|
||||
NS_ENSURE_SUCCESS(rv, rv);
|
||||
|
||||
return verifier.Verify(aMetadata, channel, aSourceFileURI, aReporter);
|
||||
|
2
dom/security/test/sri/file_bug_1271796.css
Normal file
2
dom/security/test/sri/file_bug_1271796.css
Normal file
@ -0,0 +1,2 @@
|
||||
/*! Simple test for bug 1271796 */
|
||||
p::before { content: "\2014"; }
|
@ -72,13 +72,13 @@
|
||||
ok(true, "A UTF8 stylesheet (with BOM) was correctly loaded when integrity matched");
|
||||
}
|
||||
function bad_correctUTF8BOMHashBlocked() {
|
||||
todo(false, "We should load UTF8 (with BOM) stylesheets with hashes that match!");
|
||||
ok(false, "We should load UTF8 (with BOM) stylesheets with hashes that match!");
|
||||
}
|
||||
function good_correctUTF8ishHashLoaded() {
|
||||
ok(true, "A UTF8ish stylesheet was correctly loaded when integrity matched");
|
||||
}
|
||||
function bad_correctUTF8ishHashBlocked() {
|
||||
todo(false, "We should load UTF8ish stylesheets with hashes that match!");
|
||||
ok(false, "We should load UTF8ish stylesheets with hashes that match!");
|
||||
}
|
||||
</script>
|
||||
|
||||
|
@ -1,5 +1,6 @@
|
||||
[DEFAULT]
|
||||
support-files =
|
||||
file_bug_1271796.css
|
||||
iframe_require-sri-for_main.html
|
||||
iframe_require-sri-for_main.html^headers^
|
||||
iframe_script_crossdomain.html
|
||||
@ -43,3 +44,4 @@ support-files =
|
||||
[test_style_sameorigin.html]
|
||||
[test_require-sri-for_csp_directive.html]
|
||||
[test_require-sri-for_csp_directive_disabled.html]
|
||||
[test_bug_1271796.html]
|
||||
|
30
dom/security/test/sri/test_bug_1271796.html
Normal file
30
dom/security/test/sri/test_bug_1271796.html
Normal file
@ -0,0 +1,30 @@
|
||||
<!DOCTYPE HTML>
|
||||
<!-- Any copyright is dedicated to the Public Domain.
|
||||
http://creativecommons.org/publicdomain/zero/1.0/ -->
|
||||
<html>
|
||||
<head>
|
||||
<script type="application/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
|
||||
<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>
|
||||
<script type="application/javascript">
|
||||
SimpleTest.waitForExplicitFinish();
|
||||
|
||||
function good_shouldLoadEncodingProblem() {
|
||||
ok(true, "Problematically encoded file correctly loaded.")
|
||||
};
|
||||
function bad_shouldntEncounterBug1271796() {
|
||||
ok(false, "Problematically encoded should load!")
|
||||
}
|
||||
window.onload = function() {
|
||||
SimpleTest.finish();
|
||||
}
|
||||
</script>
|
||||
<link rel="stylesheet" href="file_bug_1271796.css" crossorigin="anonymous"
|
||||
integrity="sha384-8Xl0mTN4S2QZ5xeliG1sd4Ar9o1xMw6JoJy9RNjyHGQDha7GiLxo8l1llwLVgTNG"
|
||||
onload="good_shouldLoadEncodingProblem();"
|
||||
onerror="bad_shouldntEncounterBug1271796();">
|
||||
</head>
|
||||
<body>
|
||||
<a href="https://bugzilla.mozilla.org/show_bug.cgi?id=1271796">Bug 1271796</a><br>
|
||||
<p>This text is prepended by emdash if css has loaded</p>
|
||||
</body>
|
||||
</html>
|
@ -1660,7 +1660,9 @@ CompositorBridgeParent::FlushApzRepaints(const LayerTransactionParent* aLayerTre
|
||||
// use the compositor's root layer tree id.
|
||||
layersId = mRootLayerTreeID;
|
||||
}
|
||||
mApzcTreeManager->FlushApzRepaints(layersId);
|
||||
APZThreadUtils::RunOnControllerThread(NS_NewRunnableFunction([=] () {
|
||||
mApzcTreeManager->FlushApzRepaints(layersId);
|
||||
}));
|
||||
}
|
||||
|
||||
void
|
||||
@ -1671,29 +1673,6 @@ CompositorBridgeParent::GetAPZTestData(const LayerTransactionParent* aLayerTree,
|
||||
*aOutData = sIndirectLayerTrees[mRootLayerTreeID].mApzTestData;
|
||||
}
|
||||
|
||||
class NotifyAPZConfirmedTargetTask : public Runnable
|
||||
{
|
||||
public:
|
||||
explicit NotifyAPZConfirmedTargetTask(const RefPtr<APZCTreeManager>& aAPZCTM,
|
||||
const uint64_t& aInputBlockId,
|
||||
const nsTArray<ScrollableLayerGuid>& aTargets)
|
||||
: mAPZCTM(aAPZCTM),
|
||||
mInputBlockId(aInputBlockId),
|
||||
mTargets(aTargets)
|
||||
{
|
||||
}
|
||||
|
||||
NS_IMETHOD Run() override {
|
||||
mAPZCTM->SetTargetAPZC(mInputBlockId, mTargets);
|
||||
return NS_OK;
|
||||
}
|
||||
|
||||
private:
|
||||
RefPtr<APZCTreeManager> mAPZCTM;
|
||||
uint64_t mInputBlockId;
|
||||
nsTArray<ScrollableLayerGuid> mTargets;
|
||||
};
|
||||
|
||||
void
|
||||
CompositorBridgeParent::SetConfirmedTargetAPZC(const LayerTransactionParent* aLayerTree,
|
||||
const uint64_t& aInputBlockId,
|
||||
@ -1702,8 +1681,13 @@ CompositorBridgeParent::SetConfirmedTargetAPZC(const LayerTransactionParent* aLa
|
||||
if (!mApzcTreeManager) {
|
||||
return;
|
||||
}
|
||||
RefPtr<Runnable> task =
|
||||
new NotifyAPZConfirmedTargetTask(mApzcTreeManager, aInputBlockId, aTargets);
|
||||
// Need to specifically bind this since it's overloaded.
|
||||
void (APZCTreeManager::*setTargetApzcFunc)
|
||||
(uint64_t, const nsTArray<ScrollableLayerGuid>&) =
|
||||
&APZCTreeManager::SetTargetAPZC;
|
||||
RefPtr<Runnable> task = NewRunnableMethod
|
||||
<uint64_t, StoreCopyPassByConstLRef<nsTArray<ScrollableLayerGuid>>>
|
||||
(mApzcTreeManager.get(), setTargetApzcFunc, aInputBlockId, aTargets);
|
||||
APZThreadUtils::RunOnControllerThread(task.forget());
|
||||
|
||||
}
|
||||
|
@ -592,40 +592,42 @@ class gfxContextMatrixAutoSaveRestore
|
||||
{
|
||||
public:
|
||||
gfxContextMatrixAutoSaveRestore() :
|
||||
mContext(nullptr)
|
||||
mContext(nullptr)
|
||||
{
|
||||
}
|
||||
|
||||
explicit gfxContextMatrixAutoSaveRestore(gfxContext *aContext) :
|
||||
mContext(aContext), mMatrix(aContext->CurrentMatrix())
|
||||
mContext(aContext), mMatrix(aContext->CurrentMatrix())
|
||||
{
|
||||
}
|
||||
|
||||
~gfxContextMatrixAutoSaveRestore()
|
||||
{
|
||||
if (mContext) {
|
||||
mContext->SetMatrix(mMatrix);
|
||||
}
|
||||
if (mContext) {
|
||||
mContext->SetMatrix(mMatrix);
|
||||
}
|
||||
}
|
||||
|
||||
void SetContext(gfxContext *aContext)
|
||||
{
|
||||
NS_ASSERTION(!mContext, "Not going to restore the matrix on some context!");
|
||||
mContext = aContext;
|
||||
mMatrix = aContext->CurrentMatrix();
|
||||
NS_ASSERTION(!mContext,
|
||||
"Not going to restore the matrix on some context!");
|
||||
mContext = aContext;
|
||||
mMatrix = aContext->CurrentMatrix();
|
||||
}
|
||||
|
||||
void Restore()
|
||||
{
|
||||
if (mContext) {
|
||||
mContext->SetMatrix(mMatrix);
|
||||
}
|
||||
if (mContext) {
|
||||
mContext->SetMatrix(mMatrix);
|
||||
mContext = nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
const gfxMatrix& Matrix()
|
||||
{
|
||||
MOZ_ASSERT(mContext, "mMatrix doesn't contain a useful matrix");
|
||||
return mMatrix;
|
||||
MOZ_ASSERT(mContext, "mMatrix doesn't contain a useful matrix");
|
||||
return mMatrix;
|
||||
}
|
||||
|
||||
bool HasMatrix() const { return !!mContext; }
|
||||
|
6
gradle/wrapper/gradle-wrapper.properties
vendored
6
gradle/wrapper/gradle-wrapper.properties
vendored
@ -1,7 +1,7 @@
|
||||
#Tue Apr 12 09:52:06 CEST 2016
|
||||
#Fri Sep 16 15:41:50 PDT 2016
|
||||
distributionBase=GRADLE_USER_HOME
|
||||
distributionPath=wrapper/dists
|
||||
zipStoreBase=GRADLE_USER_HOME
|
||||
zipStorePath=wrapper/dists
|
||||
distributionUrl=https\://services.gradle.org/distributions/gradle-2.10-all.zip
|
||||
distributionSha256Sum=496d60c331f8666f99b66d08ff67a880697a7e85a9d9b76ff08814cf97f61a4c
|
||||
distributionUrl=https\://services.gradle.org/distributions/gradle-2.14.1-all.zip
|
||||
distributionSha256Sum=88a910cdf2e03ebbb5fe90f7ecf534fc9ac22e12112dc9a2fee810c598a76091
|
||||
|
@ -328,7 +328,7 @@ RestyleTracker::AddPendingRestyle(Element* aElement,
|
||||
|
||||
// We can only treat this element as a restyle root if we would
|
||||
// actually restyle its descendants (so either call
|
||||
// ReResolveStyleContext on it or just reframe it).
|
||||
// ElementRestyler::Restyle on it or just reframe it).
|
||||
if ((aRestyleHint & ~eRestyle_LaterSiblings) ||
|
||||
(aMinChangeHint & nsChangeHint_ReconstructFrame)) {
|
||||
Element* cur =
|
||||
|
@ -3238,7 +3238,8 @@ nsCSSRendering::PaintBackgroundWithSC(const PaintBGParams& aParams,
|
||||
clipSet = true;
|
||||
if (!clipBorderArea.IsEqualEdges(aParams.borderArea)) {
|
||||
// We're drawing the background for the joined continuation boxes
|
||||
// so we need to clip that to the slice that we want for this frame.
|
||||
// so we need to clip that to the slice that we want for this
|
||||
// frame.
|
||||
gfxRect clip =
|
||||
nsLayoutUtils::RectToGfxRect(aParams.borderArea, appUnitsPerPixel);
|
||||
autoSR.EnsureSaved(ctx);
|
||||
@ -3259,7 +3260,8 @@ nsCSSRendering::PaintBackgroundWithSC(const PaintBGParams& aParams,
|
||||
if (!state.mFillArea.IsEmpty()) {
|
||||
if (co != CompositionOp::OP_OVER) {
|
||||
NS_ASSERTION(ctx->CurrentOp() == CompositionOp::OP_OVER,
|
||||
"It is assumed the initial op is OP_OVER, when it is restored later");
|
||||
"It is assumed the initial op is OP_OVER, when it is "
|
||||
"restored later");
|
||||
ctx->SetOp(co);
|
||||
}
|
||||
|
||||
|
@ -6832,16 +6832,28 @@ bool nsDisplayMask::TryMerge(nsDisplayItem* aItem)
|
||||
// items for the same content element should be merged into a single
|
||||
// compositing group
|
||||
// aItem->GetUnderlyingFrame() returns non-null because it's nsDisplaySVGEffects
|
||||
if (aItem->Frame()->GetContent() != mFrame->GetContent())
|
||||
if (aItem->Frame()->GetContent() != mFrame->GetContent()) {
|
||||
return false;
|
||||
if (aItem->GetClip() != GetClip())
|
||||
}
|
||||
if (aItem->GetClip() != GetClip()) {
|
||||
return false;
|
||||
if (aItem->ScrollClip() != ScrollClip())
|
||||
}
|
||||
if (aItem->ScrollClip() != ScrollClip()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Do not merge if mFrame has mask. Continuation frames should apply mask
|
||||
// independently(just like nsDisplayBackgroundImage).
|
||||
const nsStyleSVGReset *style = mFrame->StyleSVGReset();
|
||||
if (style->mMask.HasLayerWithImage()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
nsDisplayMask* other = static_cast<nsDisplayMask*>(aItem);
|
||||
MergeFromTrackingMergedFrames(other);
|
||||
mEffectsBounds.UnionRect(mEffectsBounds,
|
||||
other->mEffectsBounds + other->mFrame->GetOffsetTo(mFrame));
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -6886,7 +6898,7 @@ nsDisplayMask::GetLayerState(nsDisplayListBuilder* aBuilder,
|
||||
}
|
||||
|
||||
bool nsDisplayMask::ComputeVisibility(nsDisplayListBuilder* aBuilder,
|
||||
nsRegion* aVisibleRegion)
|
||||
nsRegion* aVisibleRegion)
|
||||
{
|
||||
// Our children may be made translucent or arbitrarily deformed so we should
|
||||
// not allow them to subtract area from aVisibleRegion.
|
||||
|
@ -0,0 +1,25 @@
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<title>CSS Masking: mask on inline element</title>
|
||||
<link rel="author" title="CJ Ku" href="mailto:cku@mozilla.com">
|
||||
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
|
||||
<style type="text/css">
|
||||
div {
|
||||
width: 100px;
|
||||
height: 100px;
|
||||
font-size: 100px;
|
||||
line-height: 100px;
|
||||
}
|
||||
|
||||
div.mask-by-png {
|
||||
mask-image: url(support/transparent-100x50-blue-100x50.png);
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div class="mask-by-png">A</div>
|
||||
<div class="mask-by-png">B</div>
|
||||
</body>
|
||||
</html>
|
30
layout/reftests/w3c-css/submitted/masking/mask-image-6.html
Normal file
30
layout/reftests/w3c-css/submitted/masking/mask-image-6.html
Normal file
@ -0,0 +1,30 @@
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<title>CSS Masking: mask on inline element</title>
|
||||
<link rel="author" title="CJ Ku" href="mailto:cku@mozilla.com">
|
||||
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
|
||||
<link rel="help" href="https://www.w3.org/TR/css-masking-1/#the-mask-image">
|
||||
<link rel="match" href="mask-image-6-ref.html">
|
||||
<meta name="assert" content="Test checks whether mask on inline elemnt works correctly or not.">
|
||||
<style type="text/css">
|
||||
div {
|
||||
width: 100px;
|
||||
height: 100px;
|
||||
}
|
||||
span {
|
||||
font-size: 100px;
|
||||
line-height: 100px;
|
||||
mask-image: url(support/transparent-100x50-blue-100x50.png);
|
||||
mask-repeat: repeat;
|
||||
}
|
||||
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div>
|
||||
<span>A B</span>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
@ -29,6 +29,7 @@ fuzzy-if(skiaContent,50,50) == mask-image-3f.html mask-image-3-ref.html
|
||||
== mask-image-4a.html blank.html
|
||||
== mask-image-4b.html blank.html
|
||||
== mask-image-5.html mask-image-5-ref.html
|
||||
== mask-image-6.html mask-image-6-ref.html
|
||||
|
||||
# mask-clip test cases
|
||||
== mask-clip-1.html mask-clip-1-ref.html
|
||||
|
@ -32,7 +32,7 @@ enum class CSSPseudoElementType : uint8_t;
|
||||
* (with a few exceptions, like system color changes), the data in an
|
||||
* nsStyleContext are also immutable (with the additional exception of
|
||||
* GetUniqueStyleData). When style data change,
|
||||
* nsFrameManager::ReResolveStyleContext creates a new style context.
|
||||
* ElementRestyler::Restyle creates a new style context.
|
||||
*
|
||||
* Style contexts are reference counted. References are generally held
|
||||
* by:
|
||||
|
@ -1318,7 +1318,8 @@ nsStyleSVGReset::CalcDifference(const nsStyleSVGReset& aNewData) const
|
||||
hint |= nsChangeHint_RepaintFrame;
|
||||
}
|
||||
|
||||
hint |= mMask.CalcDifference(aNewData.mMask, nsChangeHint_RepaintFrame);
|
||||
hint |= mMask.CalcDifference(aNewData.mMask,
|
||||
nsStyleImageLayers::LayerType::Mask);
|
||||
|
||||
return hint;
|
||||
}
|
||||
@ -2427,8 +2428,13 @@ nsStyleImageLayers::nsStyleImageLayers(const nsStyleImageLayers &aSource)
|
||||
|
||||
nsChangeHint
|
||||
nsStyleImageLayers::CalcDifference(const nsStyleImageLayers& aNewLayers,
                                   nsChangeHint aPositionChangeHint) const
                                   nsStyleImageLayers::LayerType aType) const
{
  nsChangeHint positionChangeHint =
    (aType == nsStyleImageLayers::LayerType::Background)
    ? nsChangeHint_UpdateBackgroundPosition
    : nsChangeHint_RepaintFrame;

  nsChangeHint hint = nsChangeHint(0);

  const nsStyleImageLayers& moreLayers =
@@ -2442,7 +2448,7 @@ nsStyleImageLayers::CalcDifference(const nsStyleImageLayers& aNewLayers,
    if (i < lessLayers.mImageCount) {
      nsChangeHint layerDifference =
        moreLayers.mLayers[i].CalcDifference(lessLayers.mLayers[i],
                                             aPositionChangeHint);
                                             positionChangeHint);
      hint |= layerDifference;
      if (layerDifference &&
          ((moreLayers.mLayers[i].mImage.GetType() == eStyleImageType_Element) ||
@@ -2457,6 +2463,11 @@ nsStyleImageLayers::CalcDifference(const nsStyleImageLayers& aNewLayers,
    }
  }

  if (aType == nsStyleImageLayers::LayerType::Mask &&
      mImageCount != aNewLayers.mImageCount) {
    hint |= nsChangeHint_UpdateEffects;
  }

  if (hint) {
    return hint;
  }
@@ -2729,7 +2740,7 @@ nsStyleImageLayers::Layer::CalcDifference(const nsStyleImageLayers::Layer& aNewL
{
  nsChangeHint hint = nsChangeHint(0);
  if (mSourceURI != aNewLayer.mSourceURI) {
    hint |= nsChangeHint_RepaintFrame;
    hint |= nsChangeHint_RepaintFrame | nsChangeHint_UpdateEffects;

    // If Layer::mSourceURI links to a SVG mask, it has a fragment. Not vice
    // versa. Here are examples of URI contains a fragment, two of them link
@@ -2757,10 +2768,9 @@ nsStyleImageLayers::Layer::CalcDifference(const nsStyleImageLayers::Layer& aNewL
      }
    }

    // Return nsChangeHint_UpdateEffects and nsChangeHint_UpdateOverflow if
    // either URI might link to an SVG mask.
    // Return nsChangeHint_UpdateOverflow if either URI might link to an SVG
    // mask.
    if (maybeSVGMask) {
      hint |= nsChangeHint_UpdateEffects;
      // Mask changes require that we update the PreEffectsBBoxProperty,
      // which is done during overflow computation.
      hint |= nsChangeHint_UpdateOverflow;
@@ -2827,7 +2837,7 @@ nsStyleBackground::CalcDifference(const nsStyleBackground& aNewData) const
  }

  hint |= mImage.CalcDifference(aNewData.mImage,
                                nsChangeHint_UpdateBackgroundPosition);
                                nsStyleImageLayers::LayerType::Background);

  return hint;
}
@@ -3555,17 +3565,17 @@ nsStyleContent::nsStyleContent(const nsStyleContent& aSource)
nsChangeHint
nsStyleContent::CalcDifference(const nsStyleContent& aNewData) const
{
  // In ReResolveStyleContext we assume that if there's no existing
  // In ElementRestyler::Restyle we assume that if there's no existing
  // ::before or ::after and we don't have to restyle children of the
  // node then we can't end up with a ::before or ::after due to the
  // restyle of the node itself. That's not quite true, but the only
  // exception to the above is when the 'content' property of the node
  // changes and the pseudo-element inherits the changed value. Since
  // the code here triggers a frame change on the node in that case,
  // the optimization in ReResolveStyleContext is ok. But if we ever
  // the optimization in ElementRestyler::Restyle is ok. But if we ever
  // change this code to not reconstruct frames on changes to the
  // 'content' property, then we will need to revisit the optimization
  // in ReResolveStyleContext.
  // in ElementRestyler::Restyle.

  // Unfortunately we need to reframe even if the content lengths are the same;
  // a simple reflow will not pick up different text or different image URLs,
@@ -818,7 +818,7 @@ struct nsStyleImageLayers {
  }

  nsChangeHint CalcDifference(const nsStyleImageLayers& aNewLayers,
                              nsChangeHint aPositionChangeHint) const;
                              nsStyleImageLayers::LayerType aType) const;

  bool HasLayerWithImage() const;

@@ -337,7 +337,7 @@ public:
  /**
   * StyleContextChanged
   *
   * To be called from nsFrameManager::ReResolveStyleContext when the
   * To be called from RestyleManager::TryStartingTransition when the
   * style of an element has changed, to initiate transitions from
   * that style change. For style contexts with :before and :after
   * pseudos, aElement is expected to be the generated before/after
@ -180,11 +180,6 @@ GetOffsetToBoundingBox(nsIFrame* aFrame)
|
||||
// no offset adjustment to make.
|
||||
return nsPoint();
|
||||
}
|
||||
// We could allow aFrame to be any continuation, but since that would require
|
||||
// a GetPrevContinuation() virtual call and conditional returns, and since
|
||||
// all our current consumers always pass in the first continuation, we don't
|
||||
// currently bother.
|
||||
NS_ASSERTION(!aFrame->GetPrevContinuation(), "Not first continuation");
|
||||
|
||||
// The GetAllInFlowRectsUnion() call gets the union of the frame border-box
|
||||
// rects over all continuations, relative to the origin (top-left of the
|
||||
@ -434,8 +429,10 @@ ComputeClipExtsInDeviceSpace(gfxContext& aCtx)
|
||||
: IntRect();
|
||||
}
|
||||
|
||||
typedef nsSVGIntegrationUtils::PaintFramesParams PaintFramesParams;
|
||||
|
||||
static IntRect
|
||||
ComputeMaskGeometry(const nsSVGIntegrationUtils::PaintFramesParams& aParams,
|
||||
ComputeMaskGeometry(const PaintFramesParams& aParams,
|
||||
const nsStyleSVGReset *svgReset,
|
||||
const nsPoint& aOffsetToUserSpace,
|
||||
const nsTArray<nsSVGMaskFrame *>& aMaskFrames)
|
||||
@ -494,7 +491,7 @@ ComputeMaskGeometry(const nsSVGIntegrationUtils::PaintFramesParams& aParams,
|
||||
}
|
||||
|
||||
static DrawResult
|
||||
GenerateMaskSurface(const nsSVGIntegrationUtils::PaintFramesParams& aParams,
|
||||
GenerateMaskSurface(const PaintFramesParams& aParams,
|
||||
float aOpacity, nsStyleContext* aSC,
|
||||
const nsTArray<nsSVGMaskFrame *>& aMaskFrames,
|
||||
const nsPoint& aOffsetToUserSpace,
|
||||
@ -612,7 +609,7 @@ GenerateMaskSurface(const nsSVGIntegrationUtils::PaintFramesParams& aParams,
|
||||
}
|
||||
|
||||
static float
|
||||
ComputeOpacity(const nsSVGIntegrationUtils::PaintFramesParams& aParams)
|
||||
ComputeOpacity(const PaintFramesParams& aParams)
|
||||
{
|
||||
nsIFrame* frame = aParams.frame;
|
||||
float opacity = frame->StyleEffects()->mOpacity;
|
||||
@ -626,8 +623,8 @@ ComputeOpacity(const nsSVGIntegrationUtils::PaintFramesParams& aParams)
|
||||
}
|
||||
|
||||
static bool
|
||||
ValidateSVGFrame(const nsSVGIntegrationUtils::PaintFramesParams& aParams,
|
||||
bool aHasSVGLayout, DrawResult* aResult)
|
||||
ValidateSVGFrame(const PaintFramesParams& aParams, bool aHasSVGLayout,
|
||||
DrawResult* aResult)
|
||||
{
|
||||
#ifdef DEBUG
|
||||
NS_ASSERTION(!(aParams.frame->GetStateBits() & NS_FRAME_SVG_LAYOUT) ||
|
||||
@ -655,28 +652,37 @@ ValidateSVGFrame(const nsSVGIntegrationUtils::PaintFramesParams& aParams,
|
||||
return true;
|
||||
}

/**
 * Set up the transform matrix of a gfx context for a specific frame. Depending
 * on aClipCtx, this function may also clip that context to the visual overflow
 * area of aFrame.
 *
 * @param aFrame is the target frame.
 * @param aOffsetToBoundingBox returns the offset between the reference frame
 *        and the bounding box of aFrame.
 * @param aOffsetToUserSpace returns the offset between the reference frame and
 *        the user space coordinate of aFrame.
 * @param aClipCtx indicates whether to clip aParams.ctx to the visual overflow
 *        rect of aFrame.
 */
static void
SetupContextMatrix(const nsSVGIntegrationUtils::PaintFramesParams& aParams,
                   nsPoint& aOffsetToBoundingBox,
                   nsPoint& aToUserSpace,
                   nsPoint& aOffsetToUserSpace)
SetupContextMatrix(nsIFrame* aFrame, const PaintFramesParams& aParams,
                   nsPoint& aOffsetToBoundingBox, nsPoint& aOffsetToUserSpace,
                   bool aClipCtx)
{
|
||||
nsIFrame* frame = aParams.frame;
|
||||
nsIFrame* firstFrame =
|
||||
nsLayoutUtils::FirstContinuationOrIBSplitSibling(frame);
|
||||
|
||||
nsPoint firstFrameOffset = GetOffsetToBoundingBox(firstFrame);
|
||||
aOffsetToBoundingBox = aParams.builder->ToReferenceFrame(firstFrame) - firstFrameOffset;
|
||||
if (!firstFrame->IsFrameOfType(nsIFrame::eSVG)) {
|
||||
aOffsetToBoundingBox = aParams.builder->ToReferenceFrame(aFrame) -
|
||||
GetOffsetToBoundingBox(aFrame);
|
||||
if (!aFrame->IsFrameOfType(nsIFrame::eSVG)) {
|
||||
/* Snap the offset if the reference frame is not a SVG frame,
|
||||
* since other frames will be snapped to pixel when rendering. */
|
||||
aOffsetToBoundingBox = nsPoint(
|
||||
frame->PresContext()->RoundAppUnitsToNearestDevPixels(aOffsetToBoundingBox.x),
|
||||
frame->PresContext()->RoundAppUnitsToNearestDevPixels(aOffsetToBoundingBox.y));
|
||||
aFrame->PresContext()->RoundAppUnitsToNearestDevPixels(aOffsetToBoundingBox.x),
|
||||
aFrame->PresContext()->RoundAppUnitsToNearestDevPixels(aOffsetToBoundingBox.y));
|
||||
}
|
||||
|
||||
// After applying only "aOffsetToBoundingBox", aCtx would have its origin at
|
||||
// the top left corner of frame's bounding box (over all continuations).
|
||||
// After applying only "aOffsetToBoundingBox", aParams.ctx would have its
|
||||
// origin at the top left corner of frame's bounding box (over all
|
||||
// continuations).
|
||||
// However, SVG painting needs the origin to be located at the origin of the
|
||||
// SVG frame's "user space", i.e. the space in which, for example, the
|
||||
// frame's BBox lives.
|
||||
@ -686,28 +692,36 @@ SetupContextMatrix(const nsSVGIntegrationUtils::PaintFramesParams& aParams,
|
||||
// frame's position so that SVG painting can later add it again and the
|
||||
// frame is painted in the right place.
|
||||
|
||||
gfxPoint toUserSpaceGfx = nsSVGUtils::FrameSpaceInCSSPxToUserSpaceOffset(frame);
|
||||
aToUserSpace =
|
||||
gfxPoint toUserSpaceGfx = nsSVGUtils::FrameSpaceInCSSPxToUserSpaceOffset(aFrame);
|
||||
nsPoint toUserSpace =
|
||||
nsPoint(nsPresContext::CSSPixelsToAppUnits(float(toUserSpaceGfx.x)),
|
||||
nsPresContext::CSSPixelsToAppUnits(float(toUserSpaceGfx.y)));
|
||||
|
||||
aOffsetToUserSpace = aOffsetToBoundingBox - aToUserSpace;
|
||||
aOffsetToUserSpace = aOffsetToBoundingBox - toUserSpace;
|
||||
|
||||
#ifdef DEBUG
|
||||
bool hasSVGLayout = (frame->GetStateBits() & NS_FRAME_SVG_LAYOUT);
|
||||
bool hasSVGLayout = (aFrame->GetStateBits() & NS_FRAME_SVG_LAYOUT);
|
||||
NS_ASSERTION(hasSVGLayout || aOffsetToBoundingBox == aOffsetToUserSpace,
|
||||
"For non-SVG frames there shouldn't be any additional offset");
|
||||
#endif
|
||||
|
||||
gfxPoint devPixelOffsetToUserSpace =
|
||||
nsLayoutUtils::PointToGfxPoint(aOffsetToUserSpace,
|
||||
frame->PresContext()->AppUnitsPerDevPixel());
|
||||
aParams.ctx.SetMatrix(aParams.ctx.CurrentMatrix().Translate(devPixelOffsetToUserSpace));
|
||||
aFrame->PresContext()->AppUnitsPerDevPixel());
|
||||
gfxContext& context = aParams.ctx;
|
||||
context.SetMatrix(context.CurrentMatrix().Translate(devPixelOffsetToUserSpace));
|
||||
|
||||
if (aClipCtx) {
|
||||
nsRect clipRect =
|
||||
aParams.frame->GetVisualOverflowRectRelativeToSelf() + toUserSpace;
|
||||
context.Clip(NSRectToSnappedRect(clipRect,
|
||||
aFrame->PresContext()->AppUnitsPerDevPixel(),
|
||||
*context.GetDrawTarget()));
|
||||
}
|
||||
}
|
||||
|
||||
static already_AddRefed<gfxContext>
|
||||
CreateBlendTarget(const nsSVGIntegrationUtils::PaintFramesParams& aParams,
|
||||
IntPoint& aTargetOffset)
|
||||
CreateBlendTarget(const PaintFramesParams& aParams, IntPoint& aTargetOffset)
|
||||
{
|
||||
MOZ_ASSERT(aParams.frame->StyleEffects()->mMixBlendMode !=
|
||||
NS_STYLE_BLEND_NORMAL);
|
||||
@ -731,8 +745,8 @@ CreateBlendTarget(const nsSVGIntegrationUtils::PaintFramesParams& aParams,
|
||||
}
|
||||
|
||||
static void
|
||||
BlendToTarget(const nsSVGIntegrationUtils::PaintFramesParams& aParams,
|
||||
gfxContext* aTarget, const IntPoint& aTargetOffset)
|
||||
BlendToTarget(const PaintFramesParams& aParams, gfxContext* aTarget,
|
||||
const IntPoint& aTargetOffset)
|
||||
{
|
||||
MOZ_ASSERT(aParams.frame->StyleEffects()->mMixBlendMode !=
|
||||
NS_STYLE_BLEND_NORMAL);
|
||||
@ -782,11 +796,6 @@ nsSVGIntegrationUtils::PaintMaskAndClipPath(const PaintFramesParams& aParams)
|
||||
|
||||
gfxContext& context = aParams.ctx;
|
||||
gfxContextMatrixAutoSaveRestore matrixAutoSaveRestore(&context);
|
||||
nsPoint offsetToBoundingBox;
|
||||
nsPoint toUserSpace;
|
||||
nsPoint offsetToUserSpace;
|
||||
SetupContextMatrix(aParams, offsetToBoundingBox, toUserSpace,
|
||||
offsetToUserSpace);
|
||||
|
||||
/* Properties are added lazily and may have been removed by a restyle,
|
||||
so make sure all applicable ones are set again. */
|
||||
@ -830,12 +839,23 @@ nsSVGIntegrationUtils::PaintMaskAndClipPath(const PaintFramesParams& aParams)
|
||||
MOZ_ASSERT_IF(shouldGenerateClipMaskLayer,
|
||||
!shouldApplyClipPath && !shouldApplyBasicShape);
|
||||
|
||||
nsPoint offsetToBoundingBox;
|
||||
nsPoint offsetToUserSpace;
|
||||
|
||||
// These are used if we require a temporary surface for a custom blend mode.
|
||||
// Clip the source context first, so that we can generate a smaller temporary
|
||||
// surface. (Since we will clip this context in SetupContextMatrix, a pair
|
||||
// of save/restore is needed.)
|
||||
context.Save();
|
||||
SetupContextMatrix(firstFrame, aParams, offsetToBoundingBox,
|
||||
offsetToUserSpace, true);
|
||||
IntPoint targetOffset;
|
||||
RefPtr<gfxContext> target =
|
||||
(aParams.frame->StyleEffects()->mMixBlendMode == NS_STYLE_BLEND_NORMAL)
|
||||
? RefPtr<gfxContext>(&aParams.ctx).forget()
|
||||
: CreateBlendTarget(aParams, targetOffset);
|
||||
context.Restore();
|
||||
|
||||
if (!target) {
|
||||
return DrawResult::TEMPORARY_ERROR;
|
||||
}
|
||||
@ -846,52 +866,79 @@ nsSVGIntegrationUtils::PaintMaskAndClipPath(const PaintFramesParams& aParams)
|
||||
/* Check if we need to do additional operations on this child's
|
||||
* rendering, which necessitates rendering into another surface. */
|
||||
if (shouldGenerateMask) {
|
||||
context.Save();
|
||||
nsRect clipRect =
|
||||
frame->GetVisualOverflowRectRelativeToSelf() + toUserSpace;
|
||||
context.Clip(NSRectToSnappedRect(clipRect,
|
||||
frame->PresContext()->AppUnitsPerDevPixel(),
|
||||
*context.GetDrawTarget()));
|
||||
gfxContextMatrixAutoSaveRestore matSR;
|
||||
|
||||
Matrix maskTransform;
|
||||
RefPtr<SourceSurface> maskSurface;
|
||||
|
||||
if (shouldGenerateMaskLayer) {
|
||||
matSR.SetContext(&context);
|
||||
|
||||
// For css-mask, we want to generate a mask for each continuation frame,
|
||||
// so we setup context matrix by the position of the current frame,
|
||||
// instead of the first continuation frame.
|
||||
SetupContextMatrix(frame, aParams, offsetToBoundingBox,
|
||||
offsetToUserSpace, true);
|
||||
result = GenerateMaskSurface(aParams, opacity,
|
||||
firstFrame->StyleContext(),
|
||||
maskFrames, offsetToUserSpace,
|
||||
maskTransform, maskSurface);
|
||||
}
|
||||
|
||||
if (shouldGenerateMaskLayer && !maskSurface) {
|
||||
// Entire surface is clipped out.
|
||||
context.Restore();
|
||||
return result;
|
||||
context.PopClip();
|
||||
if (!maskSurface) {
|
||||
// Entire surface is clipped out.
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
||||
if (shouldGenerateClipMaskLayer) {
|
||||
matSR.Restore();
|
||||
matSR.SetContext(&context);
|
||||
|
||||
SetupContextMatrix(firstFrame, aParams, offsetToBoundingBox,
|
||||
offsetToUserSpace, true);
|
||||
Matrix clippedMaskTransform;
|
||||
RefPtr<SourceSurface> clipMaskSurface =
|
||||
clipPathFrame->GetClipMask(context, frame, cssPxToDevPxMatrix,
|
||||
&clippedMaskTransform, maskSurface,
|
||||
maskTransform, &result);
|
||||
context.PopClip();
|
||||
|
||||
if (clipMaskSurface) {
|
||||
maskSurface = clipMaskSurface;
|
||||
maskTransform = clippedMaskTransform;
|
||||
} else {
|
||||
// Either entire surface is clipped out, or gfx buffer allocation
|
||||
// failure in nsSVGClipPathFrame::GetClipMask.
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
||||
// opacity != 1.0f.
|
||||
if (!shouldGenerateClipMaskLayer && !shouldGenerateMaskLayer) {
|
||||
MOZ_ASSERT(opacity != 1.0f);
|
||||
|
||||
matSR.SetContext(&context);
|
||||
SetupContextMatrix(firstFrame, aParams, offsetToBoundingBox,
|
||||
offsetToUserSpace, true);
|
||||
}
|
||||
|
||||
target->PushGroupForBlendBack(gfxContentType::COLOR_ALPHA, opacity, maskSurface, maskTransform);
|
||||
}
|
||||
|
||||
/* If this frame has only a trivial clipPath, set up cairo's clipping now so
|
||||
* we can just do normal painting and get it clipped appropriately.
|
||||
*/
|
||||
if (shouldApplyClipPath) {
|
||||
if (shouldApplyClipPath || shouldApplyBasicShape) {
|
||||
context.Save();
|
||||
clipPathFrame->ApplyClipPath(context, frame, cssPxToDevPxMatrix);
|
||||
} else if (shouldApplyBasicShape) {
|
||||
context.Save();
|
||||
nsCSSClipPathInstance::ApplyBasicShapeClip(context, frame);
|
||||
SetupContextMatrix(firstFrame, aParams, offsetToBoundingBox,
|
||||
offsetToUserSpace, false);
|
||||
|
||||
MOZ_ASSERT(!shouldApplyClipPath || !shouldApplyBasicShape);
|
||||
if (shouldApplyClipPath) {
|
||||
clipPathFrame->ApplyClipPath(context, frame, cssPxToDevPxMatrix);
|
||||
} else {
|
||||
nsCSSClipPathInstance::ApplyBasicShapeClip(context, frame);
|
||||
}
|
||||
}
|
||||
|
||||
/* Paint the child */
|
||||
@ -900,7 +947,7 @@ nsSVGIntegrationUtils::PaintMaskAndClipPath(const PaintFramesParams& aParams)
|
||||
RefPtr<gfxContext> oldCtx = basic->GetTarget();
|
||||
basic->SetTarget(target);
|
||||
aParams.layerManager->EndTransaction(FrameLayerBuilder::DrawPaintedLayer,
|
||||
aParams.builder);
|
||||
aParams.builder);
|
||||
basic->SetTarget(oldCtx);
|
||||
|
||||
if (shouldApplyClipPath || shouldApplyBasicShape) {
|
||||
@ -909,7 +956,12 @@ nsSVGIntegrationUtils::PaintMaskAndClipPath(const PaintFramesParams& aParams)
|
||||
|
||||
if (shouldGenerateMask) {
|
||||
target->PopGroupAndBlend();
|
||||
context.Restore();
|
||||
|
||||
if (!shouldGenerateClipMaskLayer && !shouldGenerateMaskLayer) {
|
||||
MOZ_ASSERT(opacity != 1.0f);
|
||||
// Pop the clip push by SetupContextMatrix
|
||||
context.PopClip();
|
||||
}
|
||||
}
|
||||
|
||||
if (aParams.frame->StyleEffects()->mMixBlendMode != NS_STYLE_BLEND_NORMAL) {
|
||||
@ -940,14 +992,6 @@ nsSVGIntegrationUtils::PaintFilter(const PaintFramesParams& aParams)
|
||||
return DrawResult::SUCCESS;
|
||||
}
|
||||
|
||||
gfxContext& context = aParams.ctx;
|
||||
gfxContextMatrixAutoSaveRestore matrixAutoSaveRestore(&context);
|
||||
nsPoint offsetToBoundingBox;
|
||||
nsPoint toUserSpace;
|
||||
nsPoint offsetToUserSpace;
|
||||
SetupContextMatrix(aParams, offsetToBoundingBox, toUserSpace,
|
||||
offsetToUserSpace);
|
||||
|
||||
/* Properties are added lazily and may have been removed by a restyle,
|
||||
so make sure all applicable ones are set again. */
|
||||
nsIFrame* firstFrame =
|
||||
@ -959,24 +1003,28 @@ nsSVGIntegrationUtils::PaintFilter(const PaintFramesParams& aParams)
|
||||
return DrawResult::NOT_READY;
|
||||
}
|
||||
|
||||
gfxContext& context = aParams.ctx;
|
||||
nsPoint offsetToBoundingBox;
|
||||
nsPoint offsetToUserSpace;
|
||||
|
||||
// These are used if we require a temporary surface for a custom blend mode.
|
||||
// Clip the source context first, so that we can generate a smaller temporary
|
||||
// surface. (Since we will clip this context in SetupContextMatrix, a pair
|
||||
// of save/restore is needed.)
|
||||
gfxContextAutoSaveRestore autoSR(&context);
|
||||
SetupContextMatrix(firstFrame, aParams, offsetToBoundingBox,
|
||||
offsetToUserSpace, true);
|
||||
IntPoint targetOffset;
|
||||
RefPtr<gfxContext> target =
|
||||
(aParams.frame->StyleEffects()->mMixBlendMode == NS_STYLE_BLEND_NORMAL)
|
||||
? RefPtr<gfxContext>(&aParams.ctx).forget()
|
||||
: CreateBlendTarget(aParams, targetOffset);
|
||||
if (!target) {
|
||||
context.Restore();
|
||||
return DrawResult::TEMPORARY_ERROR;
|
||||
}
|
||||
|
||||
if (opacity != 1.0f) {
|
||||
context.Save();
|
||||
nsRect clipRect =
|
||||
frame->GetVisualOverflowRectRelativeToSelf() + toUserSpace;
|
||||
context.Clip(NSRectToSnappedRect(clipRect,
|
||||
frame->PresContext()->AppUnitsPerDevPixel(),
|
||||
*context.GetDrawTarget()));
|
||||
|
||||
target->PushGroupForBlendBack(gfxContentType::COLOR_ALPHA, opacity,
|
||||
nullptr, Matrix());
|
||||
}
|
||||
@ -991,7 +1039,6 @@ nsSVGIntegrationUtils::PaintFilter(const PaintFramesParams& aParams)
|
||||
|
||||
if (opacity != 1.0f) {
|
||||
target->PopGroupAndBlend();
|
||||
context.Restore();
|
||||
}
|
||||
|
||||
if (aParams.frame->StyleEffects()->mMixBlendMode != NS_STYLE_BLEND_NORMAL) {
|
||||
|
@@ -340,9 +340,10 @@ bool
MP4Metadata::ReadTrackIndex(FallibleTArray<Index::Indice>& aDest, mozilla::TrackID aTrackID)
{
#ifdef MOZ_RUST_MP4PARSE
  if (mRust && mPreferRust) {
    return mRust->ReadTrackIndex(aDest, aTrackID);
  if (mRust && mPreferRust && mRust->ReadTrackIndex(aDest, aTrackID)) {
    return true;
  }
  aDest.Clear();
#endif
  return mStagefright->ReadTrackIndex(aDest, aTrackID);
}
@@ -839,7 +840,7 @@ MP4MetadataRust::ReadTrackIndex(FallibleTArray<Index::Indice>& aDest, mozilla::T
  }

  // For non-fragmented mp4.
  MOZ_ASSERT(false, "Not yet implemented");
  NS_WARNING("Not yet implemented");

  return false;
}
@@ -227,7 +227,7 @@ dependencies {
    compile project(':thirdparty')

    testCompile 'junit:junit:4.12'
    testCompile 'org.robolectric:robolectric:3.0'
    testCompile 'org.robolectric:robolectric:3.1.2'
    testCompile 'org.simpleframework:simple-http:6.0.1'
    testCompile 'org.mockito:mockito-core:1.10.19'

@@ -12,4 +12,5 @@ EXTRA_COMPONENTS += [

DIRS += ['schemas']

MOCHITEST_CHROME_MANIFESTS += ['test/mochitest/chrome.ini']
MOCHITEST_MANIFESTS += ['test/mochitest/mochitest.ini']
MOCHITEST_CHROME_MANIFESTS += ['test/mochitest/chrome.ini']

@@ -0,0 +1,6 @@
[DEFAULT]
support-files =
  ../../../../../../toolkit/components/extensions/test/mochitest/test_ext_all_apis.js
tags = webextensions

[test_ext_all_apis.html]
@@ -0,0 +1,23 @@
<!DOCTYPE HTML>
<html>
<head>
  <title>WebExtension test</title>
  <meta charset="utf-8">
  <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
  <script type="text/javascript" src="/tests/SimpleTest/SpawnTask.js"></script>
  <script type="text/javascript" src="/tests/SimpleTest/ExtensionTestUtils.js"></script>
  <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css">
</head>
<body>
<script>
"use strict";
/* exported expectedContentApisTargetSpecific, expectedBackgroundApisTargetSpecific */
let expectedContentApisTargetSpecific = [
];

let expectedBackgroundApisTargetSpecific = [
];
</script>
<script src="test_ext_all_apis.js"></script>
</body>
</html>
@ -35,16 +35,16 @@
|
||||
"visibility": "public",
|
||||
"filename": "jcentral.tar.xz",
|
||||
"unpack": true,
|
||||
"digest": "43754910576cf6173f0dcb215ec7988f2a30dbfe32050f53ac7a9088b8b878c3ead80e02afc1dc31e229386116c82e88a403a309e5b5695f0b74f5defb13dec7",
|
||||
"size": 42832932
|
||||
"digest": "66640e3f77a0f9c0ea52f66c53bee8db3c1a27ea4a11526d15706b9da6a0302cd2d5b088f9addca84f4a962022cba3b76829cb878c90cf9bebb3aab050b4aaa4",
|
||||
"size": 47315996
|
||||
},
|
||||
{
|
||||
"algorithm": "sha512",
|
||||
"visibility": "public",
|
||||
"filename": "gradle-dist.tar.xz",
|
||||
"unpack": true,
|
||||
"digest": "990edc0e4039dbe5f77790ef59dc0d58faebbb8c82ee497615c7991eec99fe4668d0ab05508c48664b635ff6c0cfd4272db464ae1efaa548e471ab451fe0944f",
|
||||
"size": 51955340
|
||||
"digest": "36f961f85b0be846cc9e72bfa0dd1f74e7da8ef785717ce4fd102fec977f21f8902c233b28a21c1ce3797eb2759c7a74c5f74e47bd8f13c1eec640f8d7bed4ac",
|
||||
"size": 51512016
|
||||
},
|
||||
{
|
||||
"algorithm": "sha512",
|
||||
|
@ -50,16 +50,16 @@
|
||||
"visibility": "public",
|
||||
"filename": "jcentral.tar.xz",
|
||||
"unpack": true,
|
||||
"digest": "43754910576cf6173f0dcb215ec7988f2a30dbfe32050f53ac7a9088b8b878c3ead80e02afc1dc31e229386116c82e88a403a309e5b5695f0b74f5defb13dec7",
|
||||
"size": 42832932
|
||||
"digest": "66640e3f77a0f9c0ea52f66c53bee8db3c1a27ea4a11526d15706b9da6a0302cd2d5b088f9addca84f4a962022cba3b76829cb878c90cf9bebb3aab050b4aaa4",
|
||||
"size": 47315996
|
||||
},
|
||||
{
|
||||
"algorithm": "sha512",
|
||||
"visibility": "public",
|
||||
"filename": "gradle-dist.tar.xz",
|
||||
"unpack": true,
|
||||
"digest": "990edc0e4039dbe5f77790ef59dc0d58faebbb8c82ee497615c7991eec99fe4668d0ab05508c48664b635ff6c0cfd4272db464ae1efaa548e471ab451fe0944f",
|
||||
"size": 51955340
|
||||
"digest": "36f961f85b0be846cc9e72bfa0dd1f74e7da8ef785717ce4fd102fec977f21f8902c233b28a21c1ce3797eb2759c7a74c5f74e47bd8f13c1eec640f8d7bed4ac",
|
||||
"size": 51512016
|
||||
},
|
||||
{
|
||||
"size": 30899096,
|
||||
|
@ -60,16 +60,16 @@
|
||||
"visibility": "public",
|
||||
"filename": "jcentral.tar.xz",
|
||||
"unpack": true,
|
||||
"digest": "43754910576cf6173f0dcb215ec7988f2a30dbfe32050f53ac7a9088b8b878c3ead80e02afc1dc31e229386116c82e88a403a309e5b5695f0b74f5defb13dec7",
|
||||
"size": 42832932
|
||||
"digest": "66640e3f77a0f9c0ea52f66c53bee8db3c1a27ea4a11526d15706b9da6a0302cd2d5b088f9addca84f4a962022cba3b76829cb878c90cf9bebb3aab050b4aaa4",
|
||||
"size": 47315996
|
||||
},
|
||||
{
|
||||
"algorithm": "sha512",
|
||||
"visibility": "public",
|
||||
"filename": "gradle-dist.tar.xz",
|
||||
"unpack": true,
|
||||
"digest": "990edc0e4039dbe5f77790ef59dc0d58faebbb8c82ee497615c7991eec99fe4668d0ab05508c48664b635ff6c0cfd4272db464ae1efaa548e471ab451fe0944f",
|
||||
"size": 51955340
|
||||
"digest": "36f961f85b0be846cc9e72bfa0dd1f74e7da8ef785717ce4fd102fec977f21f8902c233b28a21c1ce3797eb2759c7a74c5f74e47bd8f13c1eec640f8d7bed4ac",
|
||||
"size": 51512016
|
||||
},
|
||||
{
|
||||
"version": "rustc 1.11.0 (9b21dcd6a 2016-08-15) repack",
|
||||
|
@ -25,7 +25,7 @@
|
||||
package org.mozilla.gecko.background.testhelpers;
|
||||
|
||||
import org.junit.runners.model.InitializationError;
|
||||
import org.robolectric.RobolectricGradleTestRunner;
|
||||
import org.robolectric.RobolectricTestRunner;
|
||||
import org.robolectric.annotation.Config;
|
||||
import org.robolectric.manifest.AndroidManifest;
|
||||
import org.robolectric.res.FileFsFile;
|
||||
@ -46,7 +46,7 @@ import org.robolectric.util.ReflectionHelpers;
|
||||
* that uses a Gradle `buildConfigField` to find build outputs.
|
||||
* See https://github.com/robolectric/robolectric/issues/1648#issuecomment-113731011.
|
||||
*/
|
||||
public class TestRunner extends RobolectricGradleTestRunner {
|
||||
public class TestRunner extends RobolectricTestRunner {
|
||||
private FsFile buildFolder;
|
||||
|
||||
public TestRunner(Class<?> klass) throws InitializationError {
|
||||
|
@ -10,6 +10,7 @@ import android.database.Cursor;
|
||||
import android.net.Uri;
|
||||
import android.os.RemoteException;
|
||||
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
@ -51,6 +52,13 @@ public class BrowserProviderHistoryTest extends BrowserProviderHistoryVisitsTest
|
||||
).build();
|
||||
}
|
||||
|
||||
@After
|
||||
@Override
|
||||
public void tearDown() {
|
||||
thumbnailClient.release();
|
||||
super.tearDown();
|
||||
}
|
||||
|
||||
/**
|
||||
* Test aggressive expiration on new (recent) history items
|
||||
*/
|
||||
|
@ -54,7 +54,7 @@ public class BrowserProviderHistoryVisitsTestBase {
|
||||
}
|
||||
|
||||
/* package-private */ Uri insertHistoryItem(String url, String guid, Long lastVisited, Integer visitCount) throws RemoteException {
|
||||
return insertHistoryItem(url, guid, System.currentTimeMillis(), null, null);
|
||||
return insertHistoryItem(url, guid, lastVisited, visitCount, null);
|
||||
}
|
||||
|
||||
/* package-private */ Uri insertHistoryItem(String url, String guid, Long lastVisited, Integer visitCount, String title) throws RemoteException {
|
||||
|
@@ -79,4 +79,10 @@ interface nsIUnicharStreamLoader : nsIStreamListener
   * called.
   */
  readonly attribute ACString charset;

  /**
   * Get the raw bytes as seen on the wire, prior to character conversion.
   * Used by the Subresource Integrity checker to generate the correct hash.
   */
  readonly attribute ACString rawBuffer;
};
@ -102,10 +102,19 @@ nsUnicharStreamLoader::OnStopRequest(nsIRequest *aRequest,
|
||||
mContext = nullptr;
|
||||
mChannel = nullptr;
|
||||
mCharset.Truncate();
|
||||
mRawData.Truncate();
|
||||
mRawBuffer.Truncate();
|
||||
mBuffer.Truncate();
|
||||
return rv;
|
||||
}
|
||||
|
||||
NS_IMETHODIMP
|
||||
nsUnicharStreamLoader::GetRawBuffer(nsACString& aRawBuffer)
|
||||
{
|
||||
aRawBuffer = mRawBuffer;
|
||||
return NS_OK;
|
||||
}
|
||||
|
||||
/* nsIStreamListener implementation */
|
||||
NS_IMETHODIMP
|
||||
nsUnicharStreamLoader::OnDataAvailable(nsIRequest *aRequest,
|
||||
@ -220,6 +229,10 @@ nsUnicharStreamLoader::WriteSegmentFun(nsIInputStream *,
|
||||
return NS_ERROR_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
if (!self->mRawBuffer.Append(aSegment, aCount, fallible)) {
|
||||
return NS_ERROR_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
rv = self->mDecoder->Convert(aSegment,
|
||||
&srcLen,
|
||||
self->mBuffer.BeginWriting() + haveRead,
|
||||
|
@ -47,6 +47,10 @@ protected:
|
||||
// It will be passed to the OnDetermineCharset callback.
|
||||
nsCString mRawData;
|
||||
|
||||
// Holds complete raw bytes as received so that SRI checks can be
|
||||
// calculated on the raw data prior to character conversion.
|
||||
nsCString mRawBuffer;
|
||||
|
||||
// This holds the complete contents of the stream so far, after
|
||||
// decoding to UTF-16. It will be passed to the OnStreamComplete
|
||||
// callback.
|
||||
|
@ -15,6 +15,9 @@ from mozbuild.shellutil import quote as shell_quote
|
||||
from .common import CommonBackend
|
||||
from ..frontend.data import (
|
||||
ContextDerived,
|
||||
Defines,
|
||||
GeneratedFile,
|
||||
HostDefines,
|
||||
)
|
||||
from ..util import (
|
||||
FileAvoidWrite,
|
||||
@ -33,6 +36,9 @@ class BackendTupfile(object):
|
||||
self.environment = environment
|
||||
self.name = mozpath.join(objdir, 'Tupfile')
|
||||
self.rules_included = False
|
||||
self.shell_exported = False
|
||||
self.defines = []
|
||||
self.host_defines = []
|
||||
|
||||
self.fh = FileAvoidWrite(self.name, capture_diff=True)
|
||||
self.fh.write('# THIS FILE WAS AUTOMATICALLY GENERATED. DO NOT EDIT.\n')
|
||||
@ -46,18 +52,39 @@ class BackendTupfile(object):
|
||||
self.write('include_rules\n')
|
||||
self.rules_included = True
|
||||
|
||||
def rule(self, cmd, inputs=None, outputs=None, display=None, extra_outputs=None):
|
||||
def rule(self, cmd, inputs=None, outputs=None, display=None, extra_outputs=None, check_unchanged=False):
|
||||
inputs = inputs or []
|
||||
outputs = outputs or []
|
||||
display = display or ""
|
||||
self.include_rules()
|
||||
flags = ""
|
||||
if check_unchanged:
|
||||
# This flag causes tup to compare the outputs with the previous run
|
||||
# of the command, and skip the rest of the DAG for any that are the
|
||||
# same.
|
||||
flags += "o"
|
||||
|
||||
if display:
|
||||
caret_text = flags + ' ' + display
|
||||
else:
|
||||
caret_text = flags
|
||||
|
||||
self.write(': %(inputs)s |> %(display)s%(cmd)s |> %(outputs)s%(extra_outputs)s\n' % {
|
||||
'inputs': ' '.join(inputs),
|
||||
'display': '^ %s^ ' % display if display else '',
|
||||
'display': '^%s^ ' % caret_text if caret_text else '',
|
||||
'cmd': ' '.join(cmd),
|
||||
'outputs': ' '.join(outputs),
|
||||
'extra_outputs': ' | ' + ' '.join(extra_outputs) if extra_outputs else '',
|
||||
})
|
||||
|
||||
def export_shell(self):
|
||||
if not self.shell_exported:
|
||||
# These are used by mach/mixin/process.py to determine the current
|
||||
# shell.
|
||||
for var in ('SHELL', 'MOZILLABUILD', 'COMSPEC'):
|
||||
self.write('export %s\n' % var)
|
||||
self.shell_exported = True
|
||||
|
||||
def close(self):
|
||||
return self.fh.close()
|
||||
|
||||
@ -85,6 +112,17 @@ class TupOnly(CommonBackend, PartialBackend):
|
||||
self.environment.topsrcdir, self.environment.topobjdir)
|
||||
return self._backend_files[objdir]
|
||||
|
||||
def _get_backend_file_for(self, obj):
|
||||
return self._get_backend_file(obj.relativedir)
|
||||
|
||||
def _py_action(self, action):
|
||||
cmd = [
|
||||
'$(PYTHON)',
|
||||
'-m',
|
||||
'mozbuild.action.%s' % action,
|
||||
]
|
||||
return cmd
|
||||
|
||||
def consume_object(self, obj):
|
||||
"""Write out build files necessary to build with tup."""
|
||||
|
||||
@ -98,6 +136,43 @@ class TupOnly(CommonBackend, PartialBackend):
|
||||
if consumed:
|
||||
return False
|
||||
|
||||
backend_file = self._get_backend_file_for(obj)
|
||||
|
||||
if isinstance(obj, GeneratedFile):
|
||||
# TODO: These are directories that don't work in the tup backend
|
||||
# yet, because things they depend on aren't built yet.
|
||||
skip_directories = (
|
||||
'build', # FinalTargetPreprocessedFiles
|
||||
'layout/style/test', # HostSimplePrograms
|
||||
'toolkit/library', # libxul.so
|
||||
)
|
||||
if obj.script and obj.method and obj.relobjdir not in skip_directories:
|
||||
backend_file.export_shell()
|
||||
cmd = self._py_action('file_generate')
|
||||
cmd.extend([
|
||||
obj.script,
|
||||
obj.method,
|
||||
obj.outputs[0],
|
||||
'%s.pp' % obj.outputs[0], # deps file required
|
||||
])
|
||||
full_inputs = [f.full_path for f in obj.inputs]
|
||||
cmd.extend(full_inputs)
|
||||
|
||||
outputs = []
|
||||
outputs.extend(obj.outputs)
|
||||
outputs.append('%s.pp' % obj.outputs[0])
|
||||
|
||||
backend_file.rule(
|
||||
display='python {script}:{method} -> [%o]'.format(script=obj.script, method=obj.method),
|
||||
cmd=cmd,
|
||||
inputs=full_inputs,
|
||||
outputs=outputs,
|
||||
)
|
||||
elif isinstance(obj, Defines):
|
||||
self._process_defines(backend_file, obj)
|
||||
elif isinstance(obj, HostDefines):
|
||||
self._process_defines(backend_file, obj, host=True)
|
||||
|
||||
return True
|
||||
|
||||
def consume_finished(self):
|
||||
@ -123,34 +198,24 @@ class TupOnly(CommonBackend, PartialBackend):
|
||||
fh.write('PYTHON_PATH = $(PYTHON) $(topsrcdir)/config/pythonpath.py\n')
|
||||
fh.write('PLY_INCLUDE = -I$(topsrcdir)/other-licenses/ply\n')
|
||||
fh.write('IDL_PARSER_DIR = $(topsrcdir)/xpcom/idl-parser\n')
|
||||
fh.write('IDL_PARSER_CACHE_DIR = $(MOZ_OBJ_ROOT)/xpcom/idl-parser\n')
|
||||
fh.write('IDL_PARSER_CACHE_DIR = $(MOZ_OBJ_ROOT)/xpcom/idl-parser/xpidl\n')
|
||||
|
||||
# Run 'tup init' if necessary.
|
||||
if not os.path.exists(mozpath.join(self.environment.topsrcdir, ".tup")):
|
||||
tup = self.environment.substs.get('TUP', 'tup')
|
||||
self._cmd.run_process(cwd=self.environment.topsrcdir, log_name='tup', args=[tup, 'init'])
|
||||
|
||||
def _process_defines(self, backend_file, obj, host=False):
|
||||
defines = list(obj.get_defines())
|
||||
if defines:
|
||||
if host:
|
||||
backend_file.host_defines = defines
|
||||
else:
|
||||
backend_file.defines = defines
|
||||
|
||||
def _handle_idl_manager(self, manager):
|
||||
|
||||
# TODO: This should come from GENERATED_FILES, and can be removed once
|
||||
# those are implemented.
|
||||
backend_file = self._get_backend_file('xpcom/idl-parser')
|
||||
backend_file.rule(
|
||||
display='python header.py -> [%o]',
|
||||
cmd=[
|
||||
'$(PYTHON_PATH)',
|
||||
'$(PLY_INCLUDE)',
|
||||
'$(topsrcdir)/xpcom/idl-parser/xpidl/header.py',
|
||||
],
|
||||
outputs=['xpidlyacc.py', 'xpidllex.py'],
|
||||
)
|
||||
|
||||
backend_file = self._get_backend_file('xpcom/xpidl')
|
||||
|
||||
# These are used by mach/mixin/process.py to determine the current
|
||||
# shell.
|
||||
for var in ('SHELL', 'MOZILLABUILD', 'COMSPEC'):
|
||||
backend_file.write('export %s\n' % var)
|
||||
backend_file.export_shell()
|
||||
|
||||
for module, data in sorted(manager.modules.iteritems()):
|
||||
dest, idls = data
|
||||
@ -160,7 +225,7 @@ class TupOnly(CommonBackend, PartialBackend):
|
||||
'-I$(IDL_PARSER_DIR)',
|
||||
'-I$(IDL_PARSER_CACHE_DIR)',
|
||||
'$(topsrcdir)/python/mozbuild/mozbuild/action/xpidl-process.py',
|
||||
'--cache-dir', '$(MOZ_OBJ_ROOT)/xpcom/idl-parser',
|
||||
'--cache-dir', '$(IDL_PARSER_CACHE_DIR)',
|
||||
'$(DIST)/idl',
|
||||
'$(DIST)/include',
|
||||
'$(MOZ_OBJ_ROOT)/%s/components' % dest,
|
||||
@ -172,14 +237,26 @@ class TupOnly(CommonBackend, PartialBackend):
|
||||
outputs.extend(['$(MOZ_OBJ_ROOT)/dist/include/%s.h' % f for f in sorted(idls)])
|
||||
backend_file.rule(
|
||||
inputs=[
|
||||
'$(MOZ_OBJ_ROOT)/xpcom/idl-parser/xpidllex.py',
|
||||
'$(MOZ_OBJ_ROOT)/xpcom/idl-parser/xpidlyacc.py',
|
||||
'$(MOZ_OBJ_ROOT)/xpcom/idl-parser/xpidl/xpidllex.py',
|
||||
'$(MOZ_OBJ_ROOT)/xpcom/idl-parser/xpidl/xpidlyacc.py',
|
||||
],
|
||||
display='XPIDL %s' % module,
|
||||
cmd=cmd,
|
||||
outputs=outputs,
|
||||
)
|
||||
|
||||
def _preprocess(self, backend_file, input_file):
|
||||
cmd = self._py_action('preprocessor')
|
||||
cmd.extend(backend_file.defines)
|
||||
cmd.extend(['$(ACDEFINES)', '%f', '-o', '%o'])
|
||||
|
||||
backend_file.rule(
|
||||
inputs=[input_file],
|
||||
display='Preprocess %o',
|
||||
cmd=cmd,
|
||||
outputs=[mozpath.basename(input_file)],
|
||||
)
|
||||
|
||||
def _handle_ipdl_sources(self, ipdl_dir, sorted_ipdl_sources,
|
||||
unified_ipdl_cppsrcs_mapping):
|
||||
# TODO: This isn't implemented yet in the tup backend, but it is called
|
||||
@ -189,9 +266,33 @@ class TupOnly(CommonBackend, PartialBackend):
|
||||
def _handle_webidl_build(self, bindings_dir, unified_source_mapping,
|
||||
webidls, expected_build_output_files,
|
||||
global_define_files):
|
||||
# TODO: This isn't implemented yet in the tup backend, but it is called
|
||||
# by the CommonBackend.
|
||||
pass
|
||||
backend_file = self._get_backend_file('dom/bindings')
|
||||
backend_file.export_shell()
|
||||
|
||||
for source in sorted(webidls.all_preprocessed_sources()):
|
||||
self._preprocess(backend_file, source)
|
||||
|
||||
cmd = self._py_action('webidl')
|
||||
cmd.append(mozpath.join(self.environment.topsrcdir, 'dom', 'bindings'))
|
||||
|
||||
# The WebIDLCodegenManager knows all of the .cpp and .h files that will
|
||||
# be created (expected_build_output_files), but there are a few
|
||||
# additional files that are also created by the webidl py_action.
|
||||
outputs = [
|
||||
'_cache/webidlyacc.py',
|
||||
'codegen.json',
|
||||
'codegen.pp',
|
||||
'parser.out',
|
||||
]
|
||||
outputs.extend(expected_build_output_files)
|
||||
|
||||
backend_file.rule(
|
||||
display='WebIDL code generation',
|
||||
cmd=cmd,
|
||||
inputs=webidls.all_non_static_basenames(),
|
||||
outputs=outputs,
|
||||
check_unchanged=True,
|
||||
)
|
||||
|
||||
|
||||
class TupBackend(HybridBackend(TupOnly, RecursiveMakeBackend)):
|
||||
|
@ -1140,13 +1140,14 @@ add_task(function* test_onItemDeleted_removeFolderTransaction() {
|
||||
|
||||
_("Undo the remove folder transaction");
|
||||
txn.undoTransaction();
|
||||
yield verifyTrackedItems(["menu"]);
|
||||
do_check_eq(tracker.score, SCORE_INCREMENT_XLARGE);
|
||||
yield resetTracker();
|
||||
|
||||
// At this point, the restored folder has the same ID, but a different GUID.
|
||||
let new_folder_guid = yield PlacesUtils.promiseItemGuid(folder_id);
|
||||
|
||||
yield verifyTrackedItems(["menu", new_folder_guid]);
|
||||
do_check_eq(tracker.score, SCORE_INCREMENT_XLARGE * 2);
|
||||
yield resetTracker();
|
||||
|
||||
_("Redo the transaction");
|
||||
txn.redoTransaction();
|
||||
yield verifyTrackedItems(["menu", new_folder_guid]);
|
||||
|
@ -29,6 +29,8 @@ job-defaults:
|
||||
# the functionality that l10n needs
|
||||
|
||||
JOB_SCRIPT: "taskcluster/scripts/builder/build-l10n.sh"
|
||||
# don't run anywhere by default, but still available via try
|
||||
run-on-projects: []
|
||||
when:
|
||||
files-changed:
|
||||
- browser/locales/all-locales
|
||||
|
@ -23,9 +23,7 @@ logger = logging.getLogger(__name__)
|
||||
CONCURRENCY = 50
|
||||
|
||||
|
||||
def create_tasks(taskgraph, label_to_taskid):
|
||||
# TODO: use the taskGroupId of the decision task
|
||||
task_group_id = slugid()
|
||||
def create_tasks(taskgraph, label_to_taskid, params):
|
||||
taskid_to_label = {t: l for l, t in label_to_taskid.iteritems()}
|
||||
|
||||
session = requests.Session()
|
||||
@ -40,6 +38,13 @@ def create_tasks(taskgraph, label_to_taskid):
|
||||
|
||||
decision_task_id = os.environ.get('TASK_ID')
|
||||
|
||||
# when running as an actual decision task, we use the decision task's
|
||||
# taskId as the taskGroupId. The process that created the decision task
|
||||
# helpfully placed it in this same taskGroup. If there is no $TASK_ID,
|
||||
# fall back to a slugid
|
||||
task_group_id = decision_task_id or slugid()
|
||||
scheduler_id = 'gecko-level-{}'.format(params['level'])
|
||||
|
||||
with futures.ThreadPoolExecutor(CONCURRENCY) as e:
|
||||
fs = {}
|
||||
|
||||
@ -62,7 +67,7 @@ def create_tasks(taskgraph, label_to_taskid):
|
||||
task_def['dependencies'] = [decision_task_id]
|
||||
|
||||
task_def['taskGroupId'] = task_group_id
|
||||
task_def['schedulerId'] = '-'
|
||||
task_def['schedulerId'] = scheduler_id
|
||||
|
||||
# Wait for dependencies before submitting this.
|
||||
deps_fs = [fs[dep] for dep in task_def.get('dependencies', [])
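The new params argument feeds the two values visible in this hunk: the decision task's own taskId becomes the taskGroupId whenever $TASK_ID is set, with a slugid fallback, and the schedulerId is derived from the repository's SCM level. A small Python sketch under those assumptions (make_slugid stands in for the slugid helper the module already imports):

    import os

    def group_and_scheduler(params, make_slugid):
        decision_task_id = os.environ.get('TASK_ID')
        task_group_id = decision_task_id or make_slugid()
        scheduler_id = 'gecko-level-{}'.format(params['level'])
        return task_group_id, scheduler_id

    # e.g. ('abcDEF123...', 'gecko-level-3') when run inside a decision task
    print(group_and_scheduler({'level': '3'}, make_slugid=lambda: 'local-slugid'))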
|
||||
|
@ -91,7 +91,7 @@ def taskgraph_decision(options):
|
||||
write_artifact('label-to-taskid.json', tgg.label_to_taskid)
|
||||
|
||||
# actually create the graph
|
||||
create_tasks(tgg.optimized_task_graph, tgg.label_to_taskid)
|
||||
create_tasks(tgg.optimized_task_graph, tgg.label_to_taskid, parameters)
|
||||
|
||||
|
||||
def get_decision_parameters(options):
|
||||
|
@ -44,10 +44,11 @@ class TestCreate(unittest.TestCase):
|
||||
graph = Graph(nodes={'tid-a', 'tid-b'}, edges={('tid-a', 'tid-b', 'edge')})
|
||||
taskgraph = TaskGraph(tasks, graph)
|
||||
|
||||
create.create_tasks(taskgraph, label_to_taskid)
|
||||
create.create_tasks(taskgraph, label_to_taskid, {'level': '4'})
|
||||
|
||||
for tid, task in self.created_tasks.iteritems():
|
||||
self.assertEqual(task['payload'], 'hello world')
|
||||
self.assertEqual(task['schedulerId'], 'gecko-level-4')
|
||||
# make sure the dependencies exist, at least
|
||||
for depid in task.get('dependencies', []):
|
||||
if depid is 'decisiontask':
|
||||
@ -65,7 +66,7 @@ class TestCreate(unittest.TestCase):
|
||||
graph = Graph(nodes={'tid-a'}, edges=set())
|
||||
taskgraph = TaskGraph(tasks, graph)
|
||||
|
||||
create.create_tasks(taskgraph, label_to_taskid)
|
||||
create.create_tasks(taskgraph, label_to_taskid, {'level': '4'})
|
||||
|
||||
for tid, task in self.created_tasks.iteritems():
|
||||
self.assertEqual(task.get('dependencies'), [os.environ['TASK_ID']])
|
||||
|
@ -3,7 +3,7 @@
|
||||
set -x -e
|
||||
|
||||
: WORKSPACE ${WORKSPACE:=/workspace}
|
||||
: GRADLE_VERSION ${GRADLE_VERSION:=2.10}
|
||||
: GRADLE_VERSION ${GRADLE_VERSION:=2.14.1}
|
||||
|
||||
set -v
|
||||
|
||||
|
@@ -1,4 +1,2 @@
[global]
disable-pip-version-check = true
trusted-host = pypi.pub.build.mozilla.org

@@ -1,4 +1,2 @@
[global]
disable-pip-version-check = true
trusted-host = pypi.pub.build.mozilla.org

@@ -1,4 +1,2 @@
[global]
disable-pip-version-check = true
trusted-host = pypi.pub.build.mozilla.org

@@ -1,4 +1,2 @@
[global]
disable-pip-version-check = true
trusted-host = pypi.pub.build.mozilla.org

@ -37,13 +37,16 @@ def print_line(prefix, m):
|
||||
print(b'[%s %sZ] %s' % (prefix, now.isoformat(), m), end=b'')
|
||||
|
||||
|
||||
def run_and_prefix_output(prefix, args):
|
||||
def run_and_prefix_output(prefix, args, extra_env=None):
|
||||
"""Runs a process and prefixes its output with the time.
|
||||
|
||||
Returns the process exit code.
|
||||
"""
|
||||
print_line(prefix, b'executing %s\n' % args)
|
||||
|
||||
env = dict(os.environ)
|
||||
env.update(extra_env or {})
|
||||
|
||||
# Note: TaskCluster's stdin is a TTY. This attribute is lost
|
||||
# when we pass sys.stdin to the invoked process. If we cared
|
||||
# to preserve stdin as a TTY, we could make this work. But until
|
||||
@ -57,6 +60,7 @@ def run_and_prefix_output(prefix, args):
|
||||
stderr=subprocess.STDOUT,
|
||||
stdin=sys.stdin.fileno(),
|
||||
cwd='/',
|
||||
env=env,
|
||||
# So \r in progress bars are rendered as multiple
|
||||
# lines, preserving progress indicators.
|
||||
universal_newlines=True)
|
||||
@ -121,7 +125,7 @@ def vcs_checkout(args):
|
||||
b'--upstream', base_repo,
|
||||
revision_flag, revision,
|
||||
os.environ['GECKO_HEAD_REPOSITORY'], args.vcs_checkout
|
||||
])
|
||||
], extra_env={b'PYTHONUNBUFFERED': b'1'})
|
||||
|
||||
if res:
|
||||
sys.exit(res)
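The new extra_env parameter is merged into a copy of the parent environment before the child process starts, which is how the checkout step forces unbuffered Python output without touching os.environ itself. A minimal sketch of that pattern:

    import os
    import subprocess

    def run_with_extra_env(args, extra_env=None):
        env = dict(os.environ)          # copy; never mutate the parent environment
        env.update(extra_env or {})
        return subprocess.call(args, env=env)

    # Usage mirroring the vcs_checkout change above:
    # run_with_extra_env(['hg', 'robustcheckout', ...], extra_env={'PYTHONUNBUFFERED': '1'})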
|
||||
|
@@ -1,4 +1,2 @@
[global]
disable-pip-version-check = true
trusted-host = pypi.pub.build.mozilla.org

@ -25,9 +25,6 @@ config = {
|
||||
"exes": {
|
||||
"gittool.py": os.path.join(LOCAL_WORKDIR, "gittool.py"),
|
||||
},
|
||||
"env": {
|
||||
"PIP_TRUSTED_HOST": "pypi.pub.build.mozilla.org",
|
||||
},
|
||||
|
||||
# Pip
|
||||
"find_links": ["http://pypi.pub.build.mozilla.org/pub"],
|
||||
|
@ -5,10 +5,6 @@ config = {
|
||||
# Tests run in mozmill-ci do not use RelEng infra
|
||||
'developer_mode': True,
|
||||
|
||||
'env': {
|
||||
'PIP_TRUSTED_HOST': 'pypi.pub.build.mozilla.org',
|
||||
},
|
||||
|
||||
# PIP
|
||||
'find_links': ['http://pypi.pub.build.mozilla.org/pub'],
|
||||
'pip_index': False,
|
||||
|
@ -13,10 +13,6 @@ external_tools_path = os.path.join(
|
||||
|
||||
|
||||
config = {
|
||||
'env': {
|
||||
'PIP_TRUSTED_HOST': 'pypi.pub.build.mozilla.org',
|
||||
},
|
||||
|
||||
# General local variable overwrite
|
||||
'exes': {
|
||||
'gittool.py': [
|
||||
|
@ -13,10 +13,6 @@ external_tools_path = os.path.join(
|
||||
)
|
||||
|
||||
config = {
|
||||
'env': {
|
||||
'PIP_TRUSTED_HOST': 'pypi.pub.build.mozilla.org',
|
||||
},
|
||||
|
||||
# PIP
|
||||
'find_links': ['http://pypi.pub.build.mozilla.org/pub'],
|
||||
'pip_index': False,
|
||||
|
@ -7,12 +7,14 @@
|
||||
'''Python usage, esp. virtualenv.
|
||||
'''
|
||||
|
||||
import distutils.version
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
import json
|
||||
import traceback
|
||||
import urlparse
|
||||
|
||||
import mozharness
|
||||
from mozharness.base.script import (
|
||||
@ -49,7 +51,7 @@ def get_tlsv1_post():
|
||||
|
||||
# Virtualenv {{{1
|
||||
virtualenv_config_options = [
|
||||
[["--venv-path", "--virtualenv-path"], {
|
||||
[["--virtualenv-path"], {
|
||||
"action": "store",
|
||||
"dest": "virtualenv_path",
|
||||
"default": "venv",
|
||||
@ -111,18 +113,21 @@ class VirtualenvMixin(object):
|
||||
optional, two_pass, editable))
|
||||
|
||||
def query_virtualenv_path(self):
|
||||
c = self.config
|
||||
"""Determine the absolute path to the virtualenv."""
|
||||
dirs = self.query_abs_dirs()
|
||||
virtualenv = None
|
||||
|
||||
if 'abs_virtualenv_dir' in dirs:
|
||||
virtualenv = dirs['abs_virtualenv_dir']
|
||||
elif c.get('virtualenv_path'):
|
||||
if os.path.isabs(c['virtualenv_path']):
|
||||
virtualenv = c['virtualenv_path']
|
||||
else:
|
||||
virtualenv = os.path.join(dirs['abs_work_dir'],
|
||||
c['virtualenv_path'])
|
||||
return virtualenv
|
||||
return dirs['abs_virtualenv_dir']
|
||||
|
||||
p = self.config['virtualenv_path']
|
||||
if not p:
|
||||
self.fatal('virtualenv_path config option not set; '
|
||||
'this should never happen')
|
||||
|
||||
if os.path.isabs(p):
|
||||
return p
|
||||
else:
|
||||
return os.path.join(dirs['abs_work_dir'], p)
|
||||
|
||||
def query_python_path(self, binary="python"):
|
||||
"""Return the path of a binary inside the virtualenv, if
|
||||
@ -134,10 +139,8 @@ class VirtualenvMixin(object):
|
||||
if self._is_windows():
|
||||
bin_dir = 'Scripts'
|
||||
virtualenv_path = self.query_virtualenv_path()
|
||||
if virtualenv_path:
|
||||
self.python_paths[binary] = os.path.abspath(os.path.join(virtualenv_path, bin_dir, binary))
|
||||
else:
|
||||
self.python_paths[binary] = self.query_exe(binary)
|
||||
self.python_paths[binary] = os.path.abspath(os.path.join(virtualenv_path, bin_dir, binary))
|
||||
|
||||
return self.python_paths[binary]
|
||||
|
||||
def query_python_site_packages_path(self):
|
||||
@ -257,10 +260,20 @@ class VirtualenvMixin(object):
|
||||
else:
|
||||
self.fatal("install_module() doesn't understand an install_method of %s!" % install_method)
|
||||
|
||||
# Add --find-links pages to look at
|
||||
# Add --find-links pages to look at. Add --trusted-host automatically if
|
||||
# the host isn't secure. This allows modern versions of pip to connect
|
||||
# without requiring an override.
|
||||
proxxy = Proxxy(self.config, self.log_obj)
|
||||
trusted_hosts = set()
|
||||
for link in proxxy.get_proxies_and_urls(c.get('find_links', [])):
|
||||
command.extend(["--find-links", link])
|
||||
parsed = urlparse.urlparse(link)
|
||||
if parsed.scheme != 'https':
|
||||
trusted_hosts.add(parsed.hostname)
|
||||
|
||||
if self.pip_version >= distutils.version.LooseVersion('6.0'):
|
||||
for host in sorted(trusted_hosts):
|
||||
command.extend(['--trusted-host', host])
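In other words, every --find-links mirror that is not served over HTTPS contributes its hostname to --trusted-host, and the flag is only emitted when pip is new enough (6.0 or later) to accept it. A rough standalone sketch of that derivation, using Python 2 urlparse as this module does:

    import distutils.version
    import urlparse

    def pip_extra_args(find_links, pip_version):
        args = []
        trusted_hosts = set()
        for link in find_links:
            args.extend(['--find-links', link])
            parsed = urlparse.urlparse(link)
            if parsed.scheme != 'https':
                trusted_hosts.add(parsed.hostname)
        if distutils.version.LooseVersion(pip_version) >= distutils.version.LooseVersion('6.0'):
            for host in sorted(trusted_hosts):
                args.extend(['--trusted-host', host])
        return args

    print(pip_extra_args(['http://pypi.pub.build.mozilla.org/pub'], '9.0.1'))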
|
||||
|
||||
# module_url can be None if only specifying requirements files
|
||||
if module_url:
|
||||
@ -349,37 +362,55 @@ class VirtualenvMixin(object):
|
||||
dirs = self.query_abs_dirs()
|
||||
venv_path = self.query_virtualenv_path()
|
||||
self.info("Creating virtualenv %s" % venv_path)
|
||||
virtualenv = c.get('virtualenv', self.query_exe('virtualenv'))
|
||||
if isinstance(virtualenv, str):
|
||||
# allow for [python, virtualenv] in config
|
||||
virtualenv = [virtualenv]
|
||||
|
||||
if not os.path.exists(virtualenv[0]) and not self.which(virtualenv[0]):
|
||||
self.add_summary("The executable '%s' is not found; not creating "
|
||||
"virtualenv!" % virtualenv[0], level=FATAL)
|
||||
return -1
|
||||
# If running from a source checkout, use the virtualenv that is
|
||||
# vendored since that is deterministic.
|
||||
if self.topsrcdir:
|
||||
virtualenv = [
|
||||
sys.executable,
|
||||
os.path.join(self.topsrcdir, 'python', 'virtualenv', 'virtualenv.py')
|
||||
]
|
||||
virtualenv_options = c.get('virtualenv_options', [])
|
||||
# Don't create symlinks. If we don't do this, permissions issues may
|
||||
# hinder virtualenv creation or operation. Ideally we should do this
|
||||
# below when using the system virtualenv. However, this is a newer
|
||||
# feature and isn't guaranteed to be supported.
|
||||
virtualenv_options.append('--always-copy')
|
||||
|
||||
# https://bugs.launchpad.net/virtualenv/+bug/352844/comments/3
|
||||
# https://bugzilla.mozilla.org/show_bug.cgi?id=700415#c50
|
||||
if c.get('virtualenv_python_dll'):
|
||||
# We may someday want to copy a differently-named dll, but
|
||||
# let's not think about that right now =\
|
||||
dll_name = os.path.basename(c['virtualenv_python_dll'])
|
||||
target = self.query_python_path(dll_name)
|
||||
scripts_dir = os.path.dirname(target)
|
||||
self.mkdir_p(scripts_dir)
|
||||
self.copyfile(c['virtualenv_python_dll'], target, error_level=WARNING)
|
||||
# No source checkout. Try to find virtualenv from config options
|
||||
# or search path.
|
||||
else:
|
||||
self.mkdir_p(dirs['abs_work_dir'])
|
||||
virtualenv = c.get('virtualenv', self.query_exe('virtualenv'))
|
||||
if isinstance(virtualenv, str):
|
||||
# allow for [python, virtualenv] in config
|
||||
virtualenv = [virtualenv]
|
||||
|
||||
# make this list configurable?
|
||||
for module in ('distribute', 'pip'):
|
||||
if c.get('%s_url' % module):
|
||||
self.download_file(c['%s_url' % module],
|
||||
parent_dir=dirs['abs_work_dir'])
|
||||
if not os.path.exists(virtualenv[0]) and not self.which(virtualenv[0]):
|
||||
self.add_summary("The executable '%s' is not found; not creating "
|
||||
"virtualenv!" % virtualenv[0], level=FATAL)
|
||||
return -1
|
||||
|
||||
virtualenv_options = c.get('virtualenv_options',
|
||||
['--no-site-packages', '--distribute'])
|
||||
# https://bugs.launchpad.net/virtualenv/+bug/352844/comments/3
|
||||
# https://bugzilla.mozilla.org/show_bug.cgi?id=700415#c50
|
||||
if c.get('virtualenv_python_dll'):
|
||||
# We may someday want to copy a differently-named dll, but
|
||||
# let's not think about that right now =\
|
||||
dll_name = os.path.basename(c['virtualenv_python_dll'])
|
||||
target = self.query_python_path(dll_name)
|
||||
scripts_dir = os.path.dirname(target)
|
||||
self.mkdir_p(scripts_dir)
|
||||
self.copyfile(c['virtualenv_python_dll'], target, error_level=WARNING)
|
||||
else:
|
||||
self.mkdir_p(dirs['abs_work_dir'])
|
||||
|
||||
# make this list configurable?
|
||||
for module in ('distribute', 'pip'):
|
||||
if c.get('%s_url' % module):
|
||||
self.download_file(c['%s_url' % module],
|
||||
parent_dir=dirs['abs_work_dir'])
|
||||
|
||||
virtualenv_options = c.get('virtualenv_options',
|
||||
['--no-site-packages', '--distribute'])
|
||||
|
||||
if os.path.exists(self.query_python_path()):
|
||||
self.info("Virtualenv %s appears to already exist; skipping virtualenv creation." % self.query_python_path())
|
||||
@ -388,6 +419,18 @@ class VirtualenvMixin(object):
|
||||
cwd=dirs['abs_work_dir'],
|
||||
error_list=VirtualenvErrorList,
|
||||
halt_on_failure=True)
|
||||
|
||||
# Resolve the pip version so we can conditionally do things if we have
|
||||
# a modern pip.
|
||||
pip = self.query_python_path('pip')
|
||||
output = self.get_output_from_command([pip, '--version'],
|
||||
halt_on_failure=True)
|
||||
words = output.split()
|
||||
if words[0] != 'pip':
|
||||
self.fatal('pip --version output is weird: %s' % output)
|
||||
pip_version = words[1]
|
||||
self.pip_version = distutils.version.LooseVersion(pip_version)
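The probe above shells out to pip --version once and keeps a comparable version object so later steps can branch on pip's capabilities. A sketch of just the parsing step, on its own:

    import distutils.version

    def parse_pip_version(output):
        # Typical output: "pip 9.0.1 from /usr/lib/python2.7/site-packages (python 2.7)"
        words = output.split()
        if not words or words[0] != 'pip':
            raise ValueError('pip --version output is weird: %r' % output)
        return distutils.version.LooseVersion(words[1])

    assert parse_pip_version('pip 9.0.1 from /x (python 2.7)') >= distutils.version.LooseVersion('6.0')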
|
||||
|
||||
if not modules:
|
||||
modules = c.get('virtualenv_modules', [])
|
||||
if not requirements:
|
||||
|
@ -1368,7 +1368,8 @@ class ScriptMixin(PlatformMixin):
|
||||
returncode = int(p.proc.returncode)
|
||||
else:
|
||||
p = subprocess.Popen(command, shell=shell, stdout=subprocess.PIPE,
|
||||
cwd=cwd, stderr=subprocess.STDOUT, env=env)
|
||||
cwd=cwd, stderr=subprocess.STDOUT, env=env,
|
||||
bufsize=0)
|
||||
loop = True
|
||||
while loop:
|
||||
if p.poll() is not None:
|
||||
@ -1785,6 +1786,21 @@ class BaseScript(ScriptMixin, LogMixin, object):
|
||||
self.new_log_obj(default_log_level=default_log_level)
|
||||
self.script_obj = self
|
||||
|
||||
# Indicate we're a source checkout if VCS directory is present at the
|
||||
# appropriate place. This code will break if this file is ever moved
|
||||
# to another directory.
|
||||
self.topsrcdir = None
|
||||
|
||||
srcreldir = 'testing/mozharness/mozharness/base'
|
||||
here = os.path.normpath(os.path.dirname(__file__))
|
||||
if here.replace('\\', '/').endswith(srcreldir):
|
||||
topsrcdir = os.path.normpath(os.path.join(here, '..', '..',
|
||||
'..', '..'))
|
||||
hg_dir = os.path.join(topsrcdir, '.hg')
|
||||
git_dir = os.path.join(topsrcdir, '.git')
|
||||
if os.path.isdir(hg_dir) or os.path.isdir(git_dir):
|
||||
self.topsrcdir = topsrcdir
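That is, the script treats itself as running from a source checkout only when this file sits at its expected in-tree path and the computed tree root contains a .hg or .git directory. A standalone sketch of the same check:

    import os

    def detect_topsrcdir(here, srcreldir='testing/mozharness/mozharness/base'):
        here = os.path.normpath(here)
        if not here.replace('\\', '/').endswith(srcreldir):
            return None
        topsrcdir = os.path.normpath(os.path.join(here, '..', '..', '..', '..'))
        for vcs_dir in ('.hg', '.git'):
            if os.path.isdir(os.path.join(topsrcdir, vcs_dir)):
                return topsrcdir
        return None

    # Usage: self.topsrcdir = detect_topsrcdir(os.path.dirname(__file__))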
|
||||
|
||||
# Set self.config to read-only.
|
||||
#
|
||||
# We can create intermediate config info programmatically from
|
||||
|
@ -47,8 +47,14 @@ class TooltoolMixin(object):
|
||||
def tooltool_fetch(self, manifest,
|
||||
output_dir=None, privileged=False, cache=None):
|
||||
"""docstring for tooltool_fetch"""
|
||||
|
||||
if self.config.get("download_tooltool"):
|
||||
# Use vendored tooltool.py if available.
|
||||
if self.topsrcdir:
|
||||
cmd = [
|
||||
sys.executable,
|
||||
os.path.join(self.topsrcdir, 'testing', 'docker', 'recipes',
|
||||
'tooltool.py')
|
||||
]
|
||||
elif self.config.get("download_tooltool"):
|
||||
cmd = [sys.executable, self._fetch_tooltool_py()]
|
||||
else:
|
||||
cmd = self.query_exe('tooltool.py', return_type='list')
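So the lookup order becomes: the tooltool.py vendored in the source checkout, then a freshly downloaded copy, then whatever the configuration puts on the search path. A sketch of that preference chain; the callable parameters here are stand-ins for the mixin's own helpers:

    import os
    import sys

    def tooltool_command(topsrcdir, download_tooltool, fetch_tooltool_py, query_exe):
        if topsrcdir:
            return [sys.executable,
                    os.path.join(topsrcdir, 'testing', 'docker', 'recipes', 'tooltool.py')]
        if download_tooltool:
            return [sys.executable, fetch_tooltool_py()]
        return query_exe('tooltool.py', return_type='list')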
|
||||
|
@ -0,0 +1,154 @@
|
||||
/* -*- Mode: indent-tabs-mode: nil; js-indent-level: 2 -*- */
|
||||
/* vim: set sts=2 sw=2 et tw=80: */
"use strict";

// Tests whether not too many APIs are visible by default.
// This file is used by test_ext_all_apis.html in browser/ and mobile/android/,
// which may modify the following variables to add or remove expected APIs.
/* globals expectedContentApisTargetSpecific */
/* globals expectedBackgroundApisTargetSpecific */

// Generates a list of expectations.
function generateExpectations(list) {
  return list.reduce((allApis, path) => {
    return allApis.concat(`browser.${path}`, `chrome.${path}`);
  }, []).sort();
}

let expectedCommonApis = [
  "extension.getURL",
  "extension.inIncognitoContext",
  "extension.lastError",
  "i18n.detectLanguage",
  "i18n.getAcceptLanguages",
  "i18n.getMessage",
  "i18n.getUILanguage",
  "runtime.OnInstalledReason",
  "runtime.OnRestartRequiredReason",
  "runtime.PlatformArch",
  "runtime.PlatformOs",
  "runtime.RequestUpdateCheckStatus",
  "runtime.getManifest",
  "runtime.connect",
  "runtime.getURL",
  "runtime.id",
  "runtime.lastError",
  "runtime.onConnect",
  "runtime.onMessage",
  "runtime.sendMessage",
  // If you want to add a new powerful test API, please see bug 1287233.
  "test.assertEq",
  "test.assertFalse",
  "test.assertTrue",
  "test.fail",
  "test.log",
  "test.notifyFail",
  "test.notifyPass",
  "test.onMessage",
  "test.sendMessage",
  "test.succeed",
];

let expectedContentApis = [
  ...expectedCommonApis,
  ...expectedContentApisTargetSpecific,
];

let expectedBackgroundApis = [
  ...expectedCommonApis,
  ...expectedBackgroundApisTargetSpecific,
  "extension.ViewType",
  "extension.getBackgroundPage",
  "extension.getViews",
  "extension.isAllowedFileSchemeAccess",
  "extension.isAllowedIncognitoAccess",
  // Note: extensionTypes is not visible in Chrome.
  "extensionTypes.ImageFormat",
  "extensionTypes.RunAt",
  "management.ExtensionDisabledReason",
  "management.ExtensionInstallType",
  "management.ExtensionType",
  "management.getSelf",
  "management.uninstallSelf",
  "runtime.getBackgroundPage",
  "runtime.getBrowserInfo",
  "runtime.getPlatformInfo",
  "runtime.onUpdateAvailable",
  "runtime.openOptionsPage",
  "runtime.reload",
  "runtime.setUninstallURL",
];

function sendAllApis() {
  function isEvent(key, val) {
    if (!/^on[A-Z]/.test(key)) {
      return false;
    }
    let eventKeys = [];
    for (let prop in val) {
      eventKeys.push(prop);
    }
    eventKeys = eventKeys.sort().join();
    return eventKeys === "addListener,hasListener,removeListener";
  }
  function mayRecurse(key, val) {
    if (Object.keys(val).filter(k => !/^[A-Z\-0-9_]+$/.test(k)).length === 0) {
      // Don't recurse on constants and empty objects.
      return false;
    }
    return !isEvent(key, val);
  }

  let results = [];
  function diveDeeper(path, obj) {
    for (let key in obj) {
      let val = obj[key];
      if (typeof val == "object" && val !== null && mayRecurse(key, val)) {
        diveDeeper(`${path}.${key}`, val);
      } else {
        results.push(`${path}.${key}`);
      }
    }
  }
  diveDeeper("browser", browser);
  diveDeeper("chrome", chrome);
  browser.test.sendMessage("allApis", results.sort());
}

add_task(function* test_enumerate_content_script_apis() {
  let extensionData = {
    manifest: {
      content_scripts: [{
        matches: ["http://mochi.test/*/file_sample.html"],
        js: ["contentscript.js"],
        run_at: "document_start",
      }],
    },
    files: {
      "contentscript.js": sendAllApis,
    },
  };
  let extension = ExtensionTestUtils.loadExtension(extensionData);
  yield extension.startup();

  let win = window.open("file_sample.html");
  let actualApis = yield extension.awaitMessage("allApis");
  win.close();
  let expectedApis = generateExpectations(expectedContentApis);
  isDeeply(actualApis, expectedApis, "content script APIs");

  yield extension.unload();
});

add_task(function* test_enumerate_background_script_apis() {
  let extensionData = {
    background: sendAllApis,
  };
  let extension = ExtensionTestUtils.loadExtension(extensionData);
  yield extension.startup();
  let actualApis = yield extension.awaitMessage("allApis");
  let expectedApis = generateExpectations(expectedBackgroundApis);
  isDeeply(actualApis, expectedApis, "background script APIs");

  yield extension.unload();
});
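For readers skimming the assertions above, here is an illustrative sketch (not part of the patch) of what `generateExpectations` produces; the sample input paths below are made up for illustration only.

// Illustrative sketch (not part of the patch): generateExpectations() pairs
// every API path with both namespaces and sorts the result, so the comparison
// against the enumerated browser.*/chrome.* leaves is order-independent.
function generateExpectations(list) {
  return list.reduce((allApis, path) => {
    return allApis.concat(`browser.${path}`, `chrome.${path}`);
  }, []).sort();
}

// Hypothetical input, for illustration only:
console.log(generateExpectations(["runtime.id", "i18n.getMessage"]));
// -> ["browser.i18n.getMessage", "browser.runtime.id",
//     "chrome.i18n.getMessage", "chrome.runtime.id"]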
@ -5,10 +5,10 @@

#include "Helpers.h"
#include "mozIStorageError.h"
#include "plbase64.h"
#include "prio.h"
#include "nsString.h"
#include "nsNavHistory.h"
#include "mozilla/Base64.h"
#include "mozilla/Services.h"

// The length of guids that are used by history and bookmarks.
@ -201,30 +201,6 @@ ReverseString(const nsString& aInput, nsString& aReversed)
  }
}

static
nsresult
Base64urlEncode(const uint8_t* aBytes,
                uint32_t aNumBytes,
                nsCString& _result)
{
  // SetLength does not set aside space for null termination. PL_Base64Encode
  // will not null terminate, however, nsCStrings must be null terminated. As a
  // result, we set the capacity to be one greater than what we need, and the
  // length to our desired length.
  uint32_t length = (aNumBytes + 2) / 3 * 4; // +2 due to integer math.
  NS_ENSURE_TRUE(_result.SetCapacity(length + 1, fallible),
                 NS_ERROR_OUT_OF_MEMORY);
  _result.SetLength(length);
  (void)PL_Base64Encode(reinterpret_cast<const char*>(aBytes), aNumBytes,
                        _result.BeginWriting());

  // base64url encoding is defined in RFC 4648. It replaces the last two
  // alphabet characters of base64 encoding with '-' and '_' respectively.
  _result.ReplaceChar('+', '-');
  _result.ReplaceChar('/', '_');
  return NS_OK;
}

#ifdef XP_WIN
} // namespace places
} // namespace mozilla
@ -284,7 +260,8 @@ GenerateGUID(nsCString& _guid)
  nsresult rv = GenerateRandomBytes(kRequiredBytesLength, buffer);
  NS_ENSURE_SUCCESS(rv, rv);

  rv = Base64urlEncode(buffer, kRequiredBytesLength, _guid);
  rv = Base64URLEncode(kRequiredBytesLength, buffer,
                       Base64URLEncodePaddingPolicy::Omit, _guid);
  NS_ENSURE_SUCCESS(rv, rv);

  NS_ASSERTION(_guid.Length() == GUID_LENGTH, "GUID is not the right size!");

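As a side note on the hunk above: GenerateGUID switches from the local PL_Base64Encode-based helper to mozilla::Base64URLEncode with padding omitted. A minimal Node.js sketch of the same base64url transformation (RFC 4648), assuming the 9-byte input length that a 12-character GUID implies (the constant itself is not shown in this hunk):

// Minimal sketch (not part of the patch), assuming 9 random input bytes,
// which is what a 12-character base64url GUID implies (9 / 3 * 4 = 12).
const crypto = require("crypto");

function base64urlGuidSketch() {
  return crypto.randomBytes(9)
               .toString("base64")   // standard base64; 12 chars, no padding needed
               .replace(/\+/g, "-")  // base64url: '+' becomes '-'
               .replace(/\//g, "_"); // base64url: '/' becomes '_'
}

console.log(base64urlGuidSketch().length); // 12, matching GUID_LENGTH above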
@ -842,7 +842,15 @@ namespace places {

  int64_t lastInsertedId = aArgs->AsInt64(1);

  nsNavHistory::StoreLastInsertedId(table, lastInsertedId);
  MOZ_ASSERT(table.EqualsLiteral("moz_places") ||
             table.EqualsLiteral("moz_historyvisits") ||
             table.EqualsLiteral("moz_bookmarks"));

  if (table.EqualsLiteral("moz_bookmarks")) {
    nsNavBookmarks::StoreLastInsertedId(table, lastInsertedId);
  } else {
    nsNavHistory::StoreLastInsertedId(table, lastInsertedId);
  }

  RefPtr<nsVariant> result = new nsVariant();
  rv = result->SetAsInt64(lastInsertedId);

@ -148,6 +148,17 @@ NS_IMPL_ISUPPORTS(nsNavBookmarks
)


Atomic<int64_t> nsNavBookmarks::sLastInsertedItemId(0);


void // static
nsNavBookmarks::StoreLastInsertedId(const nsACString& aTable,
                                    const int64_t aLastInsertedId) {
  MOZ_ASSERT(aTable.EqualsLiteral("moz_bookmarks"));
  sLastInsertedItemId = aLastInsertedId;
}


nsresult
nsNavBookmarks::Init()
{
@ -346,7 +357,7 @@ nsNavBookmarks::InsertBookmarkInDB(int64_t aPlaceId,
      "dateAdded, lastModified, guid) "
    "VALUES (:item_id, :page_id, :item_type, :parent, :item_index, "
            ":item_title, :date_added, :last_modified, "
            "IFNULL(:item_guid, GENERATE_GUID()))"
            ":item_guid)"
  );
  NS_ENSURE_STATE(stmt);
  mozStorageStatementScoper scoper(stmt);
@ -395,34 +406,22 @@ nsNavBookmarks::InsertBookmarkInDB(int64_t aPlaceId,
  if (_guid.Length() == 12) {
    MOZ_ASSERT(IsValidGUID(_guid));
    rv = stmt->BindUTF8StringByName(NS_LITERAL_CSTRING("item_guid"), _guid);
    NS_ENSURE_SUCCESS(rv, rv);
  }
  else {
    rv = stmt->BindNullByName(NS_LITERAL_CSTRING("item_guid"));
    nsAutoCString guid;
    rv = GenerateGUID(guid);
    NS_ENSURE_SUCCESS(rv, rv);
    rv = stmt->BindUTF8StringByName(NS_LITERAL_CSTRING("item_guid"), guid);
    NS_ENSURE_SUCCESS(rv, rv);
    _guid.Assign(guid);
  }
  NS_ENSURE_SUCCESS(rv, rv);

  rv = stmt->Execute();
  NS_ENSURE_SUCCESS(rv, rv);

  if (*_itemId == -1) {
    // Get the newly inserted item id and GUID.
    nsCOMPtr<mozIStorageStatement> lastInsertIdStmt = mDB->GetStatement(
      "SELECT id, guid "
      "FROM moz_bookmarks "
      "ORDER BY ROWID DESC "
      "LIMIT 1"
    );
    NS_ENSURE_STATE(lastInsertIdStmt);
    mozStorageStatementScoper lastInsertIdScoper(lastInsertIdStmt);

    bool hasResult;
    rv = lastInsertIdStmt->ExecuteStep(&hasResult);
    NS_ENSURE_SUCCESS(rv, rv);
    NS_ENSURE_TRUE(hasResult, NS_ERROR_UNEXPECTED);
    rv = lastInsertIdStmt->GetInt64(0, _itemId);
    NS_ENSURE_SUCCESS(rv, rv);
    rv = lastInsertIdStmt->GetUTF8String(1, _guid);
    NS_ENSURE_SUCCESS(rv, rv);
    *_itemId = sLastInsertedItemId;
  }

  if (aParentId > 0) {

@ -216,6 +216,10 @@ public:
  static const int32_t kGetChildrenIndex_Type;
  static const int32_t kGetChildrenIndex_PlaceID;

  static mozilla::Atomic<int64_t> sLastInsertedItemId;
  static void StoreLastInsertedId(const nsACString& aTable,
                                  const int64_t aLastInsertedId);

private:
  static nsNavBookmarks* gBookmarksService;

@ -210,6 +210,7 @@
  "CREATE TEMP TRIGGER moz_bookmarks_foreign_count_afterinsert_trigger " \
  "AFTER INSERT ON moz_bookmarks FOR EACH ROW " \
  "BEGIN " \
    "SELECT store_last_inserted_id('moz_bookmarks', NEW.id); " \
    "UPDATE moz_places " \
    "SET foreign_count = foreign_count + 1 " \
    "WHERE id = NEW.fk;" \

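The new `store_last_inserted_id('moz_bookmarks', NEW.id)` call in this trigger is what feeds the StoreLastInsertedId plumbing added above: the id of the freshly inserted row is cached as a side effect of the INSERT, so InsertBookmarkInDB no longer needs its "SELECT id, guid ... ORDER BY ROWID DESC LIMIT 1" follow-up query. A rough JavaScript sketch of that pattern (not part of the patch; the helper names below are stand-ins, not real APIs):

// Rough sketch (not part of the patch): storeLastInsertedId() and
// afterInsertTrigger() are stand-ins for the SQL function and the trigger.
let lastInsertedIds = { moz_places: 0, moz_historyvisits: 0, moz_bookmarks: 0 };

// Equivalent of the registered store_last_inserted_id() SQL function.
function storeLastInsertedId(table, id) {
  if (!(table in lastInsertedIds)) {
    throw new Error(`Unexpected table: ${table}`);
  }
  lastInsertedIds[table] = id;
}

// Equivalent of the AFTER INSERT trigger body: runs once per inserted row.
function afterInsertTrigger(table, newRowId) {
  storeLastInsertedId(table, newRowId);
}

// The caller can read the cached id directly instead of re-querying,
// which is what InsertBookmarkInDB now does via sLastInsertedItemId.
afterInsertTrigger("moz_bookmarks", 42);
console.log(lastInsertedIds.moz_bookmarks); // 42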
@ -0,0 +1,70 @@
/**
 * This test ensures that reinserting a folder within a transaction gives it
 * a different GUID, and passes the GUID to the observers.
 */

add_task(function* test_removeFolderTransaction_reinsert() {
  let folder = yield PlacesUtils.bookmarks.insert({
    type: PlacesUtils.bookmarks.TYPE_FOLDER,
    parentGuid: PlacesUtils.bookmarks.menuGuid,
    title: "Test folder",
  });
  let folderId = yield PlacesUtils.promiseItemId(folder.guid);
  let fx = yield PlacesUtils.bookmarks.insert({
    parentGuid: folder.guid,
    title: "Get Firefox!",
    url: "http://getfirefox.com",
  });
  let fxId = yield PlacesUtils.promiseItemId(fx.guid);
  let tb = yield PlacesUtils.bookmarks.insert({
    parentGuid: folder.guid,
    title: "Get Thunderbird!",
    url: "http://getthunderbird.com",
  });
  let tbId = yield PlacesUtils.promiseItemId(tb.guid);

  let notifications = [];
  function checkNotifications(expected, message) {
    deepEqual(notifications, expected, message);
    notifications.length = 0;
  }

  let observer = {
    onItemAdded(itemId, parentId, index, type, uri, title, dateAdded, guid,
                parentGuid) {
      notifications.push(["onItemAdded", itemId, parentId, guid, parentGuid]);
    },
    onItemRemoved(itemId, parentId, index, type, uri, guid, parentGuid) {
      notifications.push(["onItemRemoved", itemId, parentId, guid, parentGuid]);
    },
  };
  PlacesUtils.bookmarks.addObserver(observer, false);
  PlacesUtils.registerShutdownFunction(function() {
    PlacesUtils.bookmarks.removeObserver(observer);
  });

  let transaction = PlacesUtils.bookmarks.getRemoveFolderTransaction(folderId);
  deepEqual(notifications, [], "We haven't executed the transaction yet");

  transaction.doTransaction();
  checkNotifications([
    ["onItemRemoved", tbId, folderId, tb.guid, folder.guid],
    ["onItemRemoved", fxId, folderId, fx.guid, folder.guid],
    ["onItemRemoved", folderId, PlacesUtils.bookmarksMenuFolderId, folder.guid,
     PlacesUtils.bookmarks.menuGuid],
  ], "Executing transaction should remove folder and its descendants");

  transaction.undoTransaction();
  // At this point, the restored folder has the same ID, but a different GUID.
  let newFolderGuid = yield PlacesUtils.promiseItemGuid(folderId);
  checkNotifications([
    ["onItemAdded", folderId, PlacesUtils.bookmarksMenuFolderId, newFolderGuid,
     PlacesUtils.bookmarks.menuGuid],
  ], "Undo should reinsert folder with same ID and different GUID");

  transaction.redoTransaction();
  checkNotifications([
    ["onItemRemoved", folderId, PlacesUtils.bookmarksMenuFolderId,
     newFolderGuid, PlacesUtils.bookmarks.menuGuid],
  ], "Redo should forward new GUID to observer");
});
@ -45,5 +45,6 @@ skip-if = toolkit == 'android' || toolkit == 'gonk'
[test_keywords.js]
[test_nsINavBookmarkObserver.js]
[test_protectRoots.js]
[test_removeFolderTransaction_reinsert.js]
[test_removeItem.js]
[test_savedsearches.js]

@ -20,6 +20,7 @@ XPCOMUtils.defineLazyGetter(this, "kDebug", () => {
});

const kContentChangeThresholdPx = 5;
const kBrightTextSampleSize = 5;
const kModalHighlightRepaintFreqMs = 100;
const kHighlightAllPref = "findbar.highlightAll";
const kModalHighlightPref = "findbar.modalHighlight";
@ -57,8 +58,7 @@ const kModalStyles = {
    ["vertical-align", "top !important"]
  ],
  maskNode: [
    ["background", "#000"],
    ["opacity", ".35"],
    ["background", "rgba(0,0,0,.35)"],
    ["pointer-events", "none"],
    ["position", "absolute"],
    ["z-index", 1]
@ -68,16 +68,7 @@ const kModalStyles = {
    ["top", 0],
    ["left", 0]
  ],
  maskNodeBrightText: [ ["background", "#fff"] ],
  maskRect: [
    ["background", "#fff"],
    ["border-radius", "3px"],
    ["margin", "-1px 0 0 -1px !important"],
    ["padding", "0 1px 2px 1px !important"],
    ["position", "absolute"],
    ["white-space", "nowrap"]
  ],
  maskRectBrightText: [ ["background", "#000"] ]
  maskNodeBrightText: [ ["background", "rgba(255,255,255,.35)"] ]
};
const kModalOutlineAnim = {
  "keyframes": [
@ -116,6 +107,9 @@ function mockAnonymousContentNode(domNode) {
    },
    setAnimationForElement(id, keyframes, duration) {
      return (domNode.querySelector("#" + id) || domNode).animate(keyframes, duration);
    },
    setCutoutRectsForElement(id, rects) {
      // no-op for now.
    }
  };
}
@ -364,7 +358,6 @@ FinderHighlighter.prototype = {

    this._removeHighlightAllMask(window);
    this._removeModalHighlightListeners(window);
    delete dict.brightText;

    dict.visible = false;
  },
@ -420,11 +413,6 @@ FinderHighlighter.prototype = {
      return;
    }

    let fontStyle = this._getRangeFontStyle(foundRange);
    if (typeof dict.brightText == "undefined") {
      dict.brightText = this._isColorBright(fontStyle.color);
    }

    if (data.findAgain)
      dict.updateAllRanges = true;

@ -433,7 +421,7 @@ FinderHighlighter.prototype = {
      else
        this._maybeCreateModalHighlightNodes(window);

      this._updateRangeOutline(dict, textContent, fontStyle);
      this._updateRangeOutline(dict, textContent);
    }

    let outlineNode = dict.modalHighlightOutline;
@ -469,6 +457,7 @@ FinderHighlighter.prototype = {
    dict.dynamicRangesSet.clear();
    dict.frames.clear();
    dict.modalHighlightRectsMap.clear();
    dict.brightText = null;
  },

  /**
@ -572,7 +561,7 @@ FinderHighlighter.prototype = {
   * @return {Rect}
   */
  _getRootBounds(window, includeScroll = true) {
    let dwu = this._getDWU(window);
    let dwu = this._getDWU(window.top);
    let cssPageRect = Rect.fromRect(dwu.getRootBounds());
    let scrollX = {};
    let scrollY = {};
@ -718,17 +707,60 @@ FinderHighlighter.prototype = {
    return new Color(...cssColor).isBright;
  },

  /**
   * Detects if the overall text color in the page can be described as bright.
   * This is done according to the following algorithm:
   * 1. With the entire set of ranges that we have found thus far;
   * 2. Get an odd-numbered `sampleSize`, with a maximum of `kBrightTextSampleSize`
   *    ranges,
   * 3. Slice the set of ranges into `sampleSize` number of equal parts,
   * 4. Grab the first range for each slice and inspect the brightness of the
   *    color of its text content.
   * 5. When the majority of ranges are counted as containing bright colored text,
   *    the page is considered to contain bright text overall.
   *
   * @param {Object} dict Dictionary of properties belonging to the
   *                      currently active window. The page text color property
   *                      will be recorded in `dict.brightText` as `true` or `false`.
   */
  _detectBrightText(dict) {
    let sampleSize = Math.min(dict.modalHighlightRectsMap.size, kBrightTextSampleSize);
    let ranges = [...dict.modalHighlightRectsMap.keys()];
    let rangesCount = ranges.length;
    // Make sure the sample size is an odd number.
    if (sampleSize % 2 == 0) {
      // Make the currently found range weigh heavier.
      if (dict.currentFoundRange) {
        ranges.push(dict.currentFoundRange);
        ++sampleSize;
        ++rangesCount;
      } else {
        --sampleSize;
      }
    }
    let brightCount = 0;
    for (let i = 0; i < sampleSize; ++i) {
      let range = ranges[Math.floor((rangesCount / sampleSize) * i)];
      let fontStyle = this._getRangeFontStyle(range);
      if (this._isColorBright(fontStyle.color))
        ++brightCount;
    }

    dict.brightText = (brightCount >= Math.ceil(sampleSize / 2));
  },

  /**
   * Checks if a range is inside a DOM node that's positioned in a way that it
   * doesn't scroll along when the document is scrolled and/or zoomed. This
   * is the case for 'fixed' and 'sticky' positioned elements and elements inside
   * (i)frames.
   * is the case for 'fixed' and 'sticky' positioned elements, elements inside
   * (i)frames and elements that have their overflow styles set to 'auto' or
   * 'scroll'.
   *
   * @param {nsIDOMRange} range Range that may be enclosed in a dynamic container
   * @return {Boolean}
   */
  _isInDynamicContainer(range) {
    const kFixed = new Set(["fixed", "sticky"]);
    const kFixed = new Set(["fixed", "sticky", "scroll", "auto"]);
    let node = range.startContainer;
    while (node.nodeType != 1)
      node = node.parentNode;
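For the `_detectBrightText` helper added in the hunk above, the sampling step is the only non-obvious part. This small sketch (not part of the patch) shows which ranges end up being inspected:

// Illustrative sketch (not part of the patch): which ranges _detectBrightText
// samples. With rangesCount found ranges and an odd sampleSize (at most
// kBrightTextSampleSize), it inspects the first range of each equal slice.
function sampledIndices(rangesCount, sampleSize) {
  let indices = [];
  for (let i = 0; i < sampleSize; ++i) {
    indices.push(Math.floor((rangesCount / sampleSize) * i));
  }
  return indices;
}

// e.g. 12 ranges sampled 5 times -> ranges 0, 2, 4, 7 and 9 are inspected;
// if 3 or more of them have bright text, dict.brightText becomes true.
console.log(sampledIndices(12, 5)); // [0, 2, 4, 7, 9]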
@ -744,8 +776,11 @@ FinderHighlighter.prototype = {
    }

    do {
      if (kFixed.has(window.getComputedStyle(node, null).position))
      let style = window.getComputedStyle(node, null);
      if (kFixed.has(style.position) || kFixed.has(style.overflow) ||
          kFixed.has(style.overflowX) || kFixed.has(style.overflowY)) {
        return true;
      }
      node = node.parentNode;
    } while (node && node != document.documentElement)

@ -824,11 +859,11 @@ FinderHighlighter.prototype = {
   *                        active window
   */
  _updateDynamicRangesRects(dict) {
    for (let range of dict.dynamicRangesSet)
      this._updateRangeRects(range, false, dict);
    // Reset the frame bounds cache.
    for (let frame of dict.frames.keys())
      dict.frames.set(frame, null);
    for (let range of dict.dynamicRangesSet)
      this._updateRangeRects(range, false, dict);
  },

  /**
@ -1006,49 +1041,47 @@ FinderHighlighter.prototype = {
  _repaintHighlightAllMask(window, paintContent = true) {
    window = window.top;
    let dict = this.getForWindow(window);
    let document = window.document;

    const kMaskId = kModalIdPrefix + "-findbar-modalHighlight-outlineMask";
    let maskNode = document.createElementNS(kNSHTML, "div");
    if (!dict.modalHighlightAllMask) {
      let document = window.document;
      let maskNode = document.createElementNS(kNSHTML, "div");
      maskNode.setAttribute("id", kMaskId);
      dict.modalHighlightAllMask = kDebug ?
        mockAnonymousContentNode((document.body || document.documentElement).appendChild(maskNode)) :
        document.insertAnonymousContent(maskNode);
    }

    // Make sure the dimmed mask node takes the full width and height that's available.
    let {width, height} = dict.lastWindowDimensions = this._getWindowDimensions(window);
    maskNode.setAttribute("id", kMaskId);
    maskNode.setAttribute("style", this._getStyleString(kModalStyles.maskNode,
    if (typeof dict.brightText != "boolean" || dict.updateAllRanges)
      this._detectBrightText(dict);
    let maskStyle = this._getStyleString(kModalStyles.maskNode,
      [ ["width", width + "px"], ["height", height + "px"] ],
      dict.brightText ? kModalStyles.maskNodeBrightText : [],
      kDebug ? kModalStyles.maskNodeDebug : []));
      kDebug ? kModalStyles.maskNodeDebug : []);
    dict.modalHighlightAllMask.setAttributeForElement(kMaskId, "style", maskStyle);
    if (dict.brightText)
      maskNode.setAttribute("brighttext", "true");
      dict.modalHighlightAllMask.setAttributeForElement(kMaskId, "brighttext", "true");

    let allRects = [];
    if (paintContent || dict.modalHighlightAllMask) {
      this._updateRangeOutline(dict);
      this._updateDynamicRangesRects(dict);
      // Create a DOM node for each rectangle representing the ranges we found.
      let maskContent = [];
      const rectStyle = this._getStyleString(kModalStyles.maskRect,
        dict.brightText ? kModalStyles.maskRectBrightText : []);

      let DOMRect = window.DOMRect;
      for (let [range, rects] of dict.modalHighlightRectsMap) {
        if (dict.updateAllRanges)
          rects = this._updateRangeRects(range);
        if (this._checkOverlap(dict.currentFoundRange, range))
          continue;
        for (let rect of rects) {
          maskContent.push(`<div xmlns="${kNSHTML}" style="${rectStyle}; top: ${rect.y}px;
            left: ${rect.x}px; height: ${rect.height}px; width: ${rect.width}px;"></div>`);
        }
        for (let rect of rects)
          allRects.push(new DOMRect(rect.x, rect.y, rect.width, rect.height));
      }
      dict.updateAllRanges = false;
      maskNode.innerHTML = maskContent.join("");
    }

    // Always remove the current mask and insert it afresh, because we're not
    // free to alter DOM nodes inside the CanvasFrame.
    this._removeHighlightAllMask(window);

    dict.modalHighlightAllMask = kDebug ?
      mockAnonymousContentNode((document.body || document.documentElement).appendChild(maskNode)) :
      document.insertAnonymousContent(maskNode);
    dict.modalHighlightAllMask.setCutoutRectsForElement(kMaskId, allRects);
  },

  /**
@ -1098,7 +1131,7 @@ FinderHighlighter.prototype = {

    window = window.top;
    let dict = this.getForWindow(window);
    let repaintDynamicRanges = (scrollOnly && !!dict.dynamicRangesSet.size);
    let repaintDynamicRanges = ((scrollOnly || contentChanged) && !!dict.dynamicRangesSet.size);

    // When we request to repaint unconditionally, we mean to call
    // `_repaintHighlightAllMask()` right after the timeout.

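The net effect of the `_repaintHighlightAllMask` rewrite above is that found ranges are now cut out of the single dimmed mask via `setCutoutRectsForElement`, instead of being painted as white rects on top of it. A simplified sketch of the rect collection (not part of the patch; it uses plain objects instead of DOMRect and skips the `_checkOverlap` exclusion of the currently found range):

// Simplified sketch (not part of the patch): gather one cutout rect per
// highlight rect, the way the loop above fills `allRects` before calling
// setCutoutRectsForElement(kMaskId, allRects).
function collectCutoutRects(rectsMap) {
  let allRects = [];
  for (let rects of rectsMap.values()) {
    for (let rect of rects) {
      allRects.push({ x: rect.x, y: rect.y, width: rect.width, height: rect.height });
    }
  }
  return allRects;
}

// Two ranges with one rect each yield two holes in the dimmed mask:
let rectsMap = new Map([
  ["range1", [{ x: 10, y: 20, width: 50, height: 12 }]],
  ["range2", [{ x: 10, y: 40, width: 80, height: 12 }]],
]);
console.log(collectCutoutRects(rectsMap).length); // 2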
@ -69,6 +69,7 @@ function promiseTestHighlighterOutput(browser, word, expectedResult, extraTest =
    removeCalls: []
  };
  let lastMaskNode, lastOutlineNode;
  let rects = [];

  // Amount of milliseconds to wait after the last time one of our stubs
  // was called.
@ -103,18 +104,46 @@ function promiseTestHighlighterOutput(browser, word, expectedResult, extraTest =
      Assert.ok(false, `No mask node found, but expected ${expectedResult.rectCount} rects.`);
    }

    if (lastMaskNode) {
      Assert.equal(lastMaskNode.getElementsByTagName("div").length,
        expectedResult.rectCount, `Amount of inserted rects should match for '${word}'.`);
    }
    Assert.equal(rects.length, expectedResult.rectCount,
      `Amount of inserted rects should match for '${word}'.`);

    // Allow more specific assertions to be tested in `extraTest`.
    extraTest = eval(extraTest);
    extraTest(lastMaskNode, lastOutlineNode);
    extraTest(lastMaskNode, lastOutlineNode, rects);

    resolve();
  }

  function stubAnonymousContentNode(domNode, anonNode) {
    let originals = [anonNode.setTextContentForElement,
      anonNode.setAttributeForElement, anonNode.removeAttributeForElement,
      anonNode.setCutoutRectsForElement];
    anonNode.setTextContentForElement = (id, text) => {
      try {
        (domNode.querySelector("#" + id) || domNode).textContent = text;
      } catch (ex) {}
      return originals[0].call(anonNode, id, text);
    };
    anonNode.setAttributeForElement = (id, attrName, attrValue) => {
      try {
        (domNode.querySelector("#" + id) || domNode).setAttribute(attrName, attrValue);
      } catch (ex) {}
      return originals[1].call(anonNode, id, attrName, attrValue);
    };
    anonNode.removeAttributeForElement = (id, attrName) => {
      try {
        let node = domNode.querySelector("#" + id) || domNode;
        if (node.hasAttribute(attrName))
          node.removeAttribute(attrName);
      } catch (ex) {}
      return originals[2].call(anonNode, id, attrName);
    };
    anonNode.setCutoutRectsForElement = (id, cutoutRects) => {
      rects = cutoutRects;
      return originals[3].call(anonNode, id, cutoutRects);
    };
  }

  // Create a function that will stub the original version and collect
  // the arguments so we can check the results later.
  function stub(which) {
@ -132,7 +161,10 @@ function promiseTestHighlighterOutput(browser, word, expectedResult, extraTest =
      timeout = setTimeout(() => {
        finish();
      }, kTimeoutMs);
      return stubbed[which].call(content.document, node);
      let res = stubbed[which].call(content.document, node);
      if (which == "insert")
        stubAnonymousContentNode(node, res);
      return res;
    };
  }
  content.document.insertAnonymousContent = stub("insert");
@ -154,14 +186,14 @@ add_task(function* testModalResults() {
    ["Roland", {
      rectCount: 1,
      insertCalls: [2, 4],
      removeCalls: [1, 2]
      removeCalls: [0, 1]
    }],
    ["their law might propagate their kind", {
      rectCount: 0,
      insertCalls: [31, 32],
      removeCalls: [31, 32],
      extraTest: function(maskNode, outlineNode) {
        Assert.equal(outlineNode.getElementsByTagName("div").length, 3,
      insertCalls: [28, 31],
      removeCalls: [28, 30],
      extraTest: function(maskNode, outlineNode, rects) {
        Assert.equal(outlineNode.getElementsByTagName("div").length, 2,
          "There should be multiple rects drawn");
      }
    }],
@ -173,12 +205,12 @@ add_task(function* testModalResults() {
    ["new", {
      rectCount: 1,
      insertCalls: [1, 4],
      removeCalls: [1, 3]
      removeCalls: [0, 2]
    }],
    ["o", {
      rectCount: 491,
      insertCalls: [3, 7],
      removeCalls: [3, 6]
      insertCalls: [1, 4],
      removeCalls: [0, 2]
    }]
  ]);
  let url = kFixtureBaseURL + "file_FinderSample.html";
@ -214,7 +246,7 @@ add_task(function* testModalSwitching() {
  let expectedResult = {
    rectCount: 1,
    insertCalls: [2, 4],
    removeCalls: [1, 2]
    removeCalls: [0, 1]
  };
  let promise = promiseTestHighlighterOutput(browser, word, expectedResult);
  yield promiseEnterStringIntoFindField(findbar, word);
@ -249,8 +281,8 @@ add_task(function* testDarkPageDetection() {
  let word = "Roland";
  let expectedResult = {
    rectCount: 1,
    insertCalls: [2, 4],
    removeCalls: [1, 2]
    insertCalls: [1, 3],
    removeCalls: [0, 1]
  };
  let promise = promiseTestHighlighterOutput(browser, word, expectedResult, function(node) {
    Assert.ok(!node.hasAttribute("brighttext"), "White HTML page shouldn't have 'brighttext' set");
@ -270,7 +302,7 @@ add_task(function* testDarkPageDetection() {
  let expectedResult = {
    rectCount: 1,
    insertCalls: [2, 4],
    removeCalls: [1, 2]
    removeCalls: [0, 1]
  };

  yield ContentTask.spawn(browser, null, function* () {
@ -307,7 +339,7 @@ add_task(function* testHighlightAllToggle() {
  let expectedResult = {
    rectCount: 1,
    insertCalls: [2, 4],
    removeCalls: [1, 2]
    removeCalls: [0, 1]
  };
  let promise = promiseTestHighlighterOutput(browser, word, expectedResult);
  yield promiseEnterStringIntoFindField(findbar, word);
@ -327,8 +359,8 @@ add_task(function* testHighlightAllToggle() {
  // For posterity, let's switch back.
  expectedResult = {
    rectCount: 2,
    insertCalls: [2, 4],
    removeCalls: [1, 2]
    insertCalls: [1, 3],
    removeCalls: [0, 1]
  };
  promise = promiseTestHighlighterOutput(browser, word, expectedResult);
  yield SpecialPowers.pushPrefEnv({ "set": [[ kHighlightAllPref, true ]] });
@ -351,7 +383,7 @@ add_task(function* testXMLDocument() {
  let expectedResult = {
    rectCount: 0,
    insertCalls: [1, 4],
    removeCalls: [1, 2]
    removeCalls: [0, 1]
  };
  let promise = promiseTestHighlighterOutput(browser, word, expectedResult);
  yield promiseEnterStringIntoFindField(findbar, word);
@ -373,7 +405,7 @@ add_task(function* testHideOnLocationChange() {
  let expectedResult = {
    rectCount: 1,
    insertCalls: [2, 4],
    removeCalls: [1, 2]
    removeCalls: [0, 1]
  };
  let promise = promiseTestHighlighterOutput(browser, word, expectedResult);
  yield promiseEnterStringIntoFindField(findbar, word);