Merge mozilla-central to inbound. a=merge CLOSED TREE

This commit is contained in:
Oana Pop Rus 2019-03-25 12:04:50 +02:00
commit b01d230f27
112 changed files with 9701 additions and 2977 deletions

42
Cargo.lock generated
View File

@ -348,6 +348,23 @@ dependencies = [
"byte-tools 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "bookmark_sync"
version = "0.1.0"
dependencies = [
"atomic_refcell 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"dogear 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"moz_task 0.1.0",
"nserror 0.1.0",
"nsstring 0.1.0",
"storage 0.1.0",
"storage_variant 0.1.0",
"thin-vec 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"xpcom 0.1.0",
]
[[package]]
name = "boxfnonce"
version = "0.0.3"
@ -877,6 +894,15 @@ dependencies = [
"strsim 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "dogear"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"smallbitvec 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "dtoa"
version = "0.4.2"
@ -1195,6 +1221,7 @@ dependencies = [
"audioipc-client 0.4.0",
"audioipc-server 0.2.3",
"bitsdownload 0.1.0",
"bookmark_sync 0.1.0",
"cert_storage 0.0.1",
"cose-c 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
"cubeb-pulse 0.2.0",
@ -1216,6 +1243,7 @@ dependencies = [
"profiler_helper 0.1.0",
"rsdparsa_capi 0.1.0",
"rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"storage 0.1.0",
"u2fhid 0.2.3",
"webrender_bindings 0.1.0",
"xpcom 0.1.0",
@ -1756,6 +1784,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
name = "moz_task"
version = "0.1.0"
dependencies = [
"libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)",
"memchr 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"nserror 0.1.0",
"nsstring 0.1.0",
"xpcom 0.1.0",
@ -2563,6 +2593,17 @@ name = "stable_deref_trait"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "storage"
version = "0.1.0"
dependencies = [
"libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)",
"nserror 0.1.0",
"nsstring 0.1.0",
"storage_variant 0.1.0",
"xpcom 0.1.0",
]
[[package]]
name = "storage_variant"
version = "0.1.0"
@ -3488,6 +3529,7 @@ dependencies = [
"checksum digest 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "05f47366984d3ad862010e22c7ce81a7dbcaebbdfb37241a620f8b6596ee135c"
"checksum dirs 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "88972de891f6118092b643d85a0b28e0678e0f948d7f879aa32f2d5aafe97d2a"
"checksum docopt 0.8.3 (registry+https://github.com/rust-lang/crates.io-index)" = "d8acd393692c503b168471874953a2531df0e9ab77d0b6bbc582395743300a4a"
"checksum dogear 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "bcecbcd636b901efb0b61eea73972bda173c02c98a07fc66dd76e8ee1421ffbf"
"checksum dtoa 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "09c3753c3db574d215cba4ea76018483895d7bff25a31b49ba45db21c48e50ab"
"checksum dtoa-short 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "068d4026697c1a18f0b0bb8cfcad1b0c151b90d8edb9bf4c235ad68128920d1d"
"checksum dwrote 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c31c624339dab99c223a4b26c2e803b7c248adaca91549ce654c76f39a03f5c8"

View File

@ -7571,8 +7571,8 @@ function checkEmptyPageOrigin(browser = gBrowser.selectedBrowser,
}
// ... so for those that don't have them, enforce that the page has the
// system principal (this matches e.g. on about:newtab).
let ssm = Services.scriptSecurityManager;
return ssm.isSystemPrincipal(contentPrincipal);
return contentPrincipal.isSystemPrincipal;
}
function ReportFalseDeceptiveSite() {

View File

@ -2226,7 +2226,7 @@ window._gBrowser = {
userContextId: params.userContextId,
});
}
if (Services.scriptSecurityManager.isSystemPrincipal(params.triggeringPrincipal)) {
if (params.triggeringPrincipal.isSystemPrincipal) {
throw new Error("System principal should never be passed into addWebTab()");
}
return this.addTab(aURI, params);
@ -4581,7 +4581,7 @@ window._gBrowser = {
// For non-system/expanded principals, we bail and show the checkbox
if (promptPrincipal.URI &&
!Services.scriptSecurityManager.isSystemPrincipal(promptPrincipal)) {
!promptPrincipal.isSystemPrincipal) {
let permission = Services.perms.testPermissionFromPrincipal(promptPrincipal,
"focus-tab-by-prompt");
if (permission != Services.perms.ALLOW_ACTION) {

View File

@ -32,7 +32,7 @@ function test_openUILink_checkPrincipal() {
const loadingPrincipal = channel.loadInfo.loadingPrincipal;
is(loadingPrincipal, null, "sanity: correct loadingPrincipal");
const triggeringPrincipal = channel.loadInfo.triggeringPrincipal;
ok(Services.scriptSecurityManager.isSystemPrincipal(triggeringPrincipal),
ok(triggeringPrincipal.isSystemPrincipal,
"sanity: correct triggeringPrincipal");
const principalToInherit = channel.loadInfo.principalToInherit;
ok(principalToInherit.isNullPrincipal, "sanity: correct principalToInherit");

View File

@ -30,7 +30,7 @@ add_task(async function test_principal_click() {
"sanity check - make sure we test the principal for the correct URI");
let triggeringPrincipal = channel.loadInfo.triggeringPrincipal;
ok(Services.scriptSecurityManager.isSystemPrincipal(triggeringPrincipal),
ok(triggeringPrincipal.isSystemPrincipal,
"loading about: from privileged page must have a triggering of System");
let contentPolicyType = channel.loadInfo.externalContentPolicyType;
@ -66,7 +66,7 @@ add_task(async function test_principal_ctrl_click() {
"sanity check - make sure we test the principal for the correct URI");
let triggeringPrincipal = channel.loadInfo.triggeringPrincipal;
ok(Services.scriptSecurityManager.isSystemPrincipal(triggeringPrincipal),
ok(triggeringPrincipal.isSystemPrincipal,
"loading about: from privileged page must have a triggering of System");
let contentPolicyType = channel.loadInfo.externalContentPolicyType;
@ -111,7 +111,7 @@ add_task(async function test_principal_right_click_open_link_in_new_tab() {
"sanity check - make sure we test the principal for the correct URI");
let triggeringPrincipal = channel.loadInfo.triggeringPrincipal;
ok(Services.scriptSecurityManager.isSystemPrincipal(triggeringPrincipal),
ok(triggeringPrincipal.isSystemPrincipal,
"loading about: from privileged page must have a triggering of System");
let contentPolicyType = channel.loadInfo.externalContentPolicyType;

View File

@ -17,6 +17,7 @@ support-files =
[browser_autoplay_blocked.js]
support-files =
browser_autoplay_blocked.html
browser_autoplay_blocked_slow.sjs
../general/audio.ogg
skip-if = verify && os == 'linux' && debug # Bug 1483648
[browser_temporary_permissions_expiry.js]

View File

@ -2,7 +2,9 @@
* Test that a blocked request to autoplay media is shown to the user
*/
const AUTOPLAY_PAGE = getRootDirectory(gTestPath).replace("chrome://mochitests/content", "https://example.com") + "browser_autoplay_blocked.html";
const AUTOPLAY_PAGE = getRootDirectory(gTestPath).replace("chrome://mochitests/content", "https://example.com") + "browser_autoplay_blocked.html";
const SLOW_AUTOPLAY_PAGE = getRootDirectory(gTestPath).replace("chrome://mochitests/content", "https://example.com") + "browser_autoplay_blocked_slow.sjs";
const AUTOPLAY_PREF = "media.autoplay.default";
const AUTOPLAY_PERM = "autoplay-media";
@ -172,3 +174,22 @@ add_task(async function testChangingBlockingSettingDuringNavigation() {
Services.perms.removeAll();
});
// Verify that the autoplay-blocked icon state survives a tab switch while the
// (deliberately slow-loading) page is still loading: hidden on the other tab,
// shown again when returning to the blocked tab.
add_task(async function testSlowLoadingPage() {
  // Force autoplay blocking so the slow page's autoplaying media is blocked.
  Services.prefs.setIntPref(AUTOPLAY_PREF, Ci.nsIAutoplay.BLOCKED);
  let tab1 = await BrowserTestUtils.openNewForegroundTab(gBrowser, "about:home");
  let tab2 = await BrowserTestUtils.openNewForegroundTab(gBrowser, SLOW_AUTOPLAY_PAGE);
  await BrowserTestUtils.switchTab(gBrowser, tab1);
  // Wait until the blocked icon is hidden by switching tabs
  await TestUtils.waitForCondition(() => {
    return BrowserTestUtils.is_hidden(autoplayBlockedIcon());
  });
  // Switching back to the blocked (still-loading) tab must show the icon again.
  await BrowserTestUtils.switchTab(gBrowser, tab2);
  await blockedIconShown(tab2.linkedBrowser);
  BrowserTestUtils.removeTab(tab1);
  BrowserTestUtils.removeTab(tab2);
  // Clean up any permissions recorded while autoplay was blocked.
  Services.perms.removeAll();
});

View File

@ -0,0 +1,30 @@
/* Any copyright is dedicated to the Public Domain.
* http://creativecommons.org/publicdomain/zero/1.0/ */
// How long (ms) the response is held open after the document is written,
// keeping the page in a network "loading" state for the duration of the test.
// NOTE(review): kept as a string; nsITimer.init coerces it — confirm intended.
const DELAY_MS = "1000";
// The served document: an autoplaying <audio> plus a same-document navigation
// (hash change) shortly after load.
const AUTOPLAY_HTML = `<!DOCTYPE HTML>
<html dir="ltr" xml:lang="en-US" lang="en-US">
<head>
<meta charset="utf8">
</head>
<body>
<audio autoplay="autoplay" >
<source src="audio.ogg" />
</audio>
<script>setTimeout(() => { document.location.href = '#foo'; }, 500);</script>
</body>
</html>`;
// sjs entry point: write the page immediately, but only finish the response
// after DELAY_MS, so the browser sees a page that keeps loading.
function handleRequest(req, resp) {
  // Take manual control of the response lifetime; it is finished in the timer.
  resp.processAsync();
  resp.setHeader("Cache-Control", "no-cache", false);
  resp.setHeader("Content-Type", "text/html;charset=utf-8", false);
  let timer = Cc["@mozilla.org/timer;1"].createInstance(Ci.nsITimer);
  resp.write(AUTOPLAY_HTML);
  timer.init(() => {
    // Nothing more to send; just complete the response after the delay.
    resp.write("");
    resp.finish();
  }, DELAY_MS, Ci.nsITimer.TYPE_ONE_SHOT);
}

View File

@ -245,7 +245,7 @@ function openWebLinkIn(url, where, params) {
if (!params.triggeringPrincipal) {
params.triggeringPrincipal = Services.scriptSecurityManager.createNullPrincipal({});
}
if (Services.scriptSecurityManager.isSystemPrincipal(params.triggeringPrincipal)) {
if (params.triggeringPrincipal.isSystemPrincipal) {
throw new Error("System principal should never be passed into openWebLinkIn()");
}

View File

@ -148,15 +148,23 @@ const GloballyBlockedPermissions = {
entry[prePath] = {};
}
if (entry[prePath][id]) {
return;
}
entry[prePath][id] = true;
// Listen to any top level navigations, once we see one clear the flag
// and remove the listener.
// Clear the flag and remove the listener once the user has navigated.
// WebProgress will report various things including hashchanges to us, the
// navigation we care about is either leaving the current page or reloading.
browser.addProgressListener({
QueryInterface: ChromeUtils.generateQI([Ci.nsIWebProgressListener,
Ci.nsISupportsWeakReference]),
onLocationChange(aWebProgress, aRequest, aLocation, aFlags) {
if (aWebProgress.isTopLevel) {
let hasLeftPage = aLocation.prePath != prePath ||
!(aFlags & Ci.nsIWebProgressListener.LOCATION_CHANGE_SAME_DOCUMENT);
let isReload = !!(aFlags & Ci.nsIWebProgressListener.LOCATION_CHANGE_RELOAD);
if (aWebProgress.isTopLevel && (hasLeftPage || isReload)) {
GloballyBlockedPermissions.remove(browser, id, prePath);
browser.removeProgressListener(this);
}

View File

@ -228,24 +228,6 @@ interface nsIScriptSecurityManager : nsISupports
*/
nsIPrincipal getChannelURIPrincipal(in nsIChannel aChannel);
/**
* Check whether a given principal is a system principal. This allows us
* to avoid handing back the system principal to script while allowing
* script to check whether a given principal is system.
*
* @deprecated use nsIPrincipal's accessors for this boolean.
* https://bugzilla.mozilla.org/show_bug.cgi?id=1517483 tracks removing
* this.
*/
boolean isSystemPrincipal(in nsIPrincipal aPrincipal);
%{C++
bool IsSystemPrincipal(nsIPrincipal* aPrincipal) {
bool isSystem = false;
IsSystemPrincipal(aPrincipal, &isSystem);
return isSystem;
}
%}
const unsigned long NO_APP_ID = 0;
const unsigned long UNKNOWN_APP_ID = 4294967295; // UINT32_MAX

View File

@ -447,12 +447,6 @@ nsScriptSecurityManager::GetChannelURIPrincipal(nsIChannel* aChannel,
return *aPrincipal ? NS_OK : NS_ERROR_FAILURE;
}
NS_IMETHODIMP
nsScriptSecurityManager::IsSystemPrincipal(nsIPrincipal* aPrincipal,
bool* aIsSystem) {
*aIsSystem = (aPrincipal == mSystemPrincipal);
return NS_OK;
}
/////////////////////////////
// nsScriptSecurityManager //
@ -472,7 +466,7 @@ NS_IMPL_ISUPPORTS(nsScriptSecurityManager, nsIScriptSecurityManager)
#if defined(DEBUG) && !defined(ANDROID)
static void AssertEvalNotUsingSystemPrincipal(nsIPrincipal* subjectPrincipal,
JSContext* cx) {
if (!nsContentUtils::IsSystemPrincipal(subjectPrincipal)) {
if (!subjectPrincipal->IsSystemPrincipal()) {
return;
}

View File

@ -26,7 +26,7 @@ var secMan = SpecialPowers.Services.scriptSecurityManager;
var principal = SpecialPowers.wrap(document).nodePrincipal;
isnot(principal, undefined, "Should have a principal");
isnot(principal, null, "Should have a non-null principal");
is(secMan.isSystemPrincipal(principal), false,
is(principal.isSystemPrincipal, false,
"Shouldn't have system principal here");
try {
secMan.checkLoadURIWithPrincipal(principal, null,

View File

@ -143,7 +143,7 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=840488
checkScriptEnabled(rootWin, true);
// Privileged frames are immune to docshell flags.
ok(ssm.isSystemPrincipal(chromeWin.document.nodePrincipal), "Sanity check for System Principal");
ok(chromeWin.document.nodePrincipal.isSystemPrincipal, "Sanity check for System Principal");
setScriptEnabledForDocShell(chromeWin, false);
checkScriptEnabled(chromeWin, true);
setScriptEnabledForDocShell(chromeWin, true);

View File

@ -28,7 +28,7 @@ var secMan = SpecialPowers.Cc["@mozilla.org/scriptsecuritymanager;1"]
var sysPrincipal = secMan.getSystemPrincipal();
isnot(sysPrincipal, undefined, "Should have a principal");
isnot(sysPrincipal, null, "Should have a non-null principal");
is(secMan.isSystemPrincipal(sysPrincipal), true,
is(sysPrincipal.isSystemPrincipal, true,
"Should have system principal here");

View File

@ -10,6 +10,8 @@ const {colorUtils} = require("devtools/shared/css/color.js");
const ComputedStylePath = require("./ComputedStylePath");
const DEFAULT_COLOR = {r: 0, g: 0, b: 0, a: 1};
/* Count for linearGradient ID */
let LINEAR_GRADIENT_ID_COUNT = 0;
@ -32,13 +34,13 @@ class ColorPath extends ComputedStylePath {
return keyframe.value;
}
propToState({ keyframes }) {
propToState({ keyframes, name }) {
const maxObject = { distance: -Number.MAX_VALUE };
for (let i = 0; i < keyframes.length - 1; i++) {
const value1 = getRGBA(keyframes[i].value);
const value1 = getRGBA(name, keyframes[i].value);
for (let j = i + 1; j < keyframes.length; j++) {
const value2 = getRGBA(keyframes[j].value);
const value2 = getRGBA(name, keyframes[j].value);
const distance = getRGBADistance(value1, value2);
if (maxObject.distance >= distance) {
@ -55,12 +57,12 @@ class ColorPath extends ComputedStylePath {
const baseValue =
maxObject.value1 < maxObject.value2 ? maxObject.value1 : maxObject.value2;
return { baseValue, maxDistance };
return { baseValue, maxDistance, name };
}
toSegmentValue(computedStyle) {
const { baseValue, maxDistance } = this.state;
const value = getRGBA(computedStyle);
const { baseValue, maxDistance, name } = this.state;
const value = getRGBA(name, computedStyle);
return getRGBADistance(baseValue, value) / maxDistance;
}
@ -153,12 +155,32 @@ class ColorPath extends ComputedStylePath {
/**
* Parse given RGBA string.
*
* @param {String} propertyName
* @param {String} colorString
* e.g. rgb(0, 0, 0) or rgba(0, 0, 0, 0.5) and so on.
* @return {Object}
* RGBA {r: r, g: g, b: b, a: a}.
*/
function getRGBA(colorString) {
function getRGBA(propertyName, colorString) {
// Special handling for CSS property which can specify the not normal CSS color value.
switch (propertyName) {
case "caret-color": {
// This property can specify "auto" keyword.
if (colorString === "auto") {
return DEFAULT_COLOR;
}
break;
}
case "scrollbar-color": {
// This property can specify "auto", "dark", "light" keywords and multiple colors.
if (["auto", "dark", "light"].includes(colorString) ||
colorString.indexOf(" ") > 0) {
return DEFAULT_COLOR;
}
break;
}
}
const color = new colorUtils.CssColor(colorString);
return color.getRGBATuple();
}

View File

@ -16,6 +16,7 @@ support-files =
doc_pseudo.html
doc_short_duration.html
doc_simple_animation.html
doc_special_colors.html
head.js
keyframes-graph_keyframe-marker_head.js
summary-graph_delay-sign_head.js
@ -60,6 +61,7 @@ support-files =
skip-if = (verify && !debug)
[browser_animation_keyframes-graph_keyframe-marker.js]
[browser_animation_keyframes-graph_keyframe-marker-rtl.js]
[browser_animation_keyframes-graph_special-colors.js]
[browser_animation_keyframes-progress-bar.js]
skip-if = (os == "win" && ccov) # Bug 1490981
[browser_animation_keyframes-progress-bar_after-resuming.js]

View File

@ -0,0 +1,35 @@
/* Any copyright is dedicated to the Public Domain.
http://creativecommons.org/publicdomain/zero/1.0/ */
"use strict";
// Per-property expectations: each animated property should produce one
// keyframe marker per keyframe, whose title is the raw keyframe value.
const TEST_DATA = [
  {
    propertyName: "caret-color",
    expectedMarkers: ["auto", "rgb(0, 255, 0)"],
  },
  {
    propertyName: "scrollbar-color",
    expectedMarkers: ["rgb(0, 255, 0) rgb(255, 0, 0)", "auto"],
  },
];
// Test for animatable property which can specify the non standard CSS color value.
add_task(async function() {
  await addTab(URL_ROOT + "doc_special_colors.html");
  const { panel } = await openAnimationInspector();
  for (const { propertyName, expectedMarkers } of TEST_DATA) {
    // Each animated property is rendered with its property name as a class.
    const animatedPropertyEl = panel.querySelector(`.${ propertyName }`);
    ok(animatedPropertyEl, `Animated property ${ propertyName } exists`);
    const markerEls = animatedPropertyEl.querySelectorAll(".keyframe-marker-item");
    is(markerEls.length, expectedMarkers.length,
       `The length of keyframe markers should ${ expectedMarkers.length }`);
    // The marker's title attribute carries the keyframe's specified value.
    for (let i = 0; i < expectedMarkers.length; i++) {
      const actualTitle = markerEls[i].title;
      const expectedTitle = expectedMarkers[i];
      is(actualTitle, expectedTitle, `Value of keyframes[${ i }] is correct`);
    }
  }
});

View File

@ -0,0 +1,28 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<!-- Fixture for the animation inspector tests: animates properties whose
     values may be non-standard color keywords (caret-color: auto,
     scrollbar-color: auto / two-color pairs). -->
<style>
div {
  animation: anim 5s infinite;
  border: 1px solid lime;
  height: 100px;
  width: 100px;
}
@keyframes anim {
  from {
    caret-color: auto;
    scrollbar-color: lime red;
  }
  to {
    caret-color: lime;
    scrollbar-color: auto;
  }
}
</style>
</head>
<body>
<div></div>
</body>
</html>

View File

@ -29,7 +29,7 @@ add_task(async function() {
const loadingPrincipal = channel.loadInfo.loadingPrincipal;
is(loadingPrincipal, null, "sanity: correct loadingPrincipal");
const triggeringPrincipal = channel.loadInfo.triggeringPrincipal;
ok(Services.scriptSecurityManager.isSystemPrincipal(triggeringPrincipal),
ok(triggeringPrincipal.isSystemPrincipal,
"sanity: correct triggeringPrincipal");
const principalToInherit = channel.loadInfo.principalToInherit;
ok(principalToInherit.isNullPrincipal, "sanity: correct principalToInherit");
@ -55,7 +55,7 @@ add_task(async function() {
const loadingPrincipal = channel.loadInfo.loadingPrincipal;
is(loadingPrincipal, null, "reloaded: correct loadingPrincipal");
const triggeringPrincipal = channel.loadInfo.triggeringPrincipal;
ok(Services.scriptSecurityManager.isSystemPrincipal(triggeringPrincipal),
ok(triggeringPrincipal.isSystemPrincipal,
"reloaded: correct triggeringPrincipal");
const principalToInherit = channel.loadInfo.principalToInherit;
ok(principalToInherit.isNullPrincipal, "reloaded: correct principalToInherit");

View File

@ -71,5 +71,6 @@ skip-if = os == "linux" || os == "mac" # Bug 1498336
[browser_touch_simulation.js]
[browser_user_agent_input.js]
[browser_viewport_basics.js]
[browser_viewport_resizing.js]
[browser_viewport_resizing_fixed_width.js]
[browser_viewport_resizing_fixed_width_and_zoom.js]
[browser_window_close.js]

View File

@ -0,0 +1,116 @@
/* Any copyright is dedicated to the Public Domain.
http://creativecommons.org/publicdomain/zero/1.0/ */
"use strict";
// Test viewport resizing, with and without meta viewport support.
// We call this to switch between on/off support for meta viewports.
// Switch touch simulation (and with it, meta viewport support) on or off.
// updateTouchSimulation tells us whether the change only takes effect after a
// reload; if so, reload the viewport browser and wait for it to finish.
async function setTouchAndMetaViewportSupport(ui, value) {
  const reloadNeeded = await ui.updateTouchSimulation(value);
  if (reloadNeeded) {
    info("Reload is needed -- waiting for it.");
    const reload = waitForViewportLoad(ui);
    const browser = ui.getViewportBrowser();
    browser.reload();
    await reload;
  }
}
// This function check that zoom, layout viewport width and height
// are all as expected.
async function testViewportZoomWidthAndHeight(message, ui, zoom, width, height) {
  // windowUtils.getResolution() reports the content resolution, which we
  // compare directly against the expected zoom level.
  const resolution = await spawnViewportTask(ui, {}, function() {
    return content.windowUtils.getResolution();
  });
  is(resolution, zoom, message + " should have expected zoom.");
  // content.screen.{width,height} give the layout size inside the viewport.
  const layoutSize = await spawnViewportTask(ui, {}, function() {
    return {
      width: content.screen.width,
      height: content.screen.height,
    };
  });
  is(layoutSize.width, width, message + " should have expected layout width.");
  is(layoutSize.height, height, message + " should have expected layout height.");
}
// A data: page whose meta viewport locks scaling at 1.0 (min = max = initial).
const TEST_URL = "data:text/html;charset=utf-8," +
  "<head><meta name=\"viewport\" content=\"width=device-width, " +
  "initial-scale=1.0, minimum-scale=1.0, maximum-scale=1.0\"></head>" +
  "<body>meta viewport scaled locked at 1.0</body>";
addRDMTask(TEST_URL, async function({ ui, manager }) {
  // Turn on the pref that allows meta viewport support.
  await SpecialPowers.pushPrefEnv({
    set: [["devtools.responsive.metaViewport.enabled", true]],
  });
  const store = ui.toolWindow.store;
  // Wait until the viewport has been added.
  await waitUntilState(store, state => state.viewports.length == 1);
  info("--- Starting viewport test output ---");
  // We're going to take a 300,600 viewport (before) and resize it
  // to 600,300 (after) and then resize it back. At the before and
  // after points, we'll measure zoom and the layout viewport width
  // and height.
  const expected = [
    {
      metaSupport: false,
      before: {
        zoom: 1.0,
        width: 300,
        height: 600,
      },
      after: {
        zoom: 1.0,
        width: 600,
        height: 300,
      },
    },
    {
      metaSupport: true,
      before: {
        zoom: 1.0,
        width: 300,
        height: 600,
      },
      after: {
        zoom: 1.0,
        width: 600,
        height: 300,
      },
    },
  ];
  // Run the same resize sequence with meta viewport support off, then on.
  for (const e of expected) {
    const b = e.before;
    const a = e.after;
    const message = "Meta Viewport " + (e.metaSupport ? "ON" : "OFF");
    // Ensure meta viewport is set.
    info(message + " setting meta viewport support.");
    await setTouchAndMetaViewportSupport(ui, e.metaSupport);
    // Get to the initial size and check values.
    await setViewportSize(ui, manager, 300, 600);
    await testViewportZoomWidthAndHeight(
      message + " before resize",
      ui, b.zoom, b.width, b.height);
    // Move to the smaller size.
    await setViewportSize(ui, manager, 600, 300);
    await testViewportZoomWidthAndHeight(
      message + " after resize",
      ui, a.zoom, a.width, a.height);
    // Go back to the initial size and check again.
    await setViewportSize(ui, manager, 300, 600);
    await testViewportZoomWidthAndHeight(
      message + " return to initial size",
      ui, b.zoom, b.width, b.height);
  }
});

View File

@ -5,7 +5,6 @@
"use strict";
const {Ci} = require("chrome");
const Services = require("Services");
const defer = require("devtools/shared/defer");
const protocol = require("devtools/shared/protocol");
const {LongStringActor} = require("devtools/server/actors/string");
@ -730,10 +729,9 @@ var StyleSheetsActor = protocol.ActorClassWithSpec(styleSheetsSpec, {
// StyleSheetApplicableStateChanged events. See Document.webidl.
doc.styleSheetChangeEventsEnabled = true;
const isChrome =
Services.scriptSecurityManager.isSystemPrincipal(doc.nodePrincipal);
const documentOnly = !isChrome;
const documentOnly = !doc.nodePrincipal.isSystemPrincipal;
const styleSheets = InspectorUtils.getAllStyleSheets(doc, documentOnly);
let actors = [];
for (let i = 0; i < styleSheets.length; i++) {
const sheet = styleSheets[i];

View File

@ -323,7 +323,7 @@ exports.isSafeJSObject = function(obj) {
// If there aren't Xrays, only allow chrome objects.
const principal = Cu.getObjectPrincipal(obj);
if (!Services.scriptSecurityManager.isSystemPrincipal(principal)) {
if (!principal.isSystemPrincipal) {
return false;
}

View File

@ -1,5 +1,4 @@
add_task(async function test() {
const secMan = Services.scriptSecurityManager;
const uris = [undefined, "about:blank"];
function checkContentProcess(newBrowser, uri) {
@ -8,8 +7,7 @@ add_task(async function test() {
Assert.notEqual(prin, null, "Loaded principal must not be null when adding " + uri);
Assert.notEqual(prin, undefined, "Loaded principal must not be undefined when loading " + uri);
const secMan = Services.scriptSecurityManager;
Assert.equal(secMan.isSystemPrincipal(prin), false,
Assert.equal(prin.isSystemPrincipal, false,
"Loaded principal must not be system when loading " + uri);
});
}
@ -23,7 +21,7 @@ add_task(async function test() {
isnot(prin, null, "Forced principal must not be null when loading " + uri);
isnot(prin, undefined,
"Forced principal must not be undefined when loading " + uri);
is(secMan.isSystemPrincipal(prin), false,
is(prin.isSystemPrincipal, false,
"Forced principal must not be system when loading " + uri);
// Belt-and-suspenders e10s check: make sure that the same checks hold
@ -35,7 +33,7 @@ add_task(async function test() {
prin = newBrowser.contentPrincipal;
isnot(prin, null, "Loaded principal must not be null when adding " + uri);
isnot(prin, undefined, "Loaded principal must not be undefined when loading " + uri);
is(secMan.isSystemPrincipal(prin), false,
is(prin.isSystemPrincipal, false,
"Loaded principal must not be system when loading " + uri);
// Belt-and-suspenders e10s check: make sure that the same checks hold

View File

@ -5,8 +5,7 @@ add_task(async function test() {
Assert.notEqual(prin, null, "Loaded principal must not be null");
Assert.notEqual(prin, undefined, "Loaded principal must not be undefined");
const secMan = Services.scriptSecurityManager;
Assert.equal(secMan.isSystemPrincipal(prin), false,
Assert.equal(prin.isSystemPrincipal, false,
"Loaded principal must not be system");
});
});

View File

@ -2,7 +2,6 @@ function test() {
waitForExplicitFinish();
var w;
const secMan = Services.scriptSecurityManager;
var iteration = 1;
const uris = ["", "about:blank"];
var uri;
@ -18,7 +17,7 @@ function test() {
var prin = w.document.nodePrincipal;
isnot(prin, null, "Loaded principal must not be null when adding " + uri);
isnot(prin, undefined, "Loaded principal must not be undefined when loading " + uri);
is(secMan.isSystemPrincipal(prin), false,
is(prin.isSystemPrincipal, false,
"Loaded principal must not be system when loading " + uri);
w.close();
@ -40,7 +39,7 @@ function test() {
isnot(prin, null, "Forced principal must not be null when loading " + uri);
isnot(prin, undefined,
"Forced principal must not be undefined when loading " + uri);
is(secMan.isSystemPrincipal(prin), false,
is(prin.isSystemPrincipal, false,
"Forced principal must not be system when loading " + uri);
if (uri == undefined) {
// No actual load here, so just move along.

View File

@ -21,8 +21,6 @@
function childFrameScript() {
"use strict";
const secMan = Cc["@mozilla.org/scriptsecuritymanager;1"].
getService(Ci.nsIScriptSecurityManager);
addMessageListener("test:content", function(message) {
sendAsyncMessage("test:result", "is nsIPrincipal: " +
@ -40,7 +38,7 @@
addMessageListener("test:system", function(message) {
sendAsyncMessage("test:result", "isSystemPrincipal: " +
(secMan.isSystemPrincipal(message.data) ? "OK" : "KO"));
(message.data.isSystemPrincipal ? "OK" : "KO"));
});
addMessageListener("test:ep", function(message) {

View File

@ -126,7 +126,7 @@ void PrincipalVerifier::VerifyOnMainThread() {
// Verify if a child process uses system principal, which is not allowed
// to prevent system principal is spoofed.
if (NS_WARN_IF(actor && ssm->IsSystemPrincipal(principal))) {
if (NS_WARN_IF(actor && principal->IsSystemPrincipal())) {
DispatchToInitiatingThread(NS_ERROR_FAILURE);
return;
}
@ -137,7 +137,7 @@ void PrincipalVerifier::VerifyOnMainThread() {
// Sanity check principal origin by using it to construct a URI and security
// checking it. Don't do this for the system principal, though, as its origin
// is a synthetic [System Principal] string.
if (!ssm->IsSystemPrincipal(principal)) {
if (!principal->IsSystemPrincipal()) {
nsAutoCString origin;
rv = principal->GetOriginNoSuffix(origin);
if (NS_WARN_IF(NS_FAILED(rv))) {

View File

@ -412,7 +412,7 @@ class RTCPeerConnection {
"RTCPeerConnection constructor passed invalid RTCConfiguration");
}
var principal = Cu.getWebIDLCallerPrincipal();
this._isChrome = Services.scriptSecurityManager.isSystemPrincipal(principal);
this._isChrome = principal.isSystemPrincipal;
if (_globalPCList._networkdown) {
throw new this._win.DOMException(

View File

@ -93,12 +93,9 @@ bool FramingChecker::CheckOneFrameOptionsPolicy(nsIHttpChannel* aHttpChannel,
break;
}
bool system = false;
topDoc = parentDocShellItem->GetDocument();
if (topDoc) {
if (NS_SUCCEEDED(
ssm->IsSystemPrincipal(topDoc->NodePrincipal(), &system)) &&
system) {
if (topDoc->NodePrincipal()->IsSystemPrincipal()) {
// Found a system-principled doc: last docshell was top.
break;
}

View File

@ -167,6 +167,10 @@ class MOZ_STACK_CLASS WebRenderScrollDataWrapper {
mWrRootId.mLayersId, mLayer->GetReferentRenderRoot()->GetChildType());
const WebRenderScrollData* childData =
mUpdater->GetScrollData(newWrRootId);
if (!childData) {
// The other tree might not exist yet if the scene hasn't been built.
return WebRenderScrollDataWrapper(*mUpdater, newWrRootId);
}
// See the comment above RenderRootBoundary for more context on what's
// happening here. We need to fish out the appropriate wrapper root from
// inside the dummy root. Note that the wrapper root should always be a
@ -185,7 +189,7 @@ class MOZ_STACK_CLASS WebRenderScrollDataWrapper {
if (!layerIndex) {
// It's possible that there's no wrapper root. In that case there are
// no descendants
return WebRenderScrollDataWrapper(*mUpdater, mWrRootId);
return WebRenderScrollDataWrapper(*mUpdater, newWrRootId);
}
return WebRenderScrollDataWrapper(mUpdater, newWrRootId, childData,
*layerIndex,

View File

@ -36,7 +36,7 @@ fuzzy(0-1,0-926) == badbitssize.bmp pal1.png
# number of colors."
# [We reject it. Chromium accepts it but draws nothing. Rejecting seems
# preferable give that the data is clearly untrustworthy.]
== wrapper.html?badpalettesize.bmp about:blank
fuzzy(0-245,0-8128) == wrapper.html?badpalettesize.bmp about:blank
# BMP: bihsize=40, 127 x 64, bpp=1, compression=0, colors=2
# "The 'planes' setting, which is required to be 1, is not 1."

View File

@ -67,7 +67,11 @@ class SharedMemory {
// Maps the shared memory into the caller's address space.
// Returns true on success, false otherwise. The memory address
// is accessed via the memory() accessor.
bool Map(size_t bytes);
//
// If the specified fixed address is not null, it is the address that the
// shared memory must be mapped at. Returns false if the shared memory
// could not be mapped at that address.
bool Map(size_t bytes, void* fixed_address = nullptr);
// Unmaps the shared memory from the caller's address space.
// Returns true if successful; returns false on error or if the
@ -94,6 +98,15 @@ class SharedMemory {
// It is safe to call Close repeatedly.
void Close(bool unmap_view = true);
// Returns a page-aligned address at which the given number of bytes could
// probably be mapped. Returns NULL on error or if there is insufficient
// contiguous address space to map the required number of pages.
//
// Note that there is no guarantee that the given address space will actually
// be free by the time this function returns, since another thread might map
// something there in the meantime.
static void* FindFreeAddressSpace(size_t size);
// Share the shared memory to another process. Attempts
// to create a platform-specific new_handle which can be
// used in a remote process to access the shared memory

View File

@ -156,16 +156,30 @@ bool SharedMemory::Create(size_t size) {
return true;
}
bool SharedMemory::Map(size_t bytes) {
bool SharedMemory::Map(size_t bytes, void* fixed_address) {
if (mapped_file_ == -1) return false;
memory_ = mmap(NULL, bytes, PROT_READ | (read_only_ ? 0 : PROT_WRITE),
MAP_SHARED, mapped_file_, 0);
if (memory_) max_size_ = bytes;
// Don't use MAP_FIXED when a fixed_address was specified, since that can
// replace pages that are already mapped at that address.
memory_ =
mmap(fixed_address, bytes, PROT_READ | (read_only_ ? 0 : PROT_WRITE),
MAP_SHARED, mapped_file_, 0);
bool mmap_succeeded = (memory_ != (void*)-1);
DCHECK(mmap_succeeded) << "Call to mmap failed, errno=" << errno;
if (mmap_succeeded) {
if (fixed_address && memory_ != fixed_address) {
bool munmap_succeeded = munmap(memory_, bytes) == 0;
DCHECK(munmap_succeeded) << "Call to munmap failed, errno=" << errno;
memory_ = NULL;
return false;
}
max_size_ = bytes;
}
return mmap_succeeded;
}
@ -178,6 +192,13 @@ bool SharedMemory::Unmap() {
return true;
}
void* SharedMemory::FindFreeAddressSpace(size_t size) {
void* memory =
mmap(NULL, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
munmap(memory, size);
return memory != MAP_FAILED ? memory : NULL;
}
bool SharedMemory::ShareToProcessCommon(ProcessId processId,
SharedMemoryHandle* new_handle,
bool close_self) {

View File

@ -109,17 +109,19 @@ bool SharedMemory::Create(size_t size) {
return true;
}
bool SharedMemory::Map(size_t bytes) {
bool SharedMemory::Map(size_t bytes, void* fixed_address) {
if (mapped_file_ == NULL) return false;
if (external_section_ && !IsSectionSafeToMap(mapped_file_)) {
return false;
}
memory_ = MapViewOfFile(
memory_ = MapViewOfFileEx(
mapped_file_, read_only_ ? FILE_MAP_READ : FILE_MAP_READ | FILE_MAP_WRITE,
0, 0, bytes);
0, 0, bytes, fixed_address);
if (memory_ != NULL) {
MOZ_ASSERT(!fixed_address || memory_ == fixed_address,
"MapViewOfFileEx returned an expected address");
return true;
}
return false;
@ -133,6 +135,14 @@ bool SharedMemory::Unmap() {
return true;
}
void* SharedMemory::FindFreeAddressSpace(size_t size) {
void* memory = VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
if (memory) {
VirtualFree(memory, 0, MEM_RELEASE);
}
return memory;
}
bool SharedMemory::ShareToProcessCommon(ProcessId processId,
SharedMemoryHandle* new_handle,
bool close_self) {

View File

@ -211,9 +211,10 @@ nsresult PrincipalToPrincipalInfo(nsIPrincipal* aPrincipal,
MOZ_ASSERT(aPrincipal);
MOZ_ASSERT(aPrincipalInfo);
nsresult rv;
if (aPrincipal->GetIsNullPrincipal()) {
nsCOMPtr<nsIURI> uri;
nsresult rv = aPrincipal->GetURI(getter_AddRefs(uri));
rv = aPrincipal->GetURI(getter_AddRefs(uri));
if (NS_WARN_IF(NS_FAILED(rv))) {
return rv;
}
@ -233,19 +234,7 @@ nsresult PrincipalToPrincipalInfo(nsIPrincipal* aPrincipal,
return NS_OK;
}
nsCOMPtr<nsIScriptSecurityManager> secMan =
nsContentUtils::GetSecurityManager();
if (!secMan) {
return NS_ERROR_FAILURE;
}
bool isSystemPrincipal;
nsresult rv = secMan->IsSystemPrincipal(aPrincipal, &isSystemPrincipal);
if (NS_WARN_IF(NS_FAILED(rv))) {
return rv;
}
if (isSystemPrincipal) {
if (aPrincipal->IsSystemPrincipal()) {
*aPrincipalInfo = SystemPrincipalInfo();
return NS_OK;
}

View File

@ -50,7 +50,7 @@ class SharedMemory {
virtual void* memory() const = 0;
virtual bool Create(size_t size) = 0;
virtual bool Map(size_t nBytes) = 0;
virtual bool Map(size_t nBytes, void* fixed_address = nullptr) = 0;
virtual void CloseHandle() = 0;

View File

@ -68,7 +68,7 @@ bool SharedMemoryBasic::Create(size_t aNbytes) {
return true;
}
bool SharedMemoryBasic::Map(size_t nBytes) {
bool SharedMemoryBasic::Map(size_t nBytes, void* fixed_address) {
MOZ_ASSERT(nullptr == mMemory, "Already Map()d");
int prot = PROT_READ;
@ -76,17 +76,37 @@ bool SharedMemoryBasic::Map(size_t nBytes) {
prot |= PROT_WRITE;
}
mMemory = mmap(nullptr, nBytes, prot, MAP_SHARED, mShmFd, 0);
// Don't use MAP_FIXED when a fixed_address was specified, since that can
// replace pages that are already mapped at that address.
mMemory = mmap(fixed_address, nBytes, prot, MAP_SHARED, mShmFd, 0);
if (MAP_FAILED == mMemory) {
LogError("ShmemAndroid::Map()");
if (!fixed_address) {
LogError("ShmemAndroid::Map()");
}
mMemory = nullptr;
return false;
}
if (fixed_address && mMemory != fixed_address) {
if (munmap(mMemory, nBytes)) {
LogError("ShmemAndroid::Map():unmap");
mMemory = nullptr;
return false;
}
}
Mapped(nBytes);
return true;
}
void* SharedMemoryBasic::FindFreeAddressSpace(size_t size) {
void* memory =
mmap(NULL, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
munmap(memory, size);
return memory != (void*)-1 ? memory : NULL;
}
bool SharedMemoryBasic::ShareToProcess(base::ProcessId /*unused*/,
Handle* aNewHandle) {
MOZ_ASSERT(mShmFd >= 0, "Should have been Create()d by now");

View File

@ -32,7 +32,7 @@ class SharedMemoryBasic final
virtual bool Create(size_t aNbytes) override;
virtual bool Map(size_t nBytes) override;
virtual bool Map(size_t nBytes, void* fixed_address = nullptr) override;
virtual void CloseHandle() override;
@ -48,6 +48,8 @@ class SharedMemoryBasic final
static Handle NULLHandle() { return Handle(); }
static void* FindFreeAddressSpace(size_t aSize);
virtual bool IsHandleValid(const Handle& aHandle) const override {
return aHandle.fd >= 0;
}

View File

@ -41,8 +41,8 @@ class SharedMemoryBasic final
return ok;
}
virtual bool Map(size_t nBytes) override {
bool ok = mSharedMemory.Map(nBytes);
virtual bool Map(size_t nBytes, void* fixed_address = nullptr) override {
bool ok = mSharedMemory.Map(nBytes, fixed_address);
if (ok) {
Mapped(nBytes);
}
@ -76,6 +76,10 @@ class SharedMemoryBasic final
return ret;
}
static void* FindFreeAddressSpace(size_t size) {
return base::SharedMemory::FindFreeAddressSpace(size);
}
private:
~SharedMemoryBasic() {}

View File

@ -70,7 +70,7 @@ class SharedMemoryBasic final : public SharedMemoryCommon<mach_port_t> {
virtual bool Create(size_t aNbytes) override;
virtual bool Map(size_t nBytes) override;
virtual bool Map(size_t nBytes, void* fixed_address = nullptr) override;
virtual void CloseHandle() override;
@ -86,6 +86,8 @@ class SharedMemoryBasic final : public SharedMemoryCommon<mach_port_t> {
static Handle NULLHandle() { return Handle(); }
static void* FindFreeAddressSpace(size_t aSize);
virtual bool IsHandleValid(const Handle& aHandle) const override;
virtual bool ShareToProcess(base::ProcessId aProcessId,

View File

@ -512,7 +512,7 @@ bool SharedMemoryBasic::Create(size_t size) {
return true;
}
bool SharedMemoryBasic::Map(size_t size) {
bool SharedMemoryBasic::Map(size_t size, void* fixed_address) {
MOZ_ASSERT(mMemory == nullptr);
if (MACH_PORT_NULL == mPort) {
@ -520,18 +520,31 @@ bool SharedMemoryBasic::Map(size_t size) {
}
kern_return_t kr;
mach_vm_address_t address = 0;
mach_vm_address_t address = toVMAddress(fixed_address);
vm_prot_t vmProtection = VM_PROT_READ;
if (mOpenRights == RightsReadWrite) {
vmProtection |= VM_PROT_WRITE;
}
kr = mach_vm_map(mach_task_self(), &address, round_page(size), 0, VM_FLAGS_ANYWHERE, mPort, 0,
false, vmProtection, vmProtection, VM_INHERIT_NONE);
kr = mach_vm_map(mach_task_self(), &address, round_page(size), 0,
fixed_address ? VM_FLAGS_FIXED : VM_FLAGS_ANYWHERE,
mPort, 0, false, vmProtection, vmProtection, VM_INHERIT_NONE);
if (kr != KERN_SUCCESS) {
LOG_ERROR("Failed to map shared memory (%zu bytes) into %x, port %x. %s (%x)\n", size,
mach_task_self(), mPort, mach_error_string(kr), kr);
if (!fixed_address) {
LOG_ERROR("Failed to map shared memory (%zu bytes) into %x, port %x. %s (%x)\n",
size, mach_task_self(), mPort, mach_error_string(kr), kr);
}
return false;
}
if (fixed_address && fixed_address != toPointer(address)) {
kr = vm_deallocate(mach_task_self(), address, size);
if (kr != KERN_SUCCESS) {
LOG_ERROR("Failed to unmap shared memory at unsuitable address "
"(%zu bytes) from %x, port %x. %s (%x)\n",
size, mach_task_self(), mPort, mach_error_string(kr), kr);
}
return false;
}
@ -540,6 +553,18 @@ bool SharedMemoryBasic::Map(size_t size) {
return true;
}
void* SharedMemoryBasic::FindFreeAddressSpace(size_t size) {
mach_vm_address_t address = 0;
size = round_page(size);
if (mach_vm_map(mach_task_self(), &address, size, 0,
VM_FLAGS_ANYWHERE, MEMORY_OBJECT_NULL, 0, false, VM_PROT_NONE,
VM_PROT_NONE, VM_INHERIT_NONE) != KERN_SUCCESS ||
vm_deallocate(mach_task_self(), address, size) != KERN_SUCCESS) {
return nullptr;
}
return toPointer(address);
}
bool SharedMemoryBasic::ShareToProcess(base::ProcessId pid, Handle* aNewHandle) {
if (pid == getpid()) {
*aNewHandle = mPort;

View File

@ -4,6 +4,8 @@
// its object literals.
gczeal(0);
gcparam('minNurseryBytes', 1024 * 1024);
gcparam('maxNurseryBytes', 1024 * 1024);
// All reachable keys should be found, and the rest should be swept.
function basicSweeping() {

View File

@ -375,7 +375,7 @@ bool RealmPrivate::TryParseLocationURI(RealmPrivate::LocationHint aLocationHint,
static bool PrincipalImmuneToScriptPolicy(nsIPrincipal* aPrincipal) {
// System principal gets a free pass.
if (nsXPConnect::SecurityManager()->IsSystemPrincipal(aPrincipal)) {
if (aPrincipal->IsSystemPrincipal()) {
return true;
}

View File

@ -1,6 +1,5 @@
function run_test() {
var secMan = Cc["@mozilla.org/scriptsecuritymanager;1"].getService(Ci.nsIScriptSecurityManager);
Assert.ok(secMan.isSystemPrincipal(Cu.getObjectPrincipal({})));
Assert.ok(Cu.getObjectPrincipal({}).isSystemPrincipal);
var sb = new Cu.Sandbox('http://www.example.com');
Cu.evalInSandbox('var obj = { foo: 42 };', sb);
Assert.equal(Cu.getObjectPrincipal(sb.obj).origin, 'http://www.example.com');

View File

@ -401,8 +401,17 @@ void MobileViewportManager::UpdateResolution(
// Even in other scenarios, we want to ensure that zoom level is
// not _smaller_ than the intrinsic scale, otherwise we might be
// trying to show regions where there is no content to show.
if (zoom < intrinsicScale) {
newZoom = Some(intrinsicScale);
CSSToScreenScale clampedZoom = zoom;
if (clampedZoom < intrinsicScale) {
clampedZoom = intrinsicScale;
}
// Also clamp to the restrictions imposed by aViewportInfo.
clampedZoom = ClampZoom(clampedZoom, aViewportInfo);
if (clampedZoom != zoom) {
newZoom = Some(clampedZoom);
}
}
}

View File

@ -4838,7 +4838,12 @@ nsRegion nsDisplayBackgroundColor::GetOpaqueRegion(
nsDisplayListBuilder* aBuilder, bool* aSnap) const {
*aSnap = false;
if (mColor.a != 1) {
if (mColor.a != 1 ||
// Even if the current alpha channel is 1, we treat this item as if it's
// non-opaque if there is a background-color animation since the animation
// might change the alpha channel.
EffectCompositor::HasAnimationsForCompositor(
mFrame, DisplayItemType::TYPE_BACKGROUND_COLOR)) {
return nsRegion();
}

View File

@ -8,7 +8,7 @@ fuzzy-if(webrender&&winWidget,0-27,0-4) == unit-rem-iframe.html unit-rem-ref-ifr
== unit-rem.svg unit-rem-ref.svg
== unit-vh-vw.html unit-vh-vw-ref.html
== unit-vh-vw-zoom.html unit-vh-vw-zoom-ref.html
== unit-vh-vw-overflow-auto.html unit-vh-vw-overflow-auto-ref.html
skip-if(gtkWidget) == unit-vh-vw-overflow-auto.html unit-vh-vw-overflow-auto-ref.html
# These tests should probably be removed, see bug 1393603.
fails-if(!Android) == unit-vh-vw-overflow-scroll.html unit-vh-vw-overflow-scroll-ref.html

View File

@ -95,8 +95,8 @@ interface mozIStorageAsyncConnection : nsISupports {
*
* @note If your connection is already read-only, you will get a read-only
* clone.
* @note The resulting connection will NOT implement mozIStorageConnection,
* it will only implement mozIStorageAsyncConnection.
* @note The resulting connection will implement `mozIStorageConnection`, but
* all synchronous methods will throw if called from the main thread.
* @note Due to a bug in SQLite, if you use the shared cache
* (see mozIStorageService), you end up with the same privileges as the
* first connection opened regardless of what is specified in aReadOnly.

View File

@ -149,6 +149,16 @@ interface mozIStorageStatement : mozIStorageBaseStatement {
*/
long getTypeOfIndex(in unsigned long aIndex);
/**
* Retrieve the contents of a column from the current result row as a
* variant.
*
* @param aIndex
* 0-based column index.
* @return A variant with the type of the column value.
*/
nsIVariant getVariant(in unsigned long aIndex);
/**
* Retrieve the contents of a column from the current result row as an
* integer.

View File

@ -457,7 +457,8 @@ NS_IMPL_ISUPPORTS(CloseListener, mozIStorageCompletionCallback)
////////////////////////////////////////////////////////////////////////////////
//// Connection
Connection::Connection(Service *aService, int aFlags, bool aAsyncOnly,
Connection::Connection(Service *aService, int aFlags,
ConnectionOperation aSupportedOperations,
bool aIgnoreLockingMode)
: sharedAsyncExecutionMutex("Connection::sharedAsyncExecutionMutex"),
sharedDBMutex("Connection::sharedDBMutex"),
@ -472,7 +473,7 @@ Connection::Connection(Service *aService, int aFlags, bool aAsyncOnly,
mFlags(aFlags),
mIgnoreLockingMode(aIgnoreLockingMode),
mStorageService(aService),
mAsyncOnly(aAsyncOnly) {
mSupportedOperations(aSupportedOperations) {
MOZ_ASSERT(!mIgnoreLockingMode || mFlags & SQLITE_OPEN_READONLY,
"Can't ignore locking for a non-readonly connection!");
mStorageService->registerConnection(this);
@ -491,7 +492,7 @@ NS_IMPL_ADDREF(Connection)
NS_INTERFACE_MAP_BEGIN(Connection)
NS_INTERFACE_MAP_ENTRY(mozIStorageAsyncConnection)
NS_INTERFACE_MAP_ENTRY(nsIInterfaceRequestor)
NS_INTERFACE_MAP_ENTRY_CONDITIONAL(mozIStorageConnection, !mAsyncOnly)
NS_INTERFACE_MAP_ENTRY(mozIStorageConnection)
NS_INTERFACE_MAP_ENTRY_AMBIGUOUS(nsISupports, mozIStorageConnection)
NS_INTERFACE_MAP_END
@ -528,10 +529,11 @@ NS_IMETHODIMP_(MozExternalRefCountType) Connection::Release(void) {
// This could cause SpinningSynchronousClose() to be invoked and AddRef
// triggered for AsyncCloseConnection's strong ref if the conn was ever
// use for async purposes. (Main-thread only, though.)
Unused << Close();
Unused << synchronousClose();
} else {
nsCOMPtr<nsIRunnable> event = NewRunnableMethod(
"storage::Connection::Close", this, &Connection::Close);
nsCOMPtr<nsIRunnable> event =
NewRunnableMethod("storage::Connection::synchronousClose", this,
&Connection::synchronousClose);
if (NS_FAILED(
threadOpenedOn->Dispatch(event.forget(), NS_DISPATCH_NORMAL))) {
// The target thread was dead and so we've just leaked our runnable.
@ -539,8 +541,9 @@ NS_IMETHODIMP_(MozExternalRefCountType) Connection::Release(void) {
// be explicitly closing their connections, not relying on us to close
// them for them. (It's okay to let a statement go out of scope for
// automatic cleanup, but not a Connection.)
MOZ_ASSERT(false, "Leaked Connection::Close(), ownership fail.");
Unused << Close();
MOZ_ASSERT(false,
"Leaked Connection::synchronousClose(), ownership fail.");
Unused << synchronousClose();
}
}
@ -823,7 +826,10 @@ void Connection::initializeFailed() {
nsresult Connection::databaseElementExists(
enum DatabaseElementType aElementType, const nsACString &aElementName,
bool *_exists) {
if (!mDBConn) return NS_ERROR_NOT_INITIALIZED;
nsresult rv = connectionReady(SYNCHRONOUS);
if (NS_FAILED(rv)) {
return rv;
}
// When constructing the query, make sure to SELECT the correct db's
// sqlite_master if the user is prefixing the element with a specific db. ex:
@ -927,14 +933,25 @@ nsresult Connection::setClosedState() {
return NS_OK;
}
bool Connection::connectionReady() { return mDBConn != nullptr; }
nsresult Connection::connectionReady(ConnectionOperation aOperation) {
if (NS_WARN_IF(aOperation == SYNCHRONOUS &&
mSupportedOperations == ASYNCHRONOUS && NS_IsMainThread())) {
MOZ_ASSERT(false,
"Don't use async connections synchronously on the main thread");
return NS_ERROR_NOT_AVAILABLE;
}
if (!mDBConn) {
return NS_ERROR_NOT_INITIALIZED;
}
return NS_OK;
}
bool Connection::isConnectionReadyOnThisThread() {
MOZ_ASSERT_IF(mDBConn, !mConnectionClosed);
if (mAsyncExecutionThread && mAsyncExecutionThread->IsOnCurrentThread()) {
return true;
}
return connectionReady();
return mDBConn != nullptr;
}
bool Connection::isClosing() {
@ -1211,7 +1228,17 @@ Connection::GetInterface(const nsIID &aIID, void **_result) {
NS_IMETHODIMP
Connection::Close() {
if (!mDBConn) return NS_ERROR_NOT_INITIALIZED;
nsresult rv = connectionReady(SYNCHRONOUS);
if (NS_FAILED(rv)) {
return rv;
}
return synchronousClose();
}
nsresult Connection::synchronousClose() {
if (!mDBConn) {
return NS_ERROR_NOT_INITIALIZED;
}
#ifdef DEBUG
// Since we're accessing mAsyncExecutionThread, we need to be on the opener
@ -1259,13 +1286,14 @@ Connection::SpinningSynchronousClose() {
// As currently implemented, we can't spin to wait for an existing AsyncClose.
// Our only existing caller will never have called close; assert if misused
// so that no new callers assume this works after an AsyncClose.
MOZ_DIAGNOSTIC_ASSERT(connectionReady());
if (!connectionReady()) {
nsresult rv = connectionReady(SYNCHRONOUS);
MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
if (NS_FAILED(rv)) {
return NS_ERROR_UNEXPECTED;
}
RefPtr<CloseListener> listener = new CloseListener();
nsresult rv = AsyncClose(listener);
rv = AsyncClose(listener);
NS_ENSURE_SUCCESS(rv, rv);
MOZ_ALWAYS_TRUE(SpinEventLoopUntil([&]() { return listener->mClosed; }));
MOZ_ASSERT(isClosed(), "The connection should be closed at this point");
@ -1277,8 +1305,9 @@ NS_IMETHODIMP
Connection::AsyncClose(mozIStorageCompletionCallback *aCallback) {
NS_ENSURE_TRUE(NS_IsMainThread(), NS_ERROR_NOT_SAME_THREAD);
// Check if AsyncClose or Close were already invoked.
if (!mDBConn) {
return NS_ERROR_NOT_INITIALIZED;
nsresult rv = connectionReady(ASYNCHRONOUS);
if (NS_FAILED(rv)) {
return rv;
}
// The two relevant factors at this point are whether we have a database
@ -1351,7 +1380,7 @@ Connection::AsyncClose(mozIStorageCompletionCallback *aCallback) {
// callers ignore our return value.
Unused << NS_DispatchToMainThread(completeEvent.forget());
}
MOZ_ALWAYS_SUCCEEDS(Close());
MOZ_ALWAYS_SUCCEEDS(synchronousClose());
// Return a success unconditionally here, since Close() is unlikely to fail
// and we want to reassure the consumer that its callback will be invoked.
return NS_OK;
@ -1360,7 +1389,7 @@ Connection::AsyncClose(mozIStorageCompletionCallback *aCallback) {
// setClosedState nullifies our connection pointer, so we take a raw pointer
// off it, to pass it through the close procedure.
sqlite3 *nativeConn = mDBConn;
nsresult rv = setClosedState();
rv = setClosedState();
NS_ENSURE_SUCCESS(rv, rv);
// Create and dispatch our close event to the background thread.
@ -1378,7 +1407,10 @@ Connection::AsyncClone(bool aReadOnly,
AUTO_PROFILER_LABEL("Connection::AsyncClone", OTHER);
NS_ENSURE_TRUE(NS_IsMainThread(), NS_ERROR_NOT_SAME_THREAD);
if (!mDBConn) return NS_ERROR_NOT_INITIALIZED;
nsresult rv = connectionReady(ASYNCHRONOUS);
if (NS_FAILED(rv)) {
return rv;
}
if (!mDatabaseFile) return NS_ERROR_UNEXPECTED;
int flags = mFlags;
@ -1389,8 +1421,10 @@ Connection::AsyncClone(bool aReadOnly,
flags = (~SQLITE_OPEN_CREATE & flags);
}
// Force the cloned connection to only implement the async connection API.
RefPtr<Connection> clone = new Connection(mStorageService, flags, true);
// The cloned connection will still implement the synchronous API, but throw
// if any synchronous methods are called on the main thread.
RefPtr<Connection> clone =
new Connection(mStorageService, flags, ASYNCHRONOUS);
RefPtr<AsyncInitializeClone> initEvent =
new AsyncInitializeClone(this, clone, aReadOnly, aCallback);
@ -1553,7 +1587,10 @@ Connection::Clone(bool aReadOnly, mozIStorageConnection **_connection) {
AUTO_PROFILER_LABEL("Connection::Clone", OTHER);
if (!mDBConn) return NS_ERROR_NOT_INITIALIZED;
nsresult rv = connectionReady(SYNCHRONOUS);
if (NS_FAILED(rv)) {
return rv;
}
if (!mDatabaseFile) return NS_ERROR_UNEXPECTED;
int flags = mFlags;
@ -1564,9 +1601,10 @@ Connection::Clone(bool aReadOnly, mozIStorageConnection **_connection) {
flags = (~SQLITE_OPEN_CREATE & flags);
}
RefPtr<Connection> clone = new Connection(mStorageService, flags, mAsyncOnly);
RefPtr<Connection> clone =
new Connection(mStorageService, flags, mSupportedOperations);
nsresult rv = initializeClone(clone, aReadOnly);
rv = initializeClone(clone, aReadOnly);
if (NS_FAILED(rv)) {
return rv;
}
@ -1581,7 +1619,7 @@ Connection::Interrupt() {
if (!mDBConn) {
return NS_ERROR_NOT_INITIALIZED;
}
if (!mAsyncOnly || !(mFlags & SQLITE_OPEN_READONLY)) {
if (mSupportedOperations == SYNCHRONOUS || !(mFlags & SQLITE_OPEN_READONLY)) {
return NS_ERROR_INVALID_ARG;
}
::sqlite3_interrupt(mDBConn);
@ -1597,13 +1635,16 @@ Connection::GetDefaultPageSize(int32_t *_defaultPageSize) {
NS_IMETHODIMP
Connection::GetConnectionReady(bool *_ready) {
MOZ_ASSERT(threadOpenedOn == NS_GetCurrentThread());
*_ready = connectionReady();
*_ready = !!mDBConn;
return NS_OK;
}
NS_IMETHODIMP
Connection::GetDatabaseFile(nsIFile **_dbFile) {
if (!mDBConn) return NS_ERROR_NOT_INITIALIZED;
nsresult rv = connectionReady(ASYNCHRONOUS);
if (NS_FAILED(rv)) {
return rv;
}
NS_IF_ADDREF(*_dbFile = mDatabaseFile);
@ -1612,7 +1653,10 @@ Connection::GetDatabaseFile(nsIFile **_dbFile) {
NS_IMETHODIMP
Connection::GetLastInsertRowID(int64_t *_id) {
if (!mDBConn) return NS_ERROR_NOT_INITIALIZED;
nsresult rv = connectionReady(SYNCHRONOUS);
if (NS_FAILED(rv)) {
return rv;
}
sqlite_int64 id = ::sqlite3_last_insert_rowid(mDBConn);
*_id = id;
@ -1622,7 +1666,10 @@ Connection::GetLastInsertRowID(int64_t *_id) {
NS_IMETHODIMP
Connection::GetAffectedRows(int32_t *_rows) {
if (!mDBConn) return NS_ERROR_NOT_INITIALIZED;
nsresult rv = connectionReady(SYNCHRONOUS);
if (NS_FAILED(rv)) {
return rv;
}
*_rows = ::sqlite3_changes(mDBConn);
@ -1631,7 +1678,10 @@ Connection::GetAffectedRows(int32_t *_rows) {
NS_IMETHODIMP
Connection::GetLastError(int32_t *_error) {
if (!mDBConn) return NS_ERROR_NOT_INITIALIZED;
nsresult rv = connectionReady(SYNCHRONOUS);
if (NS_FAILED(rv)) {
return rv;
}
*_error = ::sqlite3_errcode(mDBConn);
@ -1640,7 +1690,10 @@ Connection::GetLastError(int32_t *_error) {
NS_IMETHODIMP
Connection::GetLastErrorString(nsACString &_errorString) {
if (!mDBConn) return NS_ERROR_NOT_INITIALIZED;
nsresult rv = connectionReady(SYNCHRONOUS);
if (NS_FAILED(rv)) {
return rv;
}
const char *serr = ::sqlite3_errmsg(mDBConn);
_errorString.Assign(serr);
@ -1650,7 +1703,10 @@ Connection::GetLastErrorString(nsACString &_errorString) {
NS_IMETHODIMP
Connection::GetSchemaVersion(int32_t *_version) {
if (!mDBConn) return NS_ERROR_NOT_INITIALIZED;
nsresult rv = connectionReady(SYNCHRONOUS);
if (NS_FAILED(rv)) {
return rv;
}
nsCOMPtr<mozIStorageStatement> stmt;
(void)CreateStatement(NS_LITERAL_CSTRING("PRAGMA user_version"),
@ -1667,7 +1723,10 @@ Connection::GetSchemaVersion(int32_t *_version) {
NS_IMETHODIMP
Connection::SetSchemaVersion(int32_t aVersion) {
if (!mDBConn) return NS_ERROR_NOT_INITIALIZED;
nsresult rv = connectionReady(SYNCHRONOUS);
if (NS_FAILED(rv)) {
return rv;
}
nsAutoCString stmt(NS_LITERAL_CSTRING("PRAGMA user_version = "));
stmt.AppendInt(aVersion);
@ -1679,12 +1738,15 @@ NS_IMETHODIMP
Connection::CreateStatement(const nsACString &aSQLStatement,
mozIStorageStatement **_stmt) {
NS_ENSURE_ARG_POINTER(_stmt);
if (!mDBConn) return NS_ERROR_NOT_INITIALIZED;
nsresult rv = connectionReady(SYNCHRONOUS);
if (NS_FAILED(rv)) {
return rv;
}
RefPtr<Statement> statement(new Statement());
NS_ENSURE_TRUE(statement, NS_ERROR_OUT_OF_MEMORY);
nsresult rv = statement->initialize(this, mDBConn, aSQLStatement);
rv = statement->initialize(this, mDBConn, aSQLStatement);
NS_ENSURE_SUCCESS(rv, rv);
Statement *rawPtr;
@ -1697,12 +1759,15 @@ NS_IMETHODIMP
Connection::CreateAsyncStatement(const nsACString &aSQLStatement,
mozIStorageAsyncStatement **_stmt) {
NS_ENSURE_ARG_POINTER(_stmt);
if (!mDBConn) return NS_ERROR_NOT_INITIALIZED;
nsresult rv = connectionReady(ASYNCHRONOUS);
if (NS_FAILED(rv)) {
return rv;
}
RefPtr<AsyncStatement> statement(new AsyncStatement());
NS_ENSURE_TRUE(statement, NS_ERROR_OUT_OF_MEMORY);
nsresult rv = statement->initialize(this, mDBConn, aSQLStatement);
rv = statement->initialize(this, mDBConn, aSQLStatement);
NS_ENSURE_SUCCESS(rv, rv);
AsyncStatement *rawPtr;
@ -1714,7 +1779,10 @@ Connection::CreateAsyncStatement(const nsACString &aSQLStatement,
NS_IMETHODIMP
Connection::ExecuteSimpleSQL(const nsACString &aSQLStatement) {
CHECK_MAINTHREAD_ABUSE();
if (!mDBConn) return NS_ERROR_NOT_INITIALIZED;
nsresult rv = connectionReady(SYNCHRONOUS);
if (NS_FAILED(rv)) {
return rv;
}
int srv = executeSql(mDBConn, PromiseFlatCString(aSQLStatement).get());
return convertResultCode(srv);
@ -1781,7 +1849,10 @@ Connection::IndexExists(const nsACString &aIndexName, bool *_exists) {
NS_IMETHODIMP
Connection::GetTransactionInProgress(bool *_inProgress) {
if (!mDBConn) return NS_ERROR_NOT_INITIALIZED;
nsresult rv = connectionReady(SYNCHRONOUS);
if (NS_FAILED(rv)) {
return rv;
}
SQLiteMutexAutoLock lockedScope(sharedDBMutex);
*_inProgress = mTransactionInProgress;
@ -1803,7 +1874,10 @@ Connection::SetDefaultTransactionType(int32_t aType) {
NS_IMETHODIMP
Connection::BeginTransaction() {
if (!mDBConn) return NS_ERROR_NOT_INITIALIZED;
nsresult rv = connectionReady(SYNCHRONOUS);
if (NS_FAILED(rv)) {
return rv;
}
return beginTransactionInternal(mDBConn, mDefaultTransactionType);
}
@ -1832,7 +1906,10 @@ nsresult Connection::beginTransactionInternal(sqlite3 *aNativeConnection,
NS_IMETHODIMP
Connection::CommitTransaction() {
if (!mDBConn) return NS_ERROR_NOT_INITIALIZED;
nsresult rv = connectionReady(SYNCHRONOUS);
if (NS_FAILED(rv)) {
return rv;
}
return commitTransactionInternal(mDBConn);
}
@ -1848,7 +1925,10 @@ nsresult Connection::commitTransactionInternal(sqlite3 *aNativeConnection) {
NS_IMETHODIMP
Connection::RollbackTransaction() {
if (!mDBConn) return NS_ERROR_NOT_INITIALIZED;
nsresult rv = connectionReady(SYNCHRONOUS);
if (NS_FAILED(rv)) {
return rv;
}
return rollbackTransactionInternal(mDBConn);
}
@ -1865,7 +1945,10 @@ nsresult Connection::rollbackTransactionInternal(sqlite3 *aNativeConnection) {
NS_IMETHODIMP
Connection::CreateTable(const char *aTableName, const char *aTableSchema) {
if (!mDBConn) return NS_ERROR_NOT_INITIALIZED;
nsresult rv = connectionReady(SYNCHRONOUS);
if (NS_FAILED(rv)) {
return rv;
}
SmprintfPointer buf =
::mozilla::Smprintf("CREATE TABLE %s (%s)", aTableName, aTableSchema);
@ -1880,7 +1963,10 @@ NS_IMETHODIMP
Connection::CreateFunction(const nsACString &aFunctionName,
int32_t aNumArguments,
mozIStorageFunction *aFunction) {
if (!mDBConn) return NS_ERROR_NOT_INITIALIZED;
nsresult rv = connectionReady(ASYNCHRONOUS);
if (NS_FAILED(rv)) {
return rv;
}
// Check to see if this function is already defined. We only check the name
// because a function can be defined with the same body but different names.
@ -1903,7 +1989,10 @@ NS_IMETHODIMP
Connection::CreateAggregateFunction(const nsACString &aFunctionName,
int32_t aNumArguments,
mozIStorageAggregateFunction *aFunction) {
if (!mDBConn) return NS_ERROR_NOT_INITIALIZED;
nsresult rv = connectionReady(ASYNCHRONOUS);
if (NS_FAILED(rv)) {
return rv;
}
// Check to see if this function name is already defined.
SQLiteMutexAutoLock lockedScope(sharedDBMutex);
@ -1929,7 +2018,10 @@ Connection::CreateAggregateFunction(const nsACString &aFunctionName,
NS_IMETHODIMP
Connection::RemoveFunction(const nsACString &aFunctionName) {
if (!mDBConn) return NS_ERROR_NOT_INITIALIZED;
nsresult rv = connectionReady(ASYNCHRONOUS);
if (NS_FAILED(rv)) {
return rv;
}
SQLiteMutexAutoLock lockedScope(sharedDBMutex);
NS_ENSURE_TRUE(mFunctions.Get(aFunctionName, nullptr), NS_ERROR_FAILURE);
@ -1948,7 +2040,10 @@ NS_IMETHODIMP
Connection::SetProgressHandler(int32_t aGranularity,
mozIStorageProgressHandler *aHandler,
mozIStorageProgressHandler **_oldHandler) {
if (!mDBConn) return NS_ERROR_NOT_INITIALIZED;
nsresult rv = connectionReady(ASYNCHRONOUS);
if (NS_FAILED(rv)) {
return rv;
}
// Return previous one
SQLiteMutexAutoLock lockedScope(sharedDBMutex);
@ -1981,13 +2076,17 @@ Connection::RemoveProgressHandler(mozIStorageProgressHandler **_oldHandler) {
NS_IMETHODIMP
Connection::SetGrowthIncrement(int32_t aChunkSize,
const nsACString &aDatabaseName) {
nsresult rv = connectionReady(SYNCHRONOUS);
if (NS_FAILED(rv)) {
return rv;
}
// Bug 597215: Disk space is extremely limited on Android
// so don't preallocate space. This is also not effective
// on log structured file systems used by Android devices
#if !defined(ANDROID) && !defined(MOZ_PLATFORM_MAEMO)
// Don't preallocate if less than 500MiB is available.
int64_t bytesAvailable;
nsresult rv = mDatabaseFile->GetDiskSpaceAvailable(&bytesAvailable);
rv = mDatabaseFile->GetDiskSpaceAvailable(&bytesAvailable);
NS_ENSURE_SUCCESS(rv, rv);
if (bytesAvailable < MIN_AVAILABLE_BYTES_PER_CHUNKED_GROWTH) {
return NS_ERROR_FILE_TOO_BIG;
@ -2004,7 +2103,10 @@ Connection::SetGrowthIncrement(int32_t aChunkSize,
NS_IMETHODIMP
Connection::EnableModule(const nsACString &aModuleName) {
if (!mDBConn) return NS_ERROR_NOT_INITIALIZED;
nsresult rv = connectionReady(SYNCHRONOUS);
if (NS_FAILED(rv)) {
return rv;
}
for (auto &gModule : gModules) {
struct Module *m = &gModule;
@ -2028,8 +2130,9 @@ Connection::GetQuotaObjects(QuotaObject **aDatabaseQuotaObject,
MOZ_ASSERT(aDatabaseQuotaObject);
MOZ_ASSERT(aJournalQuotaObject);
if (!mDBConn) {
return NS_ERROR_NOT_INITIALIZED;
nsresult rv = connectionReady(SYNCHRONOUS);
if (NS_FAILED(rv)) {
return rv;
}
sqlite3_file *file;

View File

@ -44,6 +44,15 @@ class Connection final : public mozIStorageConnection,
NS_DECL_MOZISTORAGECONNECTION
NS_DECL_NSIINTERFACEREQUESTOR
/**
* Indicates if a database operation is synchronous or asynchronous.
*
* - Async operations may be called from any thread for all connections.
* - Sync operations may be called from any thread for sync connections, and
* from background threads for async connections.
*/
enum ConnectionOperation { ASYNCHRONOUS, SYNCHRONOUS };
/**
* Structure used to describe user functions on the database connection.
*/
@ -61,11 +70,11 @@ class Connection final : public mozIStorageConnection,
* connection.
* @param aFlags
* The flags to pass to sqlite3_open_v2.
* @param aAsyncOnly
* If |true|, the Connection only implements asynchronous interface:
* - |mozIStorageAsyncConnection|;
* If |false|, the result also implements synchronous interface:
* - |mozIStorageConnection|.
* @param aSupportedOperations
* The operation types supported on this connection. All connections
* implement both the async (`mozIStorageAsyncConnection`) and sync
* (`mozIStorageConnection`) interfaces, but async connections may not
* call sync operations from the main thread.
* @param aIgnoreLockingMode
* If |true|, ignore locks in force on the file. Only usable with
* read-only connections. Defaults to false.
@ -74,7 +83,8 @@ class Connection final : public mozIStorageConnection,
* corrupt) or produce wrong results without any indication that has
* happened.
*/
Connection(Service *aService, int aFlags, bool aAsyncOnly,
Connection(Service *aService, int aFlags,
ConnectionOperation aSupportedOperations,
bool aIgnoreLockingMode = false);
/**
@ -225,7 +235,17 @@ class Connection final : public mozIStorageConnection,
nsresult commitTransactionInternal(sqlite3 *aNativeConnection);
nsresult rollbackTransactionInternal(sqlite3 *aNativeConnection);
bool connectionReady();
/**
* Indicates if this database connection is ready and supports the given
* operation.
*
* @param aOperationType
* The operation type, sync or async.
* @throws NS_ERROR_NOT_AVAILABLE if the operation isn't supported on this
* connection.
* @throws NS_ERROR_NOT_INITIALIZED if the connection isn't set up.
*/
nsresult connectionReady(ConnectionOperation aOperationType);
/**
* Thread-aware version of connectionReady, results per caller's thread are:
@ -418,10 +438,11 @@ class Connection final : public mozIStorageConnection,
RefPtr<Service> mStorageService;
/**
* If |false|, this instance supports synchronous operations
* and it can be cast to |mozIStorageConnection|.
* Indicates which operations are supported on this connection.
*/
const bool mAsyncOnly;
const ConnectionOperation mSupportedOperations;
nsresult synchronousClose();
};
/**

View File

@ -119,7 +119,8 @@ Service::CollectReports(nsIHandleReportCallback *aHandleReport,
// main-thread, like the DOM Cache and IndexedDB, and as such we must be
// sure that we have a connection.
MutexAutoLock lockedAsyncScope(conn->sharedAsyncExecutionMutex);
if (!conn->connectionReady()) {
nsresult rv = conn->connectionReady(Connection::ASYNCHRONOUS);
if (NS_FAILED(rv)) {
continue;
}
@ -303,7 +304,10 @@ void Service::minimizeMemory() {
RefPtr<Connection> conn = connections[i];
// For non-main-thread owning/opening threads, we may be racing against them
// closing their connection or their thread. That's okay, see below.
if (!conn->connectionReady()) continue;
nsresult rv = conn->connectionReady(Connection::ASYNCHRONOUS);
if (NS_FAILED(rv)) {
continue;
}
NS_NAMED_LITERAL_CSTRING(shrinkPragma, "PRAGMA shrink_memory");
nsCOMPtr<mozIStorageConnection> syncConn = do_QueryInterface(
@ -453,7 +457,8 @@ Service::OpenSpecialDatabase(const char *aStorageKey,
return NS_ERROR_INVALID_ARG;
}
RefPtr<Connection> msc = new Connection(this, SQLITE_OPEN_READWRITE, false);
RefPtr<Connection> msc =
new Connection(this, SQLITE_OPEN_READWRITE, Connection::SYNCHRONOUS);
rv = storageFile ? msc->initialize(storageFile) : msc->initialize();
NS_ENSURE_SUCCESS(rv, rv);
@ -604,7 +609,8 @@ Service::OpenAsyncDatabase(nsIVariant *aDatabaseStore,
}
// Create connection on this thread, but initialize it on its helper thread.
RefPtr<Connection> msc = new Connection(this, flags, true, ignoreLockingMode);
RefPtr<Connection> msc =
new Connection(this, flags, Connection::ASYNCHRONOUS, ignoreLockingMode);
nsCOMPtr<nsIEventTarget> target = msc->getAsyncExecutionTarget();
MOZ_ASSERT(target,
"Cannot initialize a connection that has been closed already");
@ -623,7 +629,7 @@ Service::OpenDatabase(nsIFile *aDatabaseFile,
// reasons.
int flags =
SQLITE_OPEN_READWRITE | SQLITE_OPEN_SHAREDCACHE | SQLITE_OPEN_CREATE;
RefPtr<Connection> msc = new Connection(this, flags, false);
RefPtr<Connection> msc = new Connection(this, flags, Connection::SYNCHRONOUS);
nsresult rv = msc->initialize(aDatabaseFile);
NS_ENSURE_SUCCESS(rv, rv);
@ -641,7 +647,7 @@ Service::OpenUnsharedDatabase(nsIFile *aDatabaseFile,
// reasons.
int flags =
SQLITE_OPEN_READWRITE | SQLITE_OPEN_PRIVATECACHE | SQLITE_OPEN_CREATE;
RefPtr<Connection> msc = new Connection(this, flags, false);
RefPtr<Connection> msc = new Connection(this, flags, Connection::SYNCHRONOUS);
nsresult rv = msc->initialize(aDatabaseFile);
NS_ENSURE_SUCCESS(rv, rv);
@ -659,7 +665,7 @@ Service::OpenDatabaseWithFileURL(nsIFileURL *aFileURL,
// reasons.
int flags = SQLITE_OPEN_READWRITE | SQLITE_OPEN_SHAREDCACHE |
SQLITE_OPEN_CREATE | SQLITE_OPEN_URI;
RefPtr<Connection> msc = new Connection(this, flags, false);
RefPtr<Connection> msc = new Connection(this, flags, Connection::SYNCHRONOUS);
nsresult rv = msc->initialize(aFileURL);
NS_ENSURE_SUCCESS(rv, rv);

View File

@ -675,6 +675,52 @@ Statement::GetString(uint32_t aIndex, nsAString &_value) {
return NS_OK;
}
NS_IMETHODIMP
Statement::GetVariant(uint32_t aIndex, nsIVariant **_value) {
  // Returns the value of column `aIndex` for the current row as a variant,
  // preserving the underlying SQLite storage class (integer, float, text,
  // blob, or null).
  if (!mDBStatement) {
    return NS_ERROR_NOT_INITIALIZED;
  }

  ENSURE_INDEX_VALUE(aIndex, mResultColumnCount);

  // Column data is only valid while the statement is stepping through a
  // result set.
  if (!mExecuting) {
    return NS_ERROR_UNEXPECTED;
  }

  nsCOMPtr<nsIVariant> variant;
  int type = ::sqlite3_column_type(mDBStatement, aIndex);
  switch (type) {
    case SQLITE_INTEGER:
      variant =
          new IntegerVariant(::sqlite3_column_int64(mDBStatement, aIndex));
      break;
    case SQLITE_FLOAT:
      variant = new FloatVariant(::sqlite3_column_double(mDBStatement, aIndex));
      break;
    case SQLITE_TEXT: {
      // sqlite3_column_bytes16 returns the length in *bytes*; divide by 2
      // to get the number of UTF-16 code units for the dependent string.
      const char16_t *value = static_cast<const char16_t *>(
          ::sqlite3_column_text16(mDBStatement, aIndex));
      nsDependentString str(value,
                            ::sqlite3_column_bytes16(mDBStatement, aIndex) / 2);
      variant = new TextVariant(str);
      break;
    }
    case SQLITE_NULL:
      variant = new NullVariant();
      break;
    case SQLITE_BLOB: {
      int size = ::sqlite3_column_bytes(mDBStatement, aIndex);
      const void *data = ::sqlite3_column_blob(mDBStatement, aIndex);
      variant = new BlobVariant(std::pair<const void *, int>(data, size));
      break;
    }
  }
  // An unrecognized column type leaves `variant` null; fail rather than
  // handing out a null outparam.
  NS_ENSURE_TRUE(variant, NS_ERROR_UNEXPECTED);

  variant.forget(_value);
  return NS_OK;
}
NS_IMETHODIMP
Statement::GetBlob(uint32_t aIndex, uint32_t *_size, uint8_t **_blob) {
if (!mDBStatement) return NS_ERROR_NOT_INITIALIZED;

13
storage/rust/Cargo.toml Normal file
View File

@ -0,0 +1,13 @@
[package]
name = "storage"
description = "Rust bindings for mozStorage."
version = "0.1.0"
authors = ["Lina Cambridge <lina@yakshaving.ninja>"]
edition = "2018"
[dependencies]
libc = "0.2"
nserror = { path = "../../xpcom/rust/nserror" }
nsstring = { path = "../../xpcom/rust/nsstring" }
storage_variant = { path = "../variant" }
xpcom = { path = "../../xpcom/rust/xpcom" }

281
storage/rust/src/lib.rs Normal file
View File

@ -0,0 +1,281 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! A Rust wrapper for mozStorage.
//!
//! mozStorage wraps the SQLite C API with support for XPCOM data structures,
//! asynchronous statement execution, cleanup on shutdown, and connection
//! cloning that propagates attached databases, pragmas, functions, and
//! temporary entities. It also collects timing and memory usage stats for
//! telemetry, and supports detailed statement logging. Additionally, mozStorage
//! makes it possible to use the same connection handle from JS and native
//! (C++ and Rust) code.
//!
//! Most mozStorage objects, like connections, statements, result rows,
//! and variants, are thread-safe. Each connection manages a background
//! thread that can be used to execute statements asynchronously, without
//! blocking the main thread.
//!
//! This crate provides a thin wrapper to make mozStorage easier to use
//! from Rust. It only wraps the synchronous API, so you can either manage
//! the entire connection from a background thread, or use the `moz_task`
//! crate to dispatch tasks to the connection's async thread. Executing
//! synchronous statements on the main thread is not supported, and will
//! assert in debug builds.
#![allow(non_snake_case)]
use std::{ops::Deref, result};
use nserror::{nsresult, NS_ERROR_NO_INTERFACE};
use nsstring::nsCString;
use storage_variant::VariantType;
use xpcom::{
getter_addrefs,
interfaces::{
mozIStorageAsyncConnection, mozIStorageConnection, mozIStorageStatement, nsIEventTarget,
nsIThread,
},
RefPtr, XpCom,
};
pub type Result<T> = result::Result<T, nsresult>;
/// `Conn` wraps a `mozIStorageConnection`.
///
/// Cloning a `Conn` clones the underlying `RefPtr`, so all clones share the
/// same connection handle.
#[derive(Clone)]
pub struct Conn {
    handle: RefPtr<mozIStorageConnection>,
}

// This is safe as long as our `mozIStorageConnection` is an instance of
// `mozilla::storage::Connection`, which is atomically reference counted.
unsafe impl Send for Conn {}
unsafe impl Sync for Conn {}
impl Conn {
    /// Wraps a `mozIStorageConnection` in a `Conn`.
    #[inline]
    pub fn wrap(connection: RefPtr<mozIStorageConnection>) -> Conn {
        Conn { handle: connection }
    }

    /// Returns the wrapped `mozIStorageConnection`.
    #[inline]
    pub fn connection(&self) -> &mozIStorageConnection {
        &self.handle
    }

    /// Returns the async thread for this connection. This can be used
    /// with `moz_task` to run synchronous statements on the storage
    /// thread, without blocking the main thread.
    ///
    /// Returns `NS_ERROR_NO_INTERFACE` if either the event target or its
    /// `nsIThread` interface is unavailable.
    pub fn thread(&self) -> Result<RefPtr<nsIThread>> {
        let target = self.handle.get_interface::<nsIEventTarget>();
        // `None` from either lookup collapses into `NS_ERROR_NO_INTERFACE`.
        target
            .and_then(|t| t.query_interface::<nsIThread>())
            .ok_or(NS_ERROR_NO_INTERFACE)
    }

    /// Prepares a SQL statement. `query` should only contain one SQL statement.
    /// If `query` contains multiple statements, only the first will be prepared,
    /// and the rest will be ignored.
    pub fn prepare<Q: AsRef<str>>(&self, query: Q) -> Result<Statement> {
        let statement = getter_addrefs(|p| unsafe {
            self.handle
                .CreateStatement(&*nsCString::from(query.as_ref()), p)
        })?;
        Ok(Statement { handle: statement })
    }

    /// Executes a SQL statement. `query` may contain one or more
    /// semicolon-separated SQL statements.
    pub fn exec<Q: AsRef<str>>(&self, query: Q) -> Result<()> {
        unsafe {
            self.handle
                .ExecuteSimpleSQL(&*nsCString::from(query.as_ref()))
        }
        .to_result()
    }

    /// Opens a transaction with the default transaction behavior for this
    /// connection. The transaction should be committed when done. Uncommitted
    /// `Transaction`s will automatically roll back when they go out of scope.
    pub fn transaction(&mut self) -> Result<Transaction> {
        let behavior = self.get_default_transaction_behavior();
        Transaction::new(self, behavior)
    }

    /// Opens a transaction with the requested behavior.
    pub fn transaction_with_behavior(
        &mut self,
        behavior: TransactionBehavior,
    ) -> Result<Transaction> {
        Transaction::new(self, behavior)
    }

    /// Maps the connection's default transaction type to a
    /// `TransactionBehavior`, falling back to `Deferred` if the type can't
    /// be read or isn't a recognized constant.
    fn get_default_transaction_behavior(&self) -> TransactionBehavior {
        let mut typ = 0i32;
        let rv = unsafe { self.handle.GetDefaultTransactionType(&mut typ) };
        if rv.failed() {
            return TransactionBehavior::Deferred;
        }
        // The XPIDL constants are `i64`; widen before matching.
        match typ as i64 {
            mozIStorageAsyncConnection::TRANSACTION_IMMEDIATE => TransactionBehavior::Immediate,
            mozIStorageAsyncConnection::TRANSACTION_EXCLUSIVE => TransactionBehavior::Exclusive,
            _ => TransactionBehavior::Deferred,
        }
    }
}
/// The behavior to use when opening a transaction; controls when SQLite
/// acquires the database lock (see the SQLite `BEGIN` documentation).
pub enum TransactionBehavior {
    Deferred,
    Immediate,
    Exclusive,
}
/// An open SQL transaction on a `Conn`. `active` tracks whether a `COMMIT`
/// or `ROLLBACK` has already been issued, so the transaction is finished at
/// most once.
pub struct Transaction<'c> {
    conn: &'c mut Conn,
    active: bool,
}
impl<'c> Transaction<'c> {
    /// Opens a transaction on `conn` with the given `behavior`.
    fn new(conn: &'c mut Conn, behavior: TransactionBehavior) -> Result<Transaction<'c>> {
        let begin = match behavior {
            TransactionBehavior::Deferred => "BEGIN DEFERRED",
            TransactionBehavior::Immediate => "BEGIN IMMEDIATE",
            TransactionBehavior::Exclusive => "BEGIN EXCLUSIVE",
        };
        conn.exec(begin)?;
        Ok(Transaction { conn, active: true })
    }

    /// Commits the transaction.
    pub fn commit(mut self) -> Result<()> {
        if !self.active {
            return Ok(());
        }
        self.conn.exec("COMMIT")?;
        self.active = false;
        Ok(())
    }

    /// Rolls the transaction back.
    pub fn rollback(mut self) -> Result<()> {
        self.abort()
    }

    /// Rolls back an active transaction, and marks it as finished. A no-op
    /// if the transaction was already committed or rolled back.
    fn abort(&mut self) -> Result<()> {
        if !self.active {
            return Ok(());
        }
        self.conn.exec("ROLLBACK")?;
        self.active = false;
        Ok(())
    }
}
// Dereferencing a `Transaction` yields the underlying connection, so
// statements can be prepared and executed inside the transaction.
impl<'c> Deref for Transaction<'c> {
    type Target = Conn;

    fn deref(&self) -> &Conn {
        self.conn
    }
}
// Uncommitted transactions roll back automatically when they go out of
// scope. Rollback failures are ignored here: there's no useful way to
// report an error from `drop`.
impl<'c> Drop for Transaction<'c> {
    fn drop(&mut self) {
        let _ = self.abort();
    }
}
/// A prepared `mozIStorageStatement`, finalized automatically on drop.
pub struct Statement {
    handle: RefPtr<mozIStorageStatement>,
}
impl Statement {
    /// Binds a parameter at the given `index` to the prepared statement.
    /// `value` is any type that can be converted into a `Variant`.
    pub fn bind_by_index<V: VariantType>(&mut self, index: u32, value: V) -> Result<()> {
        let variant = value.into_variant();
        unsafe { self.handle.BindByIndex(index, variant.coerce()) }.to_result()
    }

    /// Binds a parameter with the given `name` to the prepared statement.
    pub fn bind_by_name<N: AsRef<str>, V: VariantType>(&mut self, name: N, value: V) -> Result<()> {
        let variant = value.into_variant();
        unsafe {
            self.handle
                .BindByName(&*nsCString::from(name.as_ref()), variant.coerce())
        }
        .to_result()
    }

    /// Executes the statement and returns the next row of data. Returns
    /// `Ok(None)` once the result set is exhausted.
    pub fn step<'a>(&'a mut self) -> Result<Option<Step<'a>>> {
        let mut has_more = false;
        unsafe { self.handle.ExecuteStep(&mut has_more) }.to_result()?;
        Ok(if has_more { Some(Step(self)) } else { None })
    }

    /// Executes the statement once, discards any data, and resets the
    /// statement.
    pub fn execute(&mut self) -> Result<()> {
        unsafe { self.handle.Execute() }.to_result()
    }

    /// Resets the prepared statement so that it's ready to be executed
    /// again, and clears any bound parameters.
    pub fn reset(&mut self) -> Result<()> {
        unsafe { self.handle.Reset() }.to_result()
    }

    /// Looks up the index of the result column named `name`.
    fn get_column_index<N: AsRef<str>>(&self, name: N) -> Result<u32> {
        let mut index = 0u32;
        unsafe {
            self.handle
                .GetColumnIndex(&*nsCString::from(name.as_ref()), &mut index)
        }
        .to_result()
        .map(|_| index)
    }

    /// Fetches the value of column `index` for the current row, converted
    /// to `T` via its `VariantType` impl.
    fn get_variant<T: VariantType>(&self, index: u32) -> Result<T> {
        let variant = getter_addrefs(|p| unsafe { self.handle.GetVariant(index, p) })?;
        T::from_variant(variant.coerce())
    }
}
// Finalize the underlying statement handle when the wrapper goes away.
// The result of `Finalize` is ignored: cleanup in `drop` is best-effort.
impl Drop for Statement {
    fn drop(&mut self) {
        unsafe { self.handle.Finalize() };
    }
}
/// A step is the next row in the result set for a statement. Borrowing the
/// statement mutably ensures the row can't outlive the statement, or be
/// read after the statement steps again.
pub struct Step<'a>(&'a mut Statement);

impl<'a> Step<'a> {
    /// Returns the value of the column at `index` for the current row.
    pub fn get_by_index<T: VariantType>(&self, index: u32) -> Result<T> {
        self.0.get_variant(index)
    }

    /// A convenience wrapper that returns the default value for `T` if the
    /// column at `index` is `NULL` or its value can't be fetched.
    pub fn get_by_index_or_default<T: VariantType + Default>(&self, index: u32) -> T {
        self.get_by_index(index).unwrap_or_default()
    }

    /// Returns the value of the column specified by `name` for the current row.
    pub fn get_by_name<N: AsRef<str>, T: VariantType>(&self, name: N) -> Result<T> {
        let index = self.0.get_column_index(name)?;
        self.0.get_variant(index)
    }

    /// Returns the value of the column with the given `name`, or the default
    /// value for `T` if the column is `NULL` or its value can't be fetched.
    pub fn get_by_name_or_default<N: AsRef<str>, T: VariantType + Default>(&self, name: N) -> T {
        self.get_by_name(name).unwrap_or_default()
    }
}

View File

@ -314,7 +314,7 @@ async function standardAsyncTest(promisedDB, name, shouldInit = false) {
let adb = await promisedDB;
Assert.ok(adb instanceof Ci.mozIStorageAsyncConnection);
Assert.equal(false, adb instanceof Ci.mozIStorageConnection);
Assert.ok(adb instanceof Ci.mozIStorageConnection);
if (shouldInit) {
let stmt = adb.createAsyncStatement("CREATE TABLE test(name TEXT)");
@ -440,7 +440,7 @@ add_task(async function test_clone_trivial_async() {
info("AsyncClone connection");
let clone = await asyncClone(db, true);
Assert.ok(clone instanceof Ci.mozIStorageAsyncConnection);
Assert.equal(false, clone instanceof Ci.mozIStorageConnection);
Assert.ok(clone instanceof Ci.mozIStorageConnection);
info("Close connection");
await asyncClose(db);
info("Close clone");
@ -452,7 +452,7 @@ add_task(async function test_clone_no_optional_param_async() {
info("Testing async cloning");
let adb1 = await openAsyncDatabase(getTestDB(), null);
Assert.ok(adb1 instanceof Ci.mozIStorageAsyncConnection);
Assert.equal(false, adb1 instanceof Ci.mozIStorageConnection);
Assert.ok(adb1 instanceof Ci.mozIStorageConnection);
info("Cloning database");
@ -460,7 +460,7 @@ add_task(async function test_clone_no_optional_param_async() {
info("Testing that the cloned db is a mozIStorageAsyncConnection " +
"and not a mozIStorageConnection");
Assert.ok(adb2 instanceof Ci.mozIStorageAsyncConnection);
Assert.equal(false, adb2 instanceof Ci.mozIStorageConnection);
Assert.ok(adb2 instanceof Ci.mozIStorageConnection);
info("Inserting data into source db");
let stmt = adb1.

View File

@ -142,6 +142,21 @@ impl VariantType for () {
}
}
// `Option<T>` round-trips through variants: `None` maps to an empty
// variant, and `Some(v)` maps to `v`'s own variant representation.
impl<T> VariantType for Option<T> where T: VariantType {
    /// Converts `Some(v)` into `v`'s variant, and `None` into the unit
    /// (empty) variant.
    fn into_variant(self) -> RefPtr<nsIVariant> {
        match self {
            Some(v) => v.into_variant(),
            None => ().into_variant(),
        }
    }

    /// Returns `Ok(None)` for empty variants, and otherwise defers to `T`'s
    /// conversion, propagating its errors.
    fn from_variant(variant: &nsIVariant) -> Result<Self, nsresult> {
        match variant.get_data_type() {
            DATA_TYPE_EMPTY => Ok(None),
            _ => Ok(Some(VariantType::from_variant(variant)?)),
        }
    }
}
variant!(bool, NS_NewStorageBooleanVariant, GetAsBool);
variant!(i32, NS_NewStorageIntegerVariant, GetAsInt32);
variant!(i64, NS_NewStorageIntegerVariant, GetAsInt64);

View File

@ -1,4 +1,7 @@
[createImageBitmap-transfer.html]
disabled:
if debug and (os == "linux"): https://bugzilla.mozilla.org/show_bug.cgi?id=1524653
if debug and (os == "win") and (bits == 32): https://bugzilla.mozilla.org/show_bug.cgi?id=1524653
if debug and (os == "mac"): https://bugzilla.mozilla.org/show_bug.cgi?id=1524653
[Transfer ImageBitmap created from an OffscreenCanvas]
expected: FAIL

View File

@ -0,0 +1,3 @@
[track-cue-rendering-empty-cue.html]
disabled:
if (os == "linux"): https://bugzilla.mozilla.org/show_bug.cgi?id=1535847

View File

@ -0,0 +1 @@
{"files":{"Cargo.toml":"f427f0dba2855a2e32ccaf3258e6517a90c2b69f5b7a9c34d8669d4a83fb84e7","LICENSE":"c71d239df91726fc519c6eb72d318ec65820627232b2f796219e87dcf35d0ab4","README.md":"303ea5ec53d4e86f2c321056e8158e31aa061353a99e52de3d76859d40919efc","src/driver.rs":"10ecce90c6dee4e7b0ecd87f6d4f10c3cb825b544c6413416167248755097ab2","src/error.rs":"c6e661a7b94119dc8770c482681e97e644507e37f0ba32c04cc3a0b43e7b0077","src/guid.rs":"0330e6e893a550e478c8ac678114ebc112add97cb1d5d803d65cda6588ce7ba5","src/lib.rs":"ef42d0d3b234ffb6e459550f36a5f9220a0dd5fd09867affc7f8f9fe0b5430f2","src/merge.rs":"2d94c9507725de7477d7dc4ca372c721e770d049f57ff4c14a2006e346231a40","src/store.rs":"612d90ea0614aa7cc943c4ac0faaee35c155f57b553195ac28518ae7c0b8ebb1","src/tests.rs":"e5a3a1b9b4cefda9b871348a739f2d66e05a940ad14fb72515cea373f9f3be8b","src/tree.rs":"17d5640e42dcbd979f4e5cc8c52c4b2b634f80596a3504baa40b2e6b55f213b8"},"package":"bcecbcd636b901efb0b61eea73972bda173c02c98a07fc66dd76e8ee1421ffbf"}

28
third_party/rust/dogear/Cargo.toml vendored Normal file
View File

@ -0,0 +1,28 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g. crates.io) dependencies
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)
[package]
edition = "2018"
name = "dogear"
version = "0.2.2"
authors = ["Lina Cambridge <lina@mozilla.com>"]
exclude = ["/.travis/**", ".travis.yml"]
description = "A library for merging bookmark trees."
license = "Apache-2.0"
repository = "https://github.com/mozilla/dogear"
[dependencies.log]
version = "0.4"
[dependencies.smallbitvec]
version = "2.3.0"
[dev-dependencies.env_logger]
version = "0.5.6"

201
third_party/rust/dogear/LICENSE vendored Normal file
View File

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

9
third_party/rust/dogear/README.md vendored Normal file
View File

@ -0,0 +1,9 @@
# Dogear
**Dogear** is a library that implements bookmark tree merging for Firefox Sync. It takes two trees—a valid, consistent local tree, and a possibly inconsistent remote tree—and produces a complete merged tree, with all conflicts and inconsistencies resolved.
Dogear implements the merge algorithm only; it doesn't handle syncing, storage, or application. It's up to the crate that embeds Dogear to store local and incoming bookmarks, describe how to build a tree from a storage backend, persist the merged tree back to storage, and upload records for changed bookmarks.
## Requirements
* Rust 1.31.0 or higher

151
third_party/rust/dogear/src/driver.rs vendored Normal file
View File

@ -0,0 +1,151 @@
// Copyright 2018-2019 Mozilla
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt::Arguments;
use log::{Level, LevelFilter, Log};
use crate::error::{ErrorKind, Result};
use crate::guid::Guid;
/// A merge driver provides methods to customize merging behavior.
///
/// Every method has a default implementation, so an empty `impl` (like
/// `DefaultDriver` below) is a valid driver.
pub trait Driver {
    /// Generates a new GUID for the given invalid GUID. This is used to fix up
    /// items with GUIDs that Places can't store (bug 1380606, bug 1313026).
    ///
    /// The default implementation returns an error, forbidding invalid GUIDs.
    ///
    /// Implementations of `Driver` can either use the `rand` and `base64`
    /// crates to generate a new, random GUID (9 bytes, Base64url-encoded
    /// without padding), or use an existing method like Desktop's
    /// `nsINavHistoryService::MakeGuid`. Dogear doesn't generate new GUIDs
    /// automatically to avoid depending on those crates.
    ///
    /// Implementations can also return `Ok(invalid_guid.clone())` to pass
    /// through all invalid GUIDs, as the tests do.
    fn generate_new_guid(&self, invalid_guid: &Guid) -> Result<Guid> {
        Err(ErrorKind::InvalidGuid(invalid_guid.clone()).into())
    }

    /// Returns the maximum log level for merge messages. The default
    /// implementation returns the `log` crate's global maximum level.
    fn max_log_level(&self) -> LevelFilter {
        log::max_level()
    }

    /// Returns a logger for merge messages.
    ///
    /// The default implementation returns the `log` crate's global logger.
    ///
    /// Implementations can override this method to return a custom logger,
    /// where using the global logger won't work. For example, Firefox Desktop
    /// has an existing Sync logging setup outside of the `log` crate.
    fn logger(&self) -> &dyn Log {
        log::logger()
    }
}
/// A default implementation of the merge driver: uses the `log` crate's
/// global logger and level, and rejects invalid GUIDs (all default trait
/// methods).
pub struct DefaultDriver;

impl Driver for DefaultDriver {}
/// Logs a merge message through the driver's logger, tagging it with the
/// caller's module path, file, and line.
pub fn log<D: Driver>(
    driver: &D,
    level: Level,
    args: Arguments,
    module_path: &'static str,
    file: &'static str,
    line: u32,
) {
    let metadata = log::Metadata::builder()
        .level(level)
        .target(module_path)
        .build();
    // Skip building the record entirely if the logger isn't interested.
    if !driver.logger().enabled(&metadata) {
        return;
    }
    let record = log::Record::builder()
        .args(args)
        .metadata(metadata)
        .module_path(Some(module_path))
        .file(Some(file))
        .line(Some(line))
        .build();
    driver.logger().log(&record);
}
// The macros below mirror the `log` crate's `error!`/`warn!`/`debug!`/
// `trace!`, but route through a `Driver`. Each one checks the driver's max
// level *before* `format_args!`, so disabled messages pay no formatting
// cost.
#[macro_export]
macro_rules! error {
    ($driver:expr, $($args:tt)+) => {
        if log::Level::Error <= $driver.max_log_level() {
            $crate::driver::log(
                $driver,
                log::Level::Error,
                format_args!($($args)+),
                module_path!(),
                file!(),
                line!(),
            );
        }
    }
}

// NOTE(review): unlike the other three macros, `warn!` has no
// `#[macro_export]`, so it's only visible inside this crate — presumably
// to avoid clashing with the standard `log::warn!`; confirm upstream.
macro_rules! warn {
    ($driver:expr, $($args:tt)+) => {
        if log::Level::Warn <= $driver.max_log_level() {
            $crate::driver::log(
                $driver,
                log::Level::Warn,
                format_args!($($args)+),
                module_path!(),
                file!(),
                line!(),
            );
        }
    }
}

#[macro_export]
macro_rules! debug {
    ($driver:expr, $($args:tt)+) => {
        if log::Level::Debug <= $driver.max_log_level() {
            $crate::driver::log(
                $driver,
                log::Level::Debug,
                format_args!($($args)+),
                module_path!(),
                file!(),
                line!(),
            );
        }
    }
}

#[macro_export]
macro_rules! trace {
    ($driver:expr, $($args:tt)+) => {
        if log::Level::Trace <= $driver.max_log_level() {
            $crate::driver::log(
                $driver,
                log::Level::Trace,
                format_args!($($args)+),
                module_path!(),
                file!(),
                line!(),
            );
        }
    }
}

109
third_party/rust/dogear/src/error.rs vendored Normal file
View File

@ -0,0 +1,109 @@
// Copyright 2018-2019 Mozilla
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::{error, fmt, result, str::Utf8Error, string::FromUtf16Error};
use crate::guid::Guid;
use crate::tree::Kind;
/// Convenience alias for results returned by merge operations.
pub type Result<T> = result::Result<T, Error>;

/// An error that can occur while building or merging bookmark trees.
/// Wraps an `ErrorKind` describing the failure.
#[derive(Debug)]
pub struct Error(ErrorKind);

impl Error {
    /// Returns the kind of this error.
    pub fn kind(&self) -> &ErrorKind {
        &self.0
    }
}

impl error::Error for Error {
    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
        // Only `MalformedString` wraps an underlying error; the other kinds
        // carry plain data and have no source.
        match self.kind() {
            ErrorKind::MalformedString(err) => Some(err.as_ref()),
            _ => None,
        }
    }
}

impl From<ErrorKind> for Error {
    fn from(kind: ErrorKind) -> Error {
        Error(kind)
    }
}

// UTF-16 and UTF-8 decoding failures are both folded into
// `ErrorKind::MalformedString`, preserving the original error as the source.
impl From<FromUtf16Error> for Error {
    fn from(error: FromUtf16Error) -> Error {
        Error(ErrorKind::MalformedString(error.into()))
    }
}

impl From<Utf8Error> for Error {
    fn from(error: Utf8Error) -> Error {
        Error(ErrorKind::MalformedString(error.into()))
    }
}
// Renders each error kind as a human-readable message; `MalformedString`
// delegates to the wrapped error's own `Display` impl.
impl fmt::Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self.kind() {
            ErrorKind::MismatchedItemKind(local_kind, remote_kind) => write!(
                f,
                "Can't merge local kind {} and remote kind {}",
                local_kind, remote_kind
            ),
            ErrorKind::DuplicateItem(guid) => write!(f, "Item {} already exists in tree", guid),
            ErrorKind::MissingItem(guid) => write!(f, "Item {} doesn't exist in tree", guid),
            ErrorKind::InvalidParent(child_guid, parent_guid) => write!(
                f,
                "Can't insert item {} into non-folder {}",
                child_guid, parent_guid
            ),
            ErrorKind::MissingParent(child_guid, parent_guid) => write!(
                f,
                "Can't insert item {} into nonexistent parent {}",
                child_guid, parent_guid
            ),
            ErrorKind::Cycle(guid) => write!(f, "Item {} can't contain itself", guid),
            ErrorKind::MergeConflict => write!(f, "Local tree changed during merge"),
            ErrorKind::UnmergedLocalItems => {
                write!(f, "Merged tree doesn't mention all items from local tree")
            }
            ErrorKind::UnmergedRemoteItems => {
                write!(f, "Merged tree doesn't mention all items from remote tree")
            }
            ErrorKind::InvalidGuid(invalid_guid) => {
                write!(f, "Merged tree contains invalid GUID {}", invalid_guid)
            }
            ErrorKind::InvalidByte(b) => write!(f, "Invalid byte {} in UTF-16 encoding", b),
            ErrorKind::MalformedString(err) => err.fmt(f),
        }
    }
}
/// The different kinds of merge errors. The `Display` impl for `Error`
/// renders each variant as a human-readable message.
#[derive(Debug)]
pub enum ErrorKind {
    /// The local and remote copies of an item have incompatible kinds.
    MismatchedItemKind(Kind, Kind),
    /// The item already exists in the tree.
    DuplicateItem(Guid),
    /// Tried to insert a child into an item that isn't a folder
    /// (child GUID, parent GUID).
    InvalidParent(Guid, Guid),
    /// Tried to insert a child into a parent that doesn't exist in the tree
    /// (child GUID, parent GUID).
    MissingParent(Guid, Guid),
    /// The item doesn't exist in the tree.
    MissingItem(Guid),
    /// The item would contain itself.
    Cycle(Guid),
    /// The local tree changed during the merge.
    MergeConflict,
    /// The merged tree doesn't mention all items from the local tree.
    UnmergedLocalItems,
    /// The merged tree doesn't mention all items from the remote tree.
    UnmergedRemoteItems,
    /// The merged tree contains an invalid GUID.
    InvalidGuid(Guid),
    /// A UTF-16 code unit couldn't be narrowed to a single byte.
    InvalidByte(u16),
    /// A string couldn't be decoded as UTF-8 or UTF-16.
    MalformedString(Box<dyn error::Error + Send + Sync + 'static>),
}

296
third_party/rust/dogear/src/guid.rs vendored Normal file
View File

@ -0,0 +1,296 @@
// Copyright 2018-2019 Mozilla
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::{
cmp::Ordering,
fmt,
hash::{Hash, Hasher},
ops, str,
};
use crate::error::{ErrorKind, Result};
/// A GUID for an item in a bookmark tree.
#[derive(Clone)]
pub struct Guid(Repr);

/// Indicates if the GUID is valid. Implemented for byte slices and GUIDs.
pub trait IsValidGuid {
    fn is_valid_guid(&self) -> bool;
}

/// The internal representation of a GUID. Valid GUIDs are 12 bytes, and contain
/// only Base64url characters; we can store them on the stack without a heap
/// allocation. However, both local and remote items might have invalid GUIDs,
/// in which case we fall back to a heap-allocated string.
#[derive(Clone)]
enum Repr {
    // Exactly 12 Base64url bytes, stored inline.
    Valid([u8; 12]),
    // Anything else, stored as an owned string.
    Invalid(String),
}

/// The Places root GUID, used to root all items in a bookmark tree.
pub const ROOT_GUID: Guid = Guid(Repr::Valid(*b"root________"));

/// The bookmarks toolbar GUID.
pub const TOOLBAR_GUID: Guid = Guid(Repr::Valid(*b"toolbar_____"));

/// The bookmarks menu GUID.
pub const MENU_GUID: Guid = Guid(Repr::Valid(*b"menu________"));

/// The "Other Bookmarks" GUID, used to hold items without a parent.
pub const UNFILED_GUID: Guid = Guid(Repr::Valid(*b"unfiled_____"));

/// The mobile bookmarks GUID.
pub const MOBILE_GUID: Guid = Guid(Repr::Valid(*b"mobile______"));

// Lookup table indexed by byte value: 1 marks a Base64url character
// (`-`, `0`-`9`, `A`-`Z`, `_`, `a`-`z`), 0 anything else. Byte 255 is
// never valid, so only 255 entries are needed.
const VALID_GUID_BYTES: [u8; 255] = [
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
    0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1,
    0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
];
impl Guid {
    /// Converts a UTF-8 byte slice to a GUID.
    pub fn from_utf8(b: &[u8]) -> Result<Guid> {
        let repr = if b.is_valid_guid() {
            // A valid GUID is exactly 12 bytes, so this copy can't panic.
            let mut bytes = [0u8; 12];
            bytes.copy_from_slice(b);
            Repr::Valid(bytes)
        } else {
            // Fall back to storing the (invalid) GUID as a string, which
            // fails if the bytes aren't UTF-8.
            match str::from_utf8(b) {
                Ok(s) => Repr::Invalid(s.into()),
                Err(err) => return Err(err.into()),
            }
        };
        Ok(Guid(repr))
    }

    /// Converts a UTF-16 byte slice to a GUID.
    pub fn from_utf16(b: &[u16]) -> Result<Guid> {
        let repr = if b.is_valid_guid() {
            let mut bytes = [0u8; 12];
            for (index, &byte) in b.iter().enumerate() {
                // Defensive narrowing check; `is_valid_guid` only accepts
                // code units in the Base64url range, so this shouldn't fire.
                if byte > u16::from(u8::max_value()) {
                    return Err(ErrorKind::InvalidByte(byte).into());
                }
                bytes[index] = byte as u8;
            }
            Repr::Valid(bytes)
        } else {
            match String::from_utf16(b) {
                Ok(s) => Repr::Invalid(s),
                Err(err) => return Err(err.into()),
            }
        };
        Ok(Guid(repr))
    }

    /// Returns the GUID as a byte slice.
    #[inline]
    pub fn as_bytes(&self) -> &[u8] {
        match self.0 {
            Repr::Valid(ref bytes) => bytes,
            Repr::Invalid(ref s) => s.as_ref(),
        }
    }

    /// Returns the GUID as a string slice.
    #[inline]
    pub fn as_str(&self) -> &str {
        // We actually could use from_utf8_unchecked here, and depending on how
        // often we end up doing this, it's arguable that we should. We know
        // already this is valid utf8, since we know that we only ever create
        // these while respecting is_valid (and moreover, we assert that
        // `s.is_char_boundary(12)` in `Guid::from`).
        match self.0 {
            Repr::Valid(ref bytes) => str::from_utf8(bytes).unwrap(),
            Repr::Invalid(ref s) => s,
        }
    }

    /// Indicates if the GUID is one of the four Places user content roots.
    #[inline]
    pub fn is_user_content_root(&self) -> bool {
        self == TOOLBAR_GUID || self == MENU_GUID || self == UNFILED_GUID || self == MOBILE_GUID
    }
}
impl IsValidGuid for Guid {
    #[inline]
    fn is_valid_guid(&self) -> bool {
        // Validity is decided at construction time, so this only inspects
        // the representation.
        match self.0 {
            Repr::Valid(_) => true,
            Repr::Invalid(_) => false,
        }
    }
}

impl<T: Copy + Into<usize>> IsValidGuid for [T] {
    /// Equivalent to `PlacesUtils.isValidGuid`.
    #[inline]
    fn is_valid_guid(&self) -> bool {
        // Exactly 12 elements, each indexing a 1 in the Base64url lookup
        // table; values beyond the table (>= 255) fail the `get`.
        self.len() == 12
            && self.iter().all(|&byte| {
                VALID_GUID_BYTES
                    .get(byte.into())
                    .map(|&b| b == 1)
                    .unwrap_or(false)
            })
    }
}

impl From<String> for Guid {
    #[inline]
    fn from(s: String) -> Guid {
        Guid::from(s.as_str())
    }
}

impl<'a> From<&'a str> for Guid {
    #[inline]
    fn from(s: &'a str) -> Guid {
        let repr = if s.as_bytes().is_valid_guid() {
            // A valid GUID is 12 ASCII bytes, so index 12 is necessarily a
            // char boundary; this assertion backs the safety argument in
            // `Guid::as_str`.
            assert!(s.is_char_boundary(12));
            let mut bytes = [0u8; 12];
            bytes.copy_from_slice(s.as_bytes());
            Repr::Valid(bytes)
        } else {
            Repr::Invalid(s.into())
        };
        Guid(repr)
    }
}
impl AsRef<str> for Guid {
    #[inline]
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

impl AsRef<[u8]> for Guid {
    #[inline]
    fn as_ref(&self) -> &[u8] {
        self.as_bytes()
    }
}

// Deref to `str` lets a GUID be passed anywhere a string slice is expected.
impl ops::Deref for Guid {
    type Target = str;

    #[inline]
    fn deref(&self) -> &str {
        self.as_str()
    }
}

// Ordering, equality, and hashing all go through `as_bytes`, so they're
// mutually consistent across both representations.
impl Ord for Guid {
    fn cmp(&self, other: &Guid) -> Ordering {
        self.as_bytes().cmp(other.as_bytes())
    }
}

impl PartialOrd for Guid {
    fn partial_cmp(&self, other: &Guid) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

// Allow direct comparison with str
impl PartialEq<str> for Guid {
    #[inline]
    fn eq(&self, other: &str) -> bool {
        self.as_str() == other
    }
}

impl<'a> PartialEq<&'a str> for Guid {
    #[inline]
    fn eq(&self, other: &&'a str) -> bool {
        self == *other
    }
}

impl PartialEq for Guid {
    #[inline]
    fn eq(&self, other: &Guid) -> bool {
        self.as_bytes() == other.as_bytes()
    }
}

impl<'a> PartialEq<Guid> for &'a Guid {
    #[inline]
    fn eq(&self, other: &Guid) -> bool {
        *self == other
    }
}

impl Eq for Guid {}

impl Hash for Guid {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.as_bytes().hash(state);
    }
}

// The default Debug impl is pretty unhelpful here.
impl fmt::Debug for Guid {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "Guid({:?})", self.as_str())
    }
}

impl fmt::Display for Guid {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(self.as_str())
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Exercises both the byte-slice and `Guid` validity paths against
    // known-good and known-bad GUIDs, including non-UTF-8 bytes.
    #[test]
    fn is_valid() {
        let valid_guids = &[
            "bookmarkAAAA",
            "menu________",
            "__folderBB__",
            "queryAAAAAAA",
        ];
        for s in valid_guids {
            assert!(s.as_bytes().is_valid_guid(), "{:?} should validate", s);
            assert!(Guid::from(*s).is_valid_guid());
        }

        // Too short, or containing non-Base64url characters.
        let invalid_guids = &["bookmarkAAA", "folder!", "b@dgu1d!"];
        for s in invalid_guids {
            assert!(!s.as_bytes().is_valid_guid(), "{:?} should not validate", s);
            assert!(!Guid::from(*s).is_valid_guid());
        }

        // 12 bytes long, but not valid UTF-8.
        let invalid_guid_bytes: &[[u8; 12]] =
            &[[113, 117, 101, 114, 121, 65, 225, 193, 65, 65, 65, 65]];
        for bytes in invalid_guid_bytes {
            assert!(!bytes.is_valid_guid(), "{:?} should not validate", bytes);
            Guid::from_utf8(bytes).expect_err("Should not make GUID from invalid UTF-8");
        }
    }
}

35
third_party/rust/dogear/src/lib.rs vendored Normal file
View File

@ -0,0 +1,35 @@
// Copyright 2018-2019 Mozilla
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#[macro_use]
mod driver;
mod error;
mod guid;
mod merge;
#[macro_use]
mod store;
mod tree;
#[cfg(test)]
mod tests;
pub use crate::driver::{DefaultDriver, Driver};
pub use crate::error::{Error, ErrorKind, Result};
pub use crate::guid::{Guid, MENU_GUID, MOBILE_GUID, ROOT_GUID, TOOLBAR_GUID, UNFILED_GUID};
pub use crate::merge::{Deletion, Merger, StructureCounts};
pub use crate::store::{MergeTimings, Stats, Store};
pub use crate::tree::{
Content, IntoTree, Item, Kind, MergeState, MergedDescendant, MergedNode, MergedRoot, Tree,
UploadReason, Validity,
};

1590
third_party/rust/dogear/src/merge.rs vendored Normal file

File diff suppressed because it is too large Load Diff

153
third_party/rust/dogear/src/store.rs vendored Normal file
View File

@ -0,0 +1,153 @@
// Copyright 2018-2019 Mozilla
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::{collections::HashMap, time::Duration};
use crate::driver::{DefaultDriver, Driver};
use crate::error::{Error, ErrorKind};
use crate::guid::Guid;
use crate::merge::{Deletion, Merger, StructureCounts};
use crate::tree::{Content, MergedRoot, Tree};
/// Records timings and counters for telemetry.
#[derive(Clone, Debug, Default, Eq, PartialEq)]
pub struct Stats {
    // Wall-clock durations of each merge phase.
    pub timings: MergeTimings,
    // Structure counters reported by the merger.
    pub counts: StructureCounts,
}

/// Records timings for merging operations.
#[derive(Clone, Debug, Default, Eq, PartialEq)]
pub struct MergeTimings {
    pub fetch_local_tree: Duration,
    pub fetch_new_local_contents: Duration,
    pub fetch_remote_tree: Duration,
    pub fetch_new_remote_contents: Duration,
    pub merge: Duration,
    pub apply: Duration,
}

// Evaluates `$op`, stores its wall-clock duration into the `$name` field of
// `$timings`, and yields `$op`'s result.
macro_rules! time {
    ($timings:ident, $name:ident, $op:expr) => {{
        let now = std::time::Instant::now();
        let result = $op;
        $timings.$name = now.elapsed();
        result
    }};
}
/// A store is the main interface to Dogear. It implements methods for building
/// local and remote trees from a storage backend, fetching content info for
/// matching items with similar contents, and persisting the merged tree.
pub trait Store<E: From<Error>> {
    /// Builds a fully rooted, consistent tree from the items and tombstones in
    /// the local store.
    fn fetch_local_tree(&self) -> Result<Tree, E>;

    /// Fetches content info for all new local items that haven't been uploaded
    /// or merged yet. We'll try to dedupe them to remotely changed items with
    /// similar contents and different GUIDs.
    fn fetch_new_local_contents(&self) -> Result<HashMap<Guid, Content>, E>;

    /// Builds a fully rooted, consistent tree from the items and tombstones in
    /// the mirror.
    fn fetch_remote_tree(&self) -> Result<Tree, E>;

    /// Fetches content info for all items in the mirror that changed since the
    /// last sync and don't exist locally. We'll try to match new local items to
    /// these.
    fn fetch_new_remote_contents(&self) -> Result<HashMap<Guid, Content>, E>;

    /// Applies the merged root to the local store, and stages items for
    /// upload. On Desktop, this method inserts the merged tree into a temp
    /// table, updates Places, and inserts outgoing items into another
    /// temp table.
    fn apply<'t>(
        &mut self,
        root: MergedRoot<'t>,
        deletions: impl Iterator<Item = Deletion<'t>>,
    ) -> Result<(), E>;

    /// Builds and applies a merged tree using the default merge driver.
    fn merge(&mut self) -> Result<Stats, E> {
        self.merge_with_driver(&DefaultDriver)
    }

    /// Builds a complete merged tree from the local and remote trees, resolves
    /// conflicts, dedupes local items, and applies the merged tree using the
    /// given driver.
    fn merge_with_driver<D: Driver>(&mut self, driver: &D) -> Result<Stats, E> {
        // Every fetch/merge/apply phase is timed for telemetry via `time!`.
        let mut merge_timings = MergeTimings::default();
        let local_tree = time!(merge_timings, fetch_local_tree, { self.fetch_local_tree() })?;
        debug!(driver, "Built local tree from mirror\n{}", local_tree);
        let new_local_contents = time!(merge_timings, fetch_new_local_contents, {
            self.fetch_new_local_contents()
        })?;
        let remote_tree = time!(merge_timings, fetch_remote_tree, {
            self.fetch_remote_tree()
        })?;
        debug!(driver, "Built remote tree from mirror\n{}", remote_tree);
        let new_remote_contents = time!(merge_timings, fetch_new_remote_contents, {
            self.fetch_new_remote_contents()
        })?;
        let mut merger = Merger::with_driver(
            driver,
            &local_tree,
            &new_local_contents,
            &remote_tree,
            &new_remote_contents,
        );
        let merged_root = time!(merge_timings, merge, merger.merge())?;
        debug!(
            driver,
            "Built new merged tree\n{}\nDelete Locally: [{}]\nDelete Remotely: [{}]",
            merged_root.to_ascii_string(),
            merger
                .local_deletions()
                .map(|d| d.guid.as_str())
                .collect::<Vec<_>>()
                .join(", "),
            merger
                .remote_deletions()
                .map(|d| d.guid.as_str())
                .collect::<Vec<_>>()
                .join(", ")
        );
        // The merged tree should know about all items mentioned in the local
        // and remote trees. Otherwise, it's incomplete, and we can't apply it.
        // This indicates a bug in the merger.
        if !merger.subsumes(&local_tree) {
            // `Err(...)?` acts as an early return, converting through
            // `E: From<Error>`.
            Err(E::from(ErrorKind::UnmergedLocalItems.into()))?;
        }
        if !merger.subsumes(&remote_tree) {
            Err(E::from(ErrorKind::UnmergedRemoteItems.into()))?;
        }
        time!(
            merge_timings,
            apply,
            self.apply(merged_root, merger.deletions())
        )?;
        Ok(Stats {
            timings: merge_timings,
            counts: *merger.counts(),
        })
    }
}

2569
third_party/rust/dogear/src/tests.rs vendored Normal file

File diff suppressed because it is too large Load Diff

1447
third_party/rust/dogear/src/tree.rs vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@ -43,7 +43,7 @@ skip-if = serviceworker_e10s
[browser_blockingServiceWorkers.js]
skip-if = (os == "win" && os_version == "6.1" && bits == 32 && !debug) # Bug 1491937
[browser_blockingServiceWorkersStorageAccessAPI.js]
skip-if = (os == "win" && os_version == "6.1" && bits == 32 && !debug) # Bug 1491937
skip-if = (os == "win" && os_version == "6.1" && bits == 32) || (os == "linux") || (os == "mac" && opt) # Bug 1491937, Bug 1536664
[browser_blockingSharedWorkers.js]
skip-if = (os == "win" && os_version == "6.1" && bits == 32 && !debug) # Bug 1491937
[browser_blockingMessaging.js]

View File

@ -210,12 +210,9 @@ namespace {
bool IsChromeJSScript(JSScript* aScript) {
// May be called from another thread or inside a signal handler.
// We assume querying the script is safe but we must not manipulate it.
nsIScriptSecurityManager* const secman =
nsScriptSecurityManager::GetScriptSecurityManager();
NS_ENSURE_TRUE(secman, false);
JSPrincipals* const principals = JS_GetScriptPrincipals(aScript);
return secman->IsSystemPrincipal(nsJSPrincipals::get(principals));
return nsJSPrincipals::get(principals)->IsSystemPrincipal();
}
// Get the full path after the URI scheme, if the URI matches the scheme.

View File

@ -622,7 +622,7 @@ class UserScript extends Script {
const ssm = Services.scriptSecurityManager;
let principal;
if (ssm.isSystemPrincipal(contentPrincipal)) {
if (contentPrincipal.isSystemPrincipal) {
principal = ssm.createNullPrincipal(contentPrincipal.originAttributes);
} else {
principal = [contentPrincipal];
@ -697,7 +697,7 @@ class ContentScriptContextChild extends BaseContext {
});
} else {
let principal;
if (ssm.isSystemPrincipal(contentPrincipal)) {
if (contentPrincipal.isSystemPrincipal) {
// Make sure we don't hand out the system principal by accident.
// Also make sure that the null principal has the right origin attributes.
principal = ssm.createNullPrincipal(attrs);

View File

@ -126,7 +126,7 @@ impl KeyValueService {
nsCString::from(name),
));
TaskRunnable::new("KVService::GetOrCreate", task)?.dispatch(RefPtr::new(thread))
TaskRunnable::new("KVService::GetOrCreate", task)?.dispatch(thread)
}
}
@ -177,7 +177,7 @@ impl KeyValueDatabase {
let thread = self.thread.get_ref().ok_or(NS_ERROR_FAILURE)?;
TaskRunnable::new("KVDatabase::Put", task)?.dispatch(RefPtr::new(thread))
TaskRunnable::new("KVDatabase::Put", task)?.dispatch(thread)
}
xpcom_method!(
@ -204,7 +204,7 @@ impl KeyValueDatabase {
let thread = self.thread.get_ref().ok_or(NS_ERROR_FAILURE)?;
TaskRunnable::new("KVDatabase::Get", task)?.dispatch(RefPtr::new(thread))
TaskRunnable::new("KVDatabase::Get", task)?.dispatch(thread)
}
xpcom_method!(
@ -221,7 +221,7 @@ impl KeyValueDatabase {
let thread = self.thread.get_ref().ok_or(NS_ERROR_FAILURE)?;
TaskRunnable::new("KVDatabase::Has", task)?.dispatch(RefPtr::new(thread))
TaskRunnable::new("KVDatabase::Has", task)?.dispatch(thread)
}
xpcom_method!(
@ -238,7 +238,7 @@ impl KeyValueDatabase {
let thread = self.thread.get_ref().ok_or(NS_ERROR_FAILURE)?;
TaskRunnable::new("KVDatabase::Delete", task)?.dispatch(RefPtr::new(thread))
TaskRunnable::new("KVDatabase::Delete", task)?.dispatch(thread)
}
xpcom_method!(
@ -265,7 +265,7 @@ impl KeyValueDatabase {
let thread = self.thread.get_ref().ok_or(NS_ERROR_FAILURE)?;
TaskRunnable::new("KVDatabase::Enumerate", task)?.dispatch(RefPtr::new(thread))
TaskRunnable::new("KVDatabase::Enumerate", task)?.dispatch(thread)
}
}

View File

@ -22,6 +22,16 @@
// for slightly more efficient SELECTs.
#define MAX_CHARS_TO_HASH 1500U
extern "C" {
// Generates a new Places GUID. This function uses C linkage because it's
// called from the Rust synced bookmarks mirror, on the storage thread.
nsresult NS_GeneratePlacesGUID(nsACString* _guid) {
return mozilla::places::GenerateGUID(*_guid);
}
} // extern "C"
namespace mozilla {
namespace places {

View File

@ -0,0 +1,30 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_places_SyncedBookmarksMirror_h_
#define mozilla_places_SyncedBookmarksMirror_h_
#include "mozISyncedBookmarksMirror.h"
#include "nsCOMPtr.h"
extern "C" {
// Implemented in Rust, in the `bookmark_sync` crate.
void NS_NewSyncedBookmarksMerger(mozISyncedBookmarksMerger** aResult);
} // extern "C"
namespace mozilla {
namespace places {
// Creates a synced bookmarks merger by calling into the Rust
// `bookmark_sync` crate, and transfers ownership of the new merger to the
// caller.
already_AddRefed<mozISyncedBookmarksMerger> NewSyncedBookmarksMerger() {
  nsCOMPtr<mozISyncedBookmarksMerger> merger;
  NS_NewSyncedBookmarksMerger(getter_AddRefs(merger));
  return merger.forget();
}
} // namespace places
} // namespace mozilla
#endif // mozilla_places_SyncedBookmarksMirror_h_

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,21 @@
[package]
name = "bookmark_sync"
version = "0.1.0"
authors = ["Lina Cambridge <lina@yakshaving.ninja>"]
edition = "2018"
[dependencies]
atomic_refcell = "0.1"
dogear = "0.2.2"
libc = "0.2"
log = "0.4"
moz_task = { path = "../../../../xpcom/rust/moz_task" }
nserror = { path = "../../../../xpcom/rust/nserror" }
nsstring = { path = "../../../../xpcom/rust/nsstring" }
storage = { path = "../../../../storage/rust" }
storage_variant = { path = "../../../../storage/variant" }
xpcom = { path = "../../../../xpcom/rust/xpcom" }
[dependencies.thin-vec]
version = "0.1.0"
features = ["gecko-ffi"]

View File

@ -0,0 +1,134 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::fmt::Write;
use dogear::Guid;
use log::{Level, LevelFilter, Log, Metadata, Record};
use moz_task::{Task, TaskRunnable, ThreadPtrHandle};
use nserror::nsresult;
use nsstring::{nsACString, nsCString, nsString};
use xpcom::interfaces::mozISyncedBookmarksMirrorLogger;
extern "C" {
fn NS_GeneratePlacesGUID(guid: *mut nsACString) -> nsresult;
}
/// Generates a new random Places GUID by calling into C++ via
/// `NS_GeneratePlacesGUID`. Returns the failing `nsresult` on error.
fn generate_guid() -> Result<nsCString, nsresult> {
    let mut guid = nsCString::new();
    match unsafe { NS_GeneratePlacesGUID(&mut *guid) } {
        rv if rv.succeeded() => Ok(guid),
        rv => Err(rv),
    }
}
/// The merger driver, created and used on the storage thread.
pub struct Driver {
    log: Logger,
}

impl Driver {
    #[inline]
    pub fn new(log: Logger) -> Driver {
        Driver { log }
    }
}

impl dogear::Driver for Driver {
    /// Asks Places (via FFI) for a fresh GUID to replace an invalid one.
    /// Failures are reported as `InvalidGuid` carrying the original GUID.
    fn generate_new_guid(&self, invalid_guid: &Guid) -> dogear::Result<Guid> {
        generate_guid()
            .map_err(|_| dogear::ErrorKind::InvalidGuid(invalid_guid.clone()).into())
            .and_then(|s| Guid::from_utf8(s.as_ref()))
    }

    #[inline]
    fn max_log_level(&self) -> LevelFilter {
        self.log.max_level
    }

    #[inline]
    fn logger(&self) -> &dyn Log {
        &self.log
    }
}

/// A `log::Log` implementation that forwards merge messages to an optional
/// XPCOM mirror logger via `LogTask`.
pub struct Logger {
    pub max_level: LevelFilter,
    // `None` disables logging entirely.
    logger: Option<ThreadPtrHandle<mozISyncedBookmarksMirrorLogger>>,
}

impl Logger {
    #[inline]
    pub fn new(
        max_level: LevelFilter,
        logger: Option<ThreadPtrHandle<mozISyncedBookmarksMirrorLogger>>,
    ) -> Logger {
        Logger { max_level, logger }
    }
}
impl Log for Logger {
    /// A record is logged only when an XPCOM logger is attached and the
    /// record's level passes the configured maximum.
    #[inline]
    fn enabled(&self, meta: &Metadata) -> bool {
        self.logger.is_some() && meta.level() <= self.max_level
    }

    /// Formats the record into an `nsString` and dispatches a `LogTask` to
    /// the logger's owning thread. Logging is strictly best-effort: both
    /// formatting and dispatch failures are deliberately ignored, since a
    /// failed log call must never abort a merge.
    fn log(&self, record: &Record) {
        if !self.enabled(record.metadata()) {
            return;
        }
        if let Some(logger) = &self.logger {
            let mut message = nsString::new();
            // `if ... is_ok()` replaces the previous single-arm match with
            // an empty `Err` arm; behavior is unchanged.
            if write!(message, "{}", record.args()).is_ok() {
                let task = LogTask {
                    logger: logger.clone(),
                    level: record.metadata().level(),
                    message,
                };
                let _ = TaskRunnable::new("bookmark_sync::Logger::log", Box::new(task))
                    .and_then(|r| r.dispatch(logger.owning_thread()));
            }
        }
    }

    fn flush(&self) {}
}
/// Logs a message to the mirror logger. This task is created on the async
/// thread, and dispatched synchronously to the main thread.
struct LogTask {
    logger: ThreadPtrHandle<mozISyncedBookmarksMirrorLogger>,
    level: Level,
    message: nsString,
}

impl Task for LogTask {
    fn run(&self) {
        let logger = self.logger.get().unwrap();
        // The nsresult returned by each XPCOM call is deliberately ignored;
        // a failed log call shouldn't affect the merge.
        match self.level {
            Level::Error => unsafe {
                logger.Error(&*self.message);
            },
            Level::Warn => unsafe {
                logger.Warn(&*self.message);
            },
            Level::Debug => unsafe {
                logger.Debug(&*self.message);
            },
            Level::Trace => unsafe {
                logger.Trace(&*self.message);
            },
            // `Level::Info` has no matching method on
            // `mozISyncedBookmarksMirrorLogger`, so it's dropped here.
            _ => {}
        }
    }

    fn done(&self) -> Result<(), nsresult> {
        Ok(())
    }
}

View File

@ -0,0 +1,82 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::{error, fmt, result, string::FromUtf16Error};
use nserror::{nsresult, NS_ERROR_INVALID_ARG, NS_ERROR_STORAGE_BUSY, NS_ERROR_UNEXPECTED};
/// Convenience alias for results returned by the merger.
pub type Result<T> = result::Result<T, Error>;

/// Errors that can occur while merging synced bookmarks, either inside the
/// dogear merger itself or while reading from or writing to the database.
#[derive(Debug)]
pub enum Error {
    /// An error from the dogear merge algorithm.
    Dogear(dogear::Error),
    /// The Places roots are invalid.
    InvalidLocalRoots,
    /// The roots in the mirror database are invalid.
    InvalidRemoteRoots,
    /// A failing XPCOM result code.
    Nsresult(nsresult),
    /// The database contains an unrecognized item kind.
    UnknownItemKind(i64),
    /// A string couldn't be decoded from UTF-16.
    MalformedString(Box<dyn error::Error + Send + Sync + 'static>),
    /// The local tree changed during the merge.
    MergeConflict,
    /// The database contains an unrecognized item validity.
    UnknownItemValidity(i64),
}

impl error::Error for Error {
    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
        // Only `Dogear` is exposed as a source here; `MalformedString`'s
        // boxed error is rendered via `Display` instead.
        match self {
            Error::Dogear(err) => Some(err),
            _ => None,
        }
    }
}

impl From<dogear::Error> for Error {
    fn from(err: dogear::Error) -> Error {
        Error::Dogear(err)
    }
}

impl From<nsresult> for Error {
    fn from(result: nsresult) -> Error {
        Error::Nsresult(result)
    }
}

impl From<FromUtf16Error> for Error {
    fn from(error: FromUtf16Error) -> Error {
        Error::MalformedString(error.into())
    }
}
impl From<Error> for nsresult {
fn from(error: Error) -> nsresult {
match error {
Error::Dogear(_) | Error::InvalidLocalRoots | Error::InvalidRemoteRoots => {
NS_ERROR_UNEXPECTED
}
Error::Nsresult(result) => result.clone(),
Error::UnknownItemKind(_)
| Error::MalformedString(_)
| Error::UnknownItemValidity(_) => NS_ERROR_INVALID_ARG,
Error::MergeConflict => NS_ERROR_STORAGE_BUSY,
}
}
}
// Human-readable messages for each error; `Dogear` and `MalformedString`
// delegate to the wrapped error's own `Display` impl.
impl fmt::Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            Error::Dogear(err) => err.fmt(f),
            Error::InvalidLocalRoots => f.write_str("The Places roots are invalid"),
            Error::InvalidRemoteRoots => {
                f.write_str("The roots in the mirror database are invalid")
            }
            Error::Nsresult(result) => write!(f, "Operation failed with {}", result.error_name()),
            Error::UnknownItemKind(kind) => write!(f, "Unknown item kind {} in database", kind),
            Error::MalformedString(err) => err.fmt(f),
            Error::MergeConflict => f.write_str("Local tree changed during merge"),
            Error::UnknownItemValidity(validity) => {
                write!(f, "Unknown item validity {} in database", validity)
            }
        }
    }
}

View File

@ -0,0 +1,25 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![allow(non_snake_case)]
#[macro_use]
extern crate xpcom;
mod driver;
mod error;
mod merger;
mod store;
use xpcom::{interfaces::mozISyncedBookmarksMerger, RefPtr};
use crate::merger::SyncedBookmarksMerger;
/// Creates a new synced bookmarks merger and hands an owning reference back
/// to the C++ caller (declared in `SyncedBookmarksMirror.h`).
///
/// # Safety
///
/// `result` must be a valid, writable out-pointer; the caller receives a
/// strong reference it is responsible for releasing.
#[no_mangle]
pub unsafe extern "C" fn NS_NewSyncedBookmarksMerger(
    result: *mut *const mozISyncedBookmarksMerger,
) {
    let merger = SyncedBookmarksMerger::new();
    RefPtr::new(merger.coerce::<mozISyncedBookmarksMerger>()).forget(&mut *result);
}

View File

@ -0,0 +1,252 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::{cell::RefCell, fmt::Write, mem, time::Duration};
use atomic_refcell::AtomicRefCell;
use dogear::{MergeTimings, Stats, Store, StructureCounts};
use log::LevelFilter;
use moz_task::{Task, TaskRunnable, ThreadPtrHandle, ThreadPtrHolder};
use nserror::{nsresult, NS_ERROR_FAILURE, NS_ERROR_UNEXPECTED, NS_OK};
use nsstring::nsString;
use storage::Conn;
use storage_variant::HashPropertyBag;
use thin_vec::ThinVec;
use xpcom::{
interfaces::{
mozIStorageConnection, mozISyncedBookmarksMirrorCallback, mozISyncedBookmarksMirrorLogger,
},
RefPtr,
};
use crate::driver::{Driver, Logger};
use crate::error;
use crate::store;
/// The synced bookmarks merger, exposed to script as an XPCOM component.
/// The `#[derive(xpcom)]` machinery generates the refcounted
/// `SyncedBookmarksMerger` type from this init struct.
#[derive(xpcom)]
#[xpimplements(mozISyncedBookmarksMerger)]
#[refcnt = "nonatomic"]
pub struct InitSyncedBookmarksMerger {
    // Mirror database connection; `None` before `SetDb` and after `Finalize`.
    db: RefCell<Option<Conn>>,
    // Optional logger that receives merge messages.
    logger: RefCell<Option<RefPtr<mozISyncedBookmarksMirrorLogger>>>,
}

impl SyncedBookmarksMerger {
    pub fn new() -> RefPtr<SyncedBookmarksMerger> {
        SyncedBookmarksMerger::allocate(InitSyncedBookmarksMerger {
            db: RefCell::default(),
            logger: RefCell::default(),
        })
    }

    xpcom_method!(get_db => GetDb() -> *const mozIStorageConnection);
    fn get_db(&self) -> Result<RefPtr<mozIStorageConnection>, nsresult> {
        // NOTE(review): `ok_or(NS_OK)` appears to make the getter return a
        // success code with a null out-param when no connection is set —
        // confirm against the `xpcom_method!` error-handling convention.
        self.db
            .borrow()
            .as_ref()
            .map(|db| RefPtr::new(db.connection()))
            .ok_or(NS_OK)
    }

    xpcom_method!(set_db => SetDb(connection: *const mozIStorageConnection));
    fn set_db(&self, connection: Option<&mozIStorageConnection>) -> Result<(), nsresult> {
        self.db
            .replace(connection.map(|connection| Conn::wrap(RefPtr::new(connection))));
        Ok(())
    }

    xpcom_method!(get_logger => GetLogger() -> *const mozISyncedBookmarksMirrorLogger);
    fn get_logger(&self) -> Result<RefPtr<mozISyncedBookmarksMirrorLogger>, nsresult> {
        match *self.logger.borrow() {
            Some(ref logger) => Ok(logger.clone()),
            None => Err(NS_OK),
        }
    }

    xpcom_method!(set_logger => SetLogger(logger: *const mozISyncedBookmarksMirrorLogger));
    fn set_logger(&self, logger: Option<&mozISyncedBookmarksMirrorLogger>) -> Result<(), nsresult> {
        self.logger.replace(logger.map(RefPtr::new));
        Ok(())
    }

    xpcom_method!(
        merge => Merge(
            local_time_seconds: libc::int64_t,
            remote_time_seconds: libc::int64_t,
            weak_uploads: *const ThinVec<::nsstring::nsString>,
            callback: *const mozISyncedBookmarksMirrorCallback
        )
    );
    /// Kicks off a merge on the database's async thread; `callback` is
    /// notified when the dispatched `MergeTask` completes.
    fn merge(
        &self,
        local_time_seconds: libc::int64_t,
        remote_time_seconds: libc::int64_t,
        weak_uploads: Option<&ThinVec<nsString>>,
        callback: &mozISyncedBookmarksMirrorCallback,
    ) -> Result<(), nsresult> {
        let callback = RefPtr::new(callback);
        // Merging without a database connection is an error.
        let db = match *self.db.borrow() {
            Some(ref db) => db.clone(),
            None => return Err(NS_ERROR_FAILURE),
        };
        let logger = &*self.logger.borrow();
        let async_thread = db.thread()?;
        let task = MergeTask::new(
            &db,
            logger.as_ref().cloned(),
            local_time_seconds,
            remote_time_seconds,
            weak_uploads
                .map(|w| w.as_slice().to_vec())
                .unwrap_or_default(),
            callback,
        )?;
        let runnable = TaskRunnable::new(
            "bookmark_sync::SyncedBookmarksMerger::merge",
            Box::new(task),
        )?;
        runnable.dispatch(&async_thread)
    }

    xpcom_method!(finalize => Finalize());
    /// Drops the database connection and logger so their references are
    /// released.
    fn finalize(&self) -> Result<(), nsresult> {
        mem::drop(self.db.borrow_mut().take());
        mem::drop(self.logger.borrow_mut().take());
        Ok(())
    }
}
/// State for a single merge: created on the main thread, run on the
/// storage thread.
struct MergeTask {
    db: Conn,
    max_log_level: LevelFilter,
    logger: Option<ThreadPtrHandle<mozISyncedBookmarksMirrorLogger>>,
    local_time_millis: i64,
    remote_time_millis: i64,
    weak_uploads: Vec<nsString>,
    callback: ThreadPtrHandle<mozISyncedBookmarksMirrorCallback>,
    // Written by `run` on the storage thread, consumed by `done` afterward.
    result: AtomicRefCell<Option<error::Result<Stats>>>,
}

impl MergeTask {
    fn new(
        db: &Conn,
        logger: Option<RefPtr<mozISyncedBookmarksMirrorLogger>>,
        local_time_seconds: i64,
        remote_time_seconds: i64,
        weak_uploads: Vec<nsString>,
        callback: RefPtr<mozISyncedBookmarksMirrorCallback>,
    ) -> Result<MergeTask, nsresult> {
        // Translate the logger's `maxLevel` constant into a `log` crate
        // level filter; a missing logger, a failed `GetMaxLevel` call, or an
        // unknown level all disable logging.
        let max_log_level = logger
            .as_ref()
            .and_then(|logger| {
                let mut level = 0i16;
                unsafe { logger.GetMaxLevel(&mut level) }.to_result().ok()?;
                Some(level)
            })
            .map(|level| match level as i64 {
                mozISyncedBookmarksMirrorLogger::LEVEL_ERROR => LevelFilter::Error,
                mozISyncedBookmarksMirrorLogger::LEVEL_WARN => LevelFilter::Warn,
                mozISyncedBookmarksMirrorLogger::LEVEL_DEBUG => LevelFilter::Debug,
                mozISyncedBookmarksMirrorLogger::LEVEL_TRACE => LevelFilter::Trace,
                _ => LevelFilter::Off,
            })
            .unwrap_or(LevelFilter::Off);
        // Wrap the XPCOM pointers for cross-thread use via
        // `moz_task::ThreadPtrHolder`.
        let logger = match logger {
            Some(logger) => Some(ThreadPtrHolder::new(
                "mozISyncedBookmarksMirrorLogger",
                logger,
            )?),
            None => None,
        };
        Ok(MergeTask {
            db: db.clone(),
            max_log_level,
            logger,
            // Callers pass seconds; the task works in milliseconds.
            local_time_millis: local_time_seconds * 1000,
            remote_time_millis: remote_time_seconds * 1000,
            weak_uploads,
            callback: ThreadPtrHolder::new("mozISyncedBookmarksMirrorCallback", callback)?,
            result: AtomicRefCell::default(),
        })
    }
}
impl Task for MergeTask {
    /// Runs the merge on the storage thread and stashes the outcome for
    /// `done` to report back on the main thread.
    fn run(&self) {
        let mut db = self.db.clone();
        let mut store = store::Store::new(
            &mut db,
            self.local_time_millis,
            self.remote_time_millis,
            &self.weak_uploads,
        );
        let log = Logger::new(self.max_log_level, self.logger.clone());
        let driver = Driver::new(log);
        *self.result.borrow_mut() = Some(store.merge_with_driver(&driver));
    }
    /// Called back on the main thread. On success, packs the merge stats
    /// into a property bag for `handleResult`; on failure (or if `run`
    /// never stored a result), calls `handleError`.
    fn done(&self) -> Result<(), nsresult> {
        let callback = self.callback.get().unwrap();
        match self.result.borrow_mut().take() {
            Some(Ok(stats)) => {
                // Key names here are the telemetry probe names; keep them in
                // sync with consumers of the result bag.
                let mut telem = HashPropertyBag::new();
                telem.set("fetchLocalTreeTime", stats.time(|t| t.fetch_local_tree));
                telem.set(
                    "fetchNewLocalContentsTime",
                    stats.time(|t| t.fetch_new_local_contents),
                );
                telem.set("fetchRemoteTreeTime", stats.time(|t| t.fetch_remote_tree));
                telem.set(
                    "fetchNewRemoteContentsTime",
                    stats.time(|t| t.fetch_new_remote_contents),
                );
                telem.set("mergeTime", stats.time(|t| t.merge));
                telem.set("mergedNodesCount", stats.count(|c| c.merged_nodes));
                telem.set("mergedDeletionsCount", stats.count(|c| c.merged_deletions));
                telem.set("remoteRevivesCount", stats.count(|c| c.remote_revives));
                telem.set("localDeletesCount", stats.count(|c| c.local_deletes));
                telem.set("localRevivesCount", stats.count(|c| c.local_revives));
                telem.set("remoteDeletesCount", stats.count(|c| c.remote_deletes));
                telem.set("dupesCount", stats.count(|c| c.dupes));
                telem.set("applyTime", stats.time(|t| t.apply));
                unsafe { callback.HandleResult(telem.bag().coerce()) }
            }
            Some(Err(err)) => {
                // Render the error for the callback; fall back to a generic
                // message if formatting itself fails.
                let message = {
                    let mut message = nsString::new();
                    match write!(message, "{}", err) {
                        Ok(_) => message,
                        Err(_) => nsString::from("Merge failed with unknown error"),
                    }
                };
                unsafe { callback.HandleError(err.into(), &*message) }
            }
            None => unsafe {
                callback.HandleError(
                    NS_ERROR_UNEXPECTED,
                    &*nsString::from("Failed to run merge on storage thread"),
                )
            },
        }
        .to_result()
    }
}
/// Extension methods that convert timings and counters into types that we
/// can store in a `HashPropertyBag`.
trait StatsExt {
    /// Returns the timing selected by `func`, in whole milliseconds.
    fn time(&self, func: impl FnOnce(&MergeTimings) -> Duration) -> i64;
    /// Returns the counter selected by `func`, widened to `i64`.
    fn count(&self, func: impl FnOnce(&StructureCounts) -> usize) -> i64;
}
impl StatsExt for Stats {
    /// Converts the selected timing into whole milliseconds, as expected by
    /// the telemetry property bag.
    fn time(&self, func: impl FnOnce(&MergeTimings) -> Duration) -> i64 {
        let elapsed = func(&self.timings);
        let whole_millis = elapsed.as_secs() as i64 * 1000;
        whole_millis + i64::from(elapsed.subsec_millis())
    }
    /// Widens the selected counter to `i64` for the property bag.
    fn count(&self, func: impl FnOnce(&StructureCounts) -> usize) -> i64 {
        let total = func(&self.counts);
        total as i64
    }
}

View File

@ -0,0 +1,683 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::{collections::HashMap, fmt};
use dogear::{
Content, Deletion, Guid, IntoTree, Item, Kind, MergedDescendant, MergedRoot, Tree,
UploadReason, Validity,
};
use nsstring::{nsCString, nsString};
use storage::{Conn, Step};
use xpcom::interfaces::{mozISyncedBookmarksMerger, nsINavBookmarksService};
use crate::error::{Error, Result};
pub const LMANNO_FEEDURI: &'static str = "livemark/feedURI";
/// Recursive CTE over `moz_bookmarks` that walks the local tree from the
/// Places root, carrying each item's parent info and depth (`level`).
/// Tagged items (`tags________`) are excluded. Callers splice this into a
/// `WITH RECURSIVE` query and select from `localItems`.
pub const LOCAL_ITEMS_SQL_FRAGMENT: &str = "
  localItems(id, guid, parentId, parentGuid, position, type, title, parentTitle,
             placeId, dateAdded, lastModified, syncChangeCounter, level) AS (
    SELECT b.id, b.guid, 0, NULL, b.position, b.type, b.title, NULL,
           b.fk, b.dateAdded, b.lastModified, b.syncChangeCounter, 0
    FROM moz_bookmarks b
    WHERE b.guid = 'root________'
    UNION ALL
    SELECT b.id, b.guid, s.id, s.guid, b.position, b.type, b.title, s.title,
           b.fk, b.dateAdded, b.lastModified, b.syncChangeCounter, s.level + 1
    FROM moz_bookmarks b
    JOIN localItems s ON s.id = b.parent
    WHERE b.guid <> 'tags________')";
extern "C" {
    // Implemented in C++ (nsNavBookmarks); uses C linkage so we can call it
    // from the storage thread without going through the bookmarks service.
    fn NS_NavBookmarksTotalSyncChanges() -> libc::int64_t;
}
/// Returns the total number of Sync changes recorded for all bookmarks.
/// Safe wrapper around the C++ counter above.
fn total_sync_changes() -> i64 {
    unsafe { NS_NavBookmarksTotalSyncChanges() }
}
/// A merge store backed by the mirror database. Implements `dogear::Store`
/// to fetch the local and remote trees and apply the merged result.
pub struct Store<'s> {
    db: &'s mut Conn,
    /// The total Sync change count before merging. We store this before
    /// accessing Places, and compare the current and stored counts after
    /// opening our transaction. If they match, we can safely apply the
    /// tree. Otherwise, we bail and try merging again on the next sync.
    total_sync_changes: i64,
    // "Now" for local and remote items, in milliseconds; used to compute
    // item ages.
    local_time_millis: i64,
    remote_time_millis: i64,
    // GUIDs to stage for weak upload (e.g. repair responses).
    weak_uploads: &'s [nsString],
}
impl<'s> Store<'s> {
    /// Creates a store that merges over `db`, snapshotting the current Sync
    /// change count so `apply` can detect concurrent local writes.
    pub fn new(
        db: &'s mut Conn,
        local_time_millis: i64,
        remote_time_millis: i64,
        weak_uploads: &'s [nsString],
    ) -> Store<'s> {
        Store {
            db,
            total_sync_changes: total_sync_changes(),
            local_time_millis,
            remote_time_millis,
            weak_uploads,
        }
    }
    /// Creates a local tree item from a row in the `localItems` CTE.
    fn local_row_to_item(&self, step: &Step) -> Result<Item> {
        let raw_guid: nsString = step.get_by_name("guid")?;
        let raw_kind: i64 = step.get_by_name("kind")?;
        let guid = Guid::from_utf16(&*raw_guid)?;
        let mut item = Item::new(guid, Kind::from_column(raw_kind)?);
        // Falls back to the type's default (presumably 0 for a missing or
        // NULL column — confirm against `Step`); the age is clamped so clock
        // skew can't make it negative.
        let local_modified: i64 = step.get_by_name_or_default("localModified");
        item.age = (self.local_time_millis - local_modified).max(0);
        let sync_change_counter: i64 = step.get_by_name("syncChangeCounter")?;
        item.needs_merge = sync_change_counter > 0;
        Ok(item)
    }
    /// Creates a remote tree item from a row in `mirror.items`.
    fn remote_row_to_item(&self, step: &Step) -> Result<Item> {
        let raw_guid: nsString = step.get_by_name("guid")?;
        let raw_kind: i64 = step.get_by_name("kind")?;
        let guid = Guid::from_utf16(&*raw_guid)?;
        let mut item = Item::new(guid, Kind::from_column(raw_kind)?);
        let remote_modified: i64 = step.get_by_name("serverModified")?;
        // Clamp for the same clock-skew reason as above.
        item.age = (self.remote_time_millis - remote_modified).max(0);
        let needs_merge: i32 = step.get_by_name("needsMerge")?;
        item.needs_merge = needs_merge == 1;
        let raw_validity: i64 = step.get_by_name("validity")?;
        item.validity = Validity::from_column(raw_validity)?;
        Ok(item)
    }
}
impl<'s> dogear::Store<Error> for Store<'s> {
    /// Builds a fully rooted, consistent tree from the items and tombstones in
    /// Places.
    fn fetch_local_tree(&self) -> Result<Tree> {
        let mut items_statement = self.db.prepare(format!(
            "WITH RECURSIVE
             {}
             SELECT s.id, s.guid, s.parentGuid,
                    /* Map Places item types to Sync record kinds. */
                    (CASE s.type
                     WHEN :bookmarkType THEN (
                       CASE SUBSTR((SELECT h.url FROM moz_places h
                                    WHERE h.id = s.placeId), 1, 6)
                       /* Queries are bookmarks with a `place:` URL scheme. */
                       WHEN 'place:' THEN :queryKind
                       ELSE :bookmarkKind END)
                     WHEN :folderType THEN (
                       CASE WHEN EXISTS(
                         /* Livemarks are folders with a feed URL annotation. */
                         SELECT 1 FROM moz_items_annos a
                         JOIN moz_anno_attributes n ON n.id = a.anno_attribute_id
                         WHERE a.item_id = s.id AND
                               n.name = :feedURLAnno
                       ) THEN :livemarkKind
                       ELSE :folderKind END)
                     ELSE :separatorKind END) AS kind,
                    s.lastModified / 1000 AS localModified, s.syncChangeCounter
             FROM localItems s
             ORDER BY s.level, s.parentId, s.position",
            LOCAL_ITEMS_SQL_FRAGMENT
        ))?;
        items_statement.bind_by_name("bookmarkType", nsINavBookmarksService::TYPE_BOOKMARK)?;
        items_statement.bind_by_name("queryKind", mozISyncedBookmarksMerger::KIND_QUERY)?;
        items_statement.bind_by_name("bookmarkKind", mozISyncedBookmarksMerger::KIND_BOOKMARK)?;
        items_statement.bind_by_name("folderType", nsINavBookmarksService::TYPE_FOLDER)?;
        items_statement.bind_by_name("feedURLAnno", nsCString::from(LMANNO_FEEDURI))?;
        items_statement.bind_by_name("livemarkKind", mozISyncedBookmarksMerger::KIND_LIVEMARK)?;
        items_statement.bind_by_name("folderKind", mozISyncedBookmarksMerger::KIND_FOLDER)?;
        items_statement.bind_by_name("separatorKind", mozISyncedBookmarksMerger::KIND_SEPARATOR)?;
        let mut builder = match items_statement.step()? {
            // The first row is always the root, because the query is ordered
            // by level.
            Some(step) => Tree::with_root(self.local_row_to_item(&step)?),
            None => return Err(Error::InvalidLocalRoots.into()),
        };
        while let Some(step) = items_statement.step()? {
            // All subsequent rows are descendants.
            let raw_parent_guid: nsString = step.get_by_name("parentGuid")?;
            let parent_guid = Guid::from_utf16(&*raw_parent_guid)?;
            builder
                .item(self.local_row_to_item(&step)?)?
                .by_structure(&parent_guid)?;
        }
        let mut tree = builder.into_tree()?;
        // Note local tombstones, so the merger can tell deletions apart from
        // items that are missing entirely.
        let mut deletions_statement = self.db.prepare("SELECT guid FROM moz_bookmarks_deleted")?;
        while let Some(step) = deletions_statement.step()? {
            let raw_guid: nsString = step.get_by_name("guid")?;
            let guid = Guid::from_utf16(&*raw_guid)?;
            tree.note_deleted(guid);
        }
        Ok(tree)
    }
    /// Fetches content info for all NEW and UNKNOWN local items that don't exist
    /// in the mirror. We'll try to dedupe them to changed items with similar
    /// contents and different GUIDs in the mirror.
    fn fetch_new_local_contents(&self) -> Result<HashMap<Guid, Content>> {
        let mut contents = HashMap::new();
        let mut statement = self.db.prepare(
            r#"SELECT b.guid, b.type, IFNULL(b.title, "") AS title, h.url,
                      b.position
               FROM moz_bookmarks b
               JOIN moz_bookmarks p ON p.id = b.parent
               LEFT JOIN moz_places h ON h.id = b.fk
               LEFT JOIN items v ON v.guid = b.guid
               WHERE v.guid IS NULL AND
                     p.guid <> :rootGuid AND
                     b.syncStatus <> :syncStatus"#,
        )?;
        statement.bind_by_name("rootGuid", nsCString::from(&*dogear::ROOT_GUID))?;
        statement.bind_by_name("syncStatus", nsINavBookmarksService::SYNC_STATUS_NORMAL)?;
        while let Some(step) = statement.step()? {
            let typ: i64 = step.get_by_name("type")?;
            // Build dedupe content keyed on the Places item type; rows with
            // unknown types are skipped rather than failing the fetch.
            let content = match typ {
                nsINavBookmarksService::TYPE_BOOKMARK => {
                    let raw_title: nsString = step.get_by_name("title")?;
                    let title = String::from_utf16(&*raw_title)?;
                    let raw_url_href: nsString = step.get_by_name("url")?;
                    let url_href = String::from_utf16(&*raw_url_href)?;
                    Content::Bookmark { title, url_href }
                }
                nsINavBookmarksService::TYPE_FOLDER => {
                    let raw_title: nsString = step.get_by_name("title")?;
                    let title = String::from_utf16(&*raw_title)?;
                    Content::Folder { title }
                }
                nsINavBookmarksService::TYPE_SEPARATOR => {
                    let position: i64 = step.get_by_name("position")?;
                    Content::Separator { position }
                }
                _ => continue,
            };
            let raw_guid: nsString = step.get_by_name("guid")?;
            let guid = Guid::from_utf16(&*raw_guid)?;
            contents.insert(guid, content);
        }
        Ok(contents)
    }
    /// Builds a fully rooted, consistent tree from the items and tombstones in the
    /// mirror.
    fn fetch_remote_tree(&self) -> Result<Tree> {
        let mut root_statement = self.db.prepare(
            "SELECT guid, serverModified, kind, needsMerge, validity
             FROM items
             WHERE NOT isDeleted AND
                   guid = :rootGuid",
        )?;
        root_statement.bind_by_name("rootGuid", nsCString::from(&*dogear::ROOT_GUID))?;
        let mut builder = match root_statement.step()? {
            Some(step) => Tree::with_root(self.remote_row_to_item(&step)?),
            None => return Err(Error::InvalidRemoteRoots.into()),
        };
        // Items without a known parent are reparented to "unfiled" instead of
        // failing the build.
        builder.reparent_orphans_to(&dogear::UNFILED_GUID);
        let mut items_statement = self.db.prepare(
            "SELECT guid, parentGuid, serverModified, kind, needsMerge, validity
             FROM items
             WHERE NOT isDeleted AND
                   guid <> :rootGuid",
        )?;
        items_statement.bind_by_name("rootGuid", nsCString::from(&*dogear::ROOT_GUID))?;
        while let Some(step) = items_statement.step()? {
            let p = builder.item(self.remote_row_to_item(&step)?)?;
            // `parentGuid` here is the parent from the item record
            // (`parentid`); structure rows below take precedence.
            let raw_parent_guid: Option<nsString> = step.get_by_name("parentGuid")?;
            if let Some(raw_parent_guid) = raw_parent_guid {
                p.by_parent_guid(Guid::from_utf16(&*raw_parent_guid)?)?;
            }
        }
        let mut structure_statement = self.db.prepare(
            "SELECT guid, parentGuid FROM structure
             WHERE guid <> :rootGuid
             ORDER BY parentGuid, position",
        )?;
        structure_statement.bind_by_name("rootGuid", nsCString::from(&*dogear::ROOT_GUID))?;
        while let Some(step) = structure_statement.step()? {
            let raw_guid: nsString = step.get_by_name("guid")?;
            let guid = Guid::from_utf16(&*raw_guid)?;
            let raw_parent_guid: nsString = step.get_by_name("parentGuid")?;
            let parent_guid = Guid::from_utf16(&*raw_parent_guid)?;
            builder.parent_for(&guid).by_children(&parent_guid)?;
        }
        let mut tree = builder.into_tree()?;
        // Note unmerged remote tombstones, as with the local tree.
        let mut deletions_statement = self.db.prepare(
            "SELECT guid FROM items
             WHERE isDeleted AND
                   needsMerge",
        )?;
        while let Some(step) = deletions_statement.step()? {
            let raw_guid: nsString = step.get_by_name("guid")?;
            let guid = Guid::from_utf16(&*raw_guid)?;
            tree.note_deleted(guid);
        }
        Ok(tree)
    }
    /// Fetches content info for all items in the mirror that changed since the
    /// last sync and don't exist locally.
    fn fetch_new_remote_contents(&self) -> Result<HashMap<Guid, Content>> {
        let mut contents = HashMap::new();
        let mut statement = self.db.prepare(
            r#"SELECT v.guid, v.kind, IFNULL(v.title, "") AS title, u.url,
                      IFNULL(s.position, -1) AS position
               FROM items v
               LEFT JOIN urls u ON u.id = v.urlId
               LEFT JOIN structure s ON s.guid = v.guid
               LEFT JOIN moz_bookmarks b ON b.guid = v.guid
               WHERE NOT v.isDeleted AND
                     v.needsMerge AND
                     b.guid IS NULL AND
                     IFNULL(s.parentGuid, :unfiledGuid) <> :rootGuid"#,
        )?;
        statement.bind_by_name("unfiledGuid", nsCString::from(&*dogear::UNFILED_GUID))?;
        statement.bind_by_name("rootGuid", nsCString::from(&*dogear::ROOT_GUID))?;
        while let Some(step) = statement.step()? {
            let kind: i64 = step.get_by_name("kind")?;
            // As with local contents, unknown kinds are skipped.
            let content = match kind {
                mozISyncedBookmarksMerger::KIND_BOOKMARK
                | mozISyncedBookmarksMerger::KIND_QUERY => {
                    let raw_title: nsString = step.get_by_name("title")?;
                    let title = String::from_utf16(&*raw_title)?;
                    let raw_url_href: nsString = step.get_by_name("url")?;
                    let url_href = String::from_utf16(&*raw_url_href)?;
                    Content::Bookmark { title, url_href }
                }
                mozISyncedBookmarksMerger::KIND_FOLDER
                | mozISyncedBookmarksMerger::KIND_LIVEMARK => {
                    let raw_title: nsString = step.get_by_name("title")?;
                    let title = String::from_utf16(&*raw_title)?;
                    Content::Folder { title }
                }
                mozISyncedBookmarksMerger::KIND_SEPARATOR => {
                    let position: i64 = step.get_by_name("position")?;
                    Content::Separator { position }
                }
                _ => continue,
            };
            let raw_guid: nsString = step.get_by_name("guid")?;
            let guid = Guid::from_utf16(&*raw_guid)?;
            contents.insert(guid, content);
        }
        Ok(contents)
    }
    /// Applies the merged tree to Places and stages outgoing items, all in
    /// one transaction. Bails with `Error::MergeConflict` if the Sync change
    /// count moved since this store was created, meaning something wrote to
    /// Places while we were merging.
    fn apply<'t>(
        &mut self,
        root: MergedRoot<'t>,
        deletions: impl Iterator<Item = Deletion<'t>>,
    ) -> Result<()> {
        let descendants = root.descendants();
        let deletions = deletions.collect::<Vec<_>>();
        // Apply the merged tree and stage outgoing items. This transaction
        // blocks writes from the main connection until it's committed, so we
        // try to do as little work as possible within it.
        let tx = self.db.transaction()?;
        if self.total_sync_changes != total_sync_changes() {
            return Err(Error::MergeConflict);
        }
        update_local_items_in_places(&tx, descendants, deletions)?;
        stage_items_to_upload(&tx, &self.weak_uploads)?;
        cleanup(&tx)?;
        tx.commit()?;
        Ok(())
    }
}
/// Builds a temporary table with the merge states of all nodes in the merged
/// tree and updates Places to match the merged tree.
///
/// Conceptually, we examine the merge state of each item, and either leave the
/// item unchanged, upload the local side, apply the remote side, or apply and
/// then reupload the remote side with a new structure.
///
/// Note that we update Places and flag items *before* upload, while iOS
/// updates the mirror *after* a successful upload. This simplifies our
/// implementation, though we lose idempotent merges. If upload is interrupted,
/// the next sync won't distinguish between new merge states from the previous
/// sync, and local changes.
fn update_local_items_in_places<'t>(
    db: &Conn,
    descendants: Vec<MergedDescendant<'t>>,
    deletions: Vec<Deletion>,
) -> Result<()> {
    // Each `VALUES` row binds 4 parameters (local, remote, merged, and
    // merged parent GUIDs; the rest are inlined), so chunk by 999 / 4 to
    // stay within SQLite's default limit of 999 bound variables per
    // statement. (999 / 3 bound up to 1332 variables per chunk, overflowing
    // the limit on large merges.)
    for chunk in descendants.chunks(999 / 4) {
        let mut statement = db.prepare(format!(
            "INSERT INTO mergeStates(localGuid, remoteGuid, mergedGuid, mergedParentGuid, level,
                                     position, useRemote, shouldUpload)
             VALUES {}",
            repeat_display(chunk.len(), ",", |index, f| {
                let d = &chunk[index];
                write!(
                    f,
                    "(?, ?, ?, ?, {}, {}, {}, {})",
                    d.level,
                    d.position,
                    d.merged_node.merge_state.should_apply() as i8,
                    (d.merged_node.merge_state.upload_reason() != UploadReason::None) as i8
                )
            })
        ))?;
        for (index, d) in chunk.iter().enumerate() {
            // 4 bound parameters per row; see the chunk size above.
            let offset = (index * 4) as u32;
            let local_guid = d
                .merged_node
                .merge_state
                .local_node()
                .map(|node| nsString::from(node.guid.as_str()));
            let remote_guid = d
                .merged_node
                .merge_state
                .remote_node()
                .map(|node| nsString::from(node.guid.as_str()));
            let merged_guid = nsString::from(d.merged_node.guid.as_str());
            let merged_parent_guid = nsString::from(d.merged_parent_node.guid.as_str());
            statement.bind_by_index(offset, local_guid)?;
            statement.bind_by_index(offset + 1, remote_guid)?;
            statement.bind_by_index(offset + 2, merged_guid)?;
            statement.bind_by_index(offset + 3, merged_parent_guid)?;
        }
        statement.execute()?;
    }
    // Deletions bind one parameter per row, so they can use full chunks.
    for chunk in deletions.chunks(999) {
        // This fires the `noteItemRemoved` trigger, which records observer infos
        // for deletions. It's important we do this before updating the structure,
        // so that the trigger captures the old parent and position.
        let mut statement = db.prepare(format!(
            "INSERT INTO itemsToRemove(guid, localLevel, shouldUploadTombstone)
             VALUES {}",
            repeat_display(chunk.len(), ",", |index, f| {
                let d = &chunk[index];
                write!(
                    f,
                    "(?, {}, {})",
                    d.local_level, d.should_upload_tombstone as i8
                )
            })
        ))?;
        for (index, d) in chunk.iter().enumerate() {
            statement.bind_by_index(index as u32, nsString::from(d.guid.as_str()))?;
        }
        statement.execute()?;
    }
    insert_new_urls_into_places(&db)?;
    // "Deleting" from `itemsToMerge` fires the `insertNewLocalItems` and
    // `updateExistingLocalItems` triggers.
    db.exec("DELETE FROM itemsToMerge")?;
    // "Deleting" from `structureToMerge` fires the `updateLocalStructure`
    // trigger.
    db.exec("DELETE FROM structureToMerge")?;
    db.exec("DELETE FROM itemsToRemove")?;
    db.exec("DELETE FROM relatedIdsToReupload")?;
    Ok(())
}
/// Inserts URLs for new remote items from the mirror's `urls` table into the
/// `moz_places` table. Reuses an existing Places GUID when the URL is
/// already known, and seeds frecency to -1 (recalculate later) except for
/// queries, which get 0.
fn insert_new_urls_into_places(db: &Conn) -> Result<()> {
    let mut statement = db.prepare(
        "INSERT OR IGNORE INTO moz_places(url, url_hash, rev_host, hidden,
                                          frecency, guid)
         SELECT u.url, u.hash, u.revHost, 0,
                (CASE v.kind WHEN :queryKind THEN 0 ELSE -1 END),
                IFNULL((SELECT h.guid FROM moz_places h
                        WHERE h.url_hash = u.hash AND
                              h.url = u.url), u.guid)
         FROM items v
         JOIN urls u ON u.id = v.urlId
         JOIN mergeStates r ON r.remoteGuid = v.guid
         WHERE r.useRemote",
    )?;
    statement.bind_by_name("queryKind", mozISyncedBookmarksMerger::KIND_QUERY)?;
    statement.execute()?;
    // Clear the origins staging table populated by the insert's temp
    // triggers.
    db.exec("DELETE FROM moz_updateoriginsinsert_temp")?;
    Ok(())
}
/// Stores a snapshot of all locally changed items in a temporary table for
/// upload. This is called from within the merge transaction, to ensure that
/// changes made during the sync don't cause us to upload inconsistent records.
///
/// For an example of why we use a temporary table instead of reading directly
/// from Places, consider a user adding a bookmark, then changing its parent
/// folder. We first add the bookmark to the default folder, bump the change
/// counter of the new bookmark and the default folder, then trigger a sync.
/// Depending on how quickly the user picks the new parent, we might upload
/// a record for the default folder, commit the move, then upload the bookmark.
/// We'll still upload the new parent on the next sync, but, in the meantime,
/// we've introduced a parent-child disagreement. This can also happen if the
/// user moves many items between folders.
///
/// Conceptually, `itemsToUpload` is a transient "view" of locally changed
/// items. The change counter in Places is the persistent record of items that
/// we need to upload, so, if upload is interrupted or fails, we'll stage the
/// items again on the next sync.
fn stage_items_to_upload(db: &Conn, weak_upload: &[nsString]) -> Result<()> {
    // Stage explicit weak uploads such as repair responses. Chunked by 999,
    // SQLite's default bound-variable limit (one `?` per GUID).
    for chunk in weak_upload.chunks(999) {
        let mut statement = db.prepare(format!(
            "INSERT INTO idsToWeaklyUpload(id)
             SELECT id
             FROM moz_bookmarks
             WHERE guid IN ({})",
            repeat_display(chunk.len(), ",", |_, f| f.write_str("?")),
        ))?;
        for (index, guid) in chunk.iter().enumerate() {
            statement.bind_by_index(index as u32, nsString::from(guid.as_ref()))?;
        }
        statement.execute()?;
    }
    // Stage remotely changed items with older local creation dates. These are
    // tracked "weakly": if the upload is interrupted or fails, we won't
    // reupload the record on the next sync.
    db.exec(
        r#"
        INSERT OR IGNORE INTO idsToWeaklyUpload(id)
        SELECT b.id
        FROM moz_bookmarks b
        JOIN mergeStates r ON r.mergedGuid = b.guid
        JOIN items v ON v.guid = r.remoteGuid
        WHERE r.useRemote AND
              /* "b.dateAdded" is in microseconds; "v.dateAdded" is in
                 milliseconds. */
              b.dateAdded / 1000 < v.dateAdded"#,
    )?;
    // Stage remaining locally changed items for upload: anything with a
    // positive change counter, or flagged for weak upload above.
    db.exec(format!(
        "
        WITH RECURSIVE
        {}
        INSERT INTO itemsToUpload(id, guid, syncChangeCounter, parentGuid,
                                  parentTitle, dateAdded, type, title, placeId,
                                  isQuery, url, keyword, position, tagFolderName)
        SELECT s.id, s.guid, s.syncChangeCounter, s.parentGuid, s.parentTitle,
               s.dateAdded / 1000, s.type, s.title, s.placeId,
               IFNULL(SUBSTR(h.url, 1, 6) = 'place:', 0) AS isQuery,
               h.url,
               (SELECT keyword FROM moz_keywords WHERE place_id = h.id),
               s.position,
               (SELECT get_query_param(substr(url, 7), 'tag')
                WHERE substr(h.url, 1, 6) = 'place:')
        FROM localItems s
        LEFT JOIN moz_places h ON h.id = s.placeId
        LEFT JOIN idsToWeaklyUpload w ON w.id = s.id
        WHERE s.guid <> '{}' AND (
          s.syncChangeCounter > 0 OR
          w.id NOT NULL
        )",
        LOCAL_ITEMS_SQL_FRAGMENT,
        dogear::ROOT_GUID,
    ))?;
    // Record the child GUIDs of locally changed folders, which we use to
    // populate the `children` array in the record.
    db.exec(
        "
        INSERT INTO structureToUpload(guid, parentId, position)
        SELECT b.guid, b.parent, b.position
        FROM moz_bookmarks b
        JOIN itemsToUpload o ON o.id = b.parent",
    )?;
    // Stage tags for outgoing bookmarks.
    db.exec(
        "
        INSERT INTO tagsToUpload(id, tag)
        SELECT o.id, t.tag
        FROM localTags t
        JOIN itemsToUpload o ON o.placeId = t.placeId",
    )?;
    // Finally, stage tombstones for deleted items. Ignore conflicts if we have
    // tombstones for undeleted items; Places Maintenance should clean these up.
    db.exec(
        "
        INSERT OR IGNORE INTO itemsToUpload(guid, syncChangeCounter, isDeleted)
        SELECT guid, 1, 1 FROM moz_bookmarks_deleted",
    )?;
    Ok(())
}
/// Empties the temporary merge-state tables once the merged tree has been
/// applied and outgoing items staged.
fn cleanup(db: &Conn) -> Result<()> {
    db.exec("DELETE FROM mergeStates")
        .and_then(|_| db.exec("DELETE FROM idsToWeaklyUpload"))
        .map(|_| ())
}
/// Construct a `RepeatDisplay` that will repeatedly call `fmt_one` with a
/// formatter `count` times, separated by `sep`. This is copied from the
/// `sql_support` crate in `application-services`. Used above to build
/// `VALUES` lists and `IN (...)` placeholder lists without intermediate
/// string allocations.
#[inline]
fn repeat_display<'a, F>(count: usize, sep: &'a str, fmt_one: F) -> RepeatDisplay<'a, F>
where
    F: Fn(usize, &mut fmt::Formatter) -> fmt::Result,
{
    RepeatDisplay {
        count,
        sep,
        fmt_one,
    }
}
/// Helper type for printing repeated strings more efficiently.
#[derive(Debug, Clone)]
struct RepeatDisplay<'a, F> {
    // Number of times to invoke `fmt_one`.
    count: usize,
    // Separator written between repetitions (not before the first).
    sep: &'a str,
    // Closure that formats the i-th repetition.
    fmt_one: F,
}
impl<'a, F> fmt::Display for RepeatDisplay<'a, F>
where
    F: Fn(usize, &mut fmt::Formatter) -> fmt::Result,
{
    /// Writes `count` repetitions of `fmt_one`, joined by `sep`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut first = true;
        for index in 0..self.count {
            if first {
                first = false;
            } else {
                f.write_str(self.sep)?;
            }
            (self.fmt_one)(index, f)?;
        }
        Ok(())
    }
}
/// Converts between a type `T` and its SQL representation.
trait Column<T> {
    /// Parses a value of this type from its SQL column value, failing on
    /// unknown values.
    fn from_column(raw: T) -> Result<Self>
    where
        Self: Sized;
    /// Returns the SQL column value for this type.
    fn into_column(self) -> T;
}
// Maps dogear item kinds to and from the `KIND_*` constants stored in the
// mirror database.
impl Column<i64> for Kind {
    fn from_column(raw: i64) -> Result<Kind> {
        Ok(match raw {
            mozISyncedBookmarksMerger::KIND_BOOKMARK => Kind::Bookmark,
            mozISyncedBookmarksMerger::KIND_QUERY => Kind::Query,
            mozISyncedBookmarksMerger::KIND_FOLDER => Kind::Folder,
            mozISyncedBookmarksMerger::KIND_LIVEMARK => Kind::Livemark,
            mozISyncedBookmarksMerger::KIND_SEPARATOR => Kind::Separator,
            _ => return Err(Error::UnknownItemKind(raw)),
        })
    }
    fn into_column(self) -> i64 {
        match self {
            Kind::Bookmark => mozISyncedBookmarksMerger::KIND_BOOKMARK,
            Kind::Query => mozISyncedBookmarksMerger::KIND_QUERY,
            Kind::Folder => mozISyncedBookmarksMerger::KIND_FOLDER,
            Kind::Livemark => mozISyncedBookmarksMerger::KIND_LIVEMARK,
            Kind::Separator => mozISyncedBookmarksMerger::KIND_SEPARATOR,
        }
    }
}
impl Column<i64> for Validity {
fn from_column(raw: i64) -> Result<Validity> {
Ok(match raw {
mozISyncedBookmarksMerger::VALIDITY_VALID => Validity::Valid,
mozISyncedBookmarksMerger::VALIDITY_REUPLOAD => Validity::Reupload,
mozISyncedBookmarksMerger::VALIDITY_REPLACE => Validity::Replace,
_ => return Err(Error::UnknownItemValidity(raw).into()),
})
}
fn into_column(self) -> i64 {
match self {
Validity::Valid => mozISyncedBookmarksMerger::VALIDITY_VALID,
Validity::Reupload => mozISyncedBookmarksMerger::VALIDITY_REUPLOAD,
Validity::Replace => mozISyncedBookmarksMerger::VALIDITY_REPLACE,
}
}
}

View File

@ -95,4 +95,12 @@ Classes = [
'jsm': 'resource://gre/modules/PageIconProtocolHandler.jsm',
'constructor': 'PageIconProtocolHandler',
},
{
'cid': '{7d47b41d-7cc5-4882-b293-d8f3f3b48b46}',
'contract_ids': ['@mozilla.org/browser/synced-bookmarks-merger;1'],
'type': 'mozISyncedBookmarksMerger',
'headers': ['mozilla/places/SyncedBookmarksMirror.h'],
'constructor': 'mozilla::places::NewSyncedBookmarksMerger',
},
]

View File

@ -18,6 +18,7 @@ if CONFIG['MOZ_PLACES']:
'mozIAsyncHistory.idl',
'mozIPlacesAutoComplete.idl',
'mozIPlacesPendingOperation.idl',
'mozISyncedBookmarksMirror.idl',
'nsIAnnotationService.idl',
'nsIFaviconService.idl',
'nsINavBookmarksService.idl',
@ -29,6 +30,7 @@ if CONFIG['MOZ_PLACES']:
'History.h',
'INativePlacesEventCallback.h',
'Shutdown.h',
'SyncedBookmarksMirror.h',
]
UNIFIED_SOURCES += [

View File

@ -0,0 +1,70 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "nsISupports.idl"
interface mozIStorageConnection;
// Receives the outcome of a mirror merge. Exactly one of these methods is
// called per `merge` invocation.
[scriptable, uuid(d23fdfea-92c8-409d-a516-08ae395d578f)]
interface mozISyncedBookmarksMirrorCallback : nsISupports {
  // Called with merge telemetry (an nsIPropertyBag2) when the merge succeeds.
  void handleResult(in nsISupports result);
  // Called with a failure code and human-readable message when the merge
  // fails.
  void handleError(in nsresult code, in AString message);
};
// Forwards log messages from the merger. `maxLevel` is read once per merge,
// before the merge starts, to decide which messages to send.
[scriptable, uuid(37485984-a6ab-46e3-9b0c-e8b613413ef3)]
interface mozISyncedBookmarksMirrorLogger : nsISupports {
  // Log levels, in increasing verbosity.
  const short LEVEL_OFF = 0;
  const short LEVEL_ERROR = 1;
  const short LEVEL_WARN = 2;
  const short LEVEL_DEBUG = 3;
  const short LEVEL_TRACE = 4;
  // The most verbose level this logger wants to receive.
  attribute short maxLevel;
  void error(in AString message);
  void warn(in AString message);
  void debug(in AString message);
  void trace(in AString message);
};
// Merges the local bookmark tree in Places with the remote tree in the
// mirror database, implemented in Rust (the `bookmark_sync` crate).
[scriptable, builtinclass, uuid(f0a6217d-8344-4e68-9995-bbf5554be86e)]
interface mozISyncedBookmarksMerger : nsISupports {
  // Synced item kinds. These are stored in the mirror database.
  const short KIND_BOOKMARK = 1;
  const short KIND_QUERY = 2;
  const short KIND_FOLDER = 3;
  const short KIND_LIVEMARK = 4;
  const short KIND_SEPARATOR = 5;
  // Synced item validity states. These are also stored in the mirror
  // database. `REUPLOAD` means a remote item can be fixed up and applied,
  // and should be reuploaded. `REPLACE` means a remote item isn't valid
  // at all, and should either be replaced with a valid local copy, or deleted
  // if a valid local copy doesn't exist.
  const short VALIDITY_VALID = 1;
  const short VALIDITY_REUPLOAD = 2;
  const short VALIDITY_REPLACE = 3;
  // The mirror database connection to use for merging. The merge runs on the
  // connection's async thread, to avoid blocking the main thread. The database
  // schema, temp tables, and triggers must be set up before calling
  // `merge`.
  attribute mozIStorageConnection db;
  // Optional; used for logging.
  attribute mozISyncedBookmarksMirrorLogger logger;
  // Merges the local and remote bookmark trees. Stores the merged tree in the
  // `mergeStates` table. Calls `callback.handleResult(nsIPropertyBag2)` with
  // telemetry stats on success, or `callback.handleError(error)` if an error
  // occurs. Times are in seconds.
  void merge(in long long localTimeSeconds,
             in long long remoteTimeSeconds,
             in Array<AString> weakUploads,
             in mozISyncedBookmarksMirrorCallback callback);
  // Finalizes the merger. This does _not_ automatically close the database
  // connection.
  void finalize();
};

View File

@ -34,6 +34,19 @@ const int32_t nsNavBookmarks::kGetChildrenIndex_SyncStatus = 22;
using namespace mozilla::places;
extern "C" {
// Returns the total number of Sync changes recorded since Places startup for
// all bookmarks. This function uses C linkage because it's called from the
// Rust synced bookmarks mirror, on the storage thread. Using `get_service` to
// access the bookmarks service from Rust trips a thread-safety assertion, so
// we can't use `nsNavBookmarks::GetTotalSyncChanges`.
int64_t NS_NavBookmarksTotalSyncChanges() {
return nsNavBookmarks::sTotalSyncChanges;
}
} // extern "C"
PLACES_FACTORY_SINGLETON_IMPLEMENTATION(nsNavBookmarks, gBookmarksService)
#define BOOKMARKS_ANNO_PREFIX "bookmarks/"

View File

@ -30,7 +30,7 @@ const MobileBookmarksTitle = "mobile";
function run_test() {
let bufLog = Log.repository.getLogger("Sync.Engine.Bookmarks.Mirror");
bufLog.level = Log.Level.Error;
bufLog.level = Log.Level.All;
let sqliteLog = Log.repository.getLogger("Sqlite");
sqliteLog.level = Log.Level.Error;

View File

@ -288,9 +288,9 @@ add_task(async function test_missing_children() {
let idsToUpload = inspectChangeRecords(changesToUpload);
deepEqual(idsToUpload, {
updated: [],
updated: ["menu"],
deleted: [],
}, "Should not reupload menu with missing children (B D E)");
}, "Should reupload menu without missing children (B D E)");
await assertLocalTree(PlacesUtils.bookmarks.menuGuid, {
guid: PlacesUtils.bookmarks.menuGuid,
type: PlacesUtils.bookmarks.TYPE_FOLDER,
@ -332,26 +332,26 @@ add_task(async function test_missing_children() {
let idsToUpload = inspectChangeRecords(changesToUpload);
deepEqual(idsToUpload, {
updated: [],
updated: ["bookmarkBBBB", "bookmarkEEEE", "menu"],
deleted: [],
}, "Should not reupload menu with missing child D");
}, "Should reupload menu and restored children");
await assertLocalTree(PlacesUtils.bookmarks.menuGuid, {
guid: PlacesUtils.bookmarks.menuGuid,
type: PlacesUtils.bookmarks.TYPE_FOLDER,
index: 0,
title: BookmarksMenuTitle,
children: [{
guid: "bookmarkBBBB",
type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
index: 0,
title: "B",
url: "http://example.com/b",
}, {
guid: "bookmarkCCCC",
type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
index: 1,
index: 0,
title: "C",
url: "http://example.com/c",
}, {
guid: "bookmarkBBBB",
type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
index: 1,
title: "B",
url: "http://example.com/b",
}, {
guid: "bookmarkEEEE",
type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
@ -359,12 +359,13 @@ add_task(async function test_missing_children() {
title: "E",
url: "http://example.com/e",
}],
}, "Menu children should be (B C E)");
}, "Menu children should be (C B E)");
deepEqual(await buf.fetchRemoteOrphans(), {
missingChildren: ["bookmarkDDDD"],
missingParents: [],
missingChildren: [],
missingParents: ["bookmarkBBBB", "bookmarkEEEE"],
parentsWithGaps: [],
}, "Should report (D) as missing");
}, "Should report missing parents for (B E)");
await storeChangesInMirror(buf, changesToUpload);
}
info("Add D to remote");
@ -381,45 +382,46 @@ add_task(async function test_missing_children() {
let idsToUpload = inspectChangeRecords(changesToUpload);
deepEqual(idsToUpload, {
updated: [],
updated: ["bookmarkDDDD", "menu"],
deleted: [],
}, "Should not reupload complete menu");
}, "Should reupload complete menu");
await assertLocalTree(PlacesUtils.bookmarks.menuGuid, {
guid: PlacesUtils.bookmarks.menuGuid,
type: PlacesUtils.bookmarks.TYPE_FOLDER,
index: 0,
title: BookmarksMenuTitle,
children: [{
guid: "bookmarkBBBB",
type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
index: 0,
title: "B",
url: "http://example.com/b",
}, {
guid: "bookmarkCCCC",
type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
index: 1,
index: 0,
title: "C",
url: "http://example.com/c",
}, {
guid: "bookmarkDDDD",
guid: "bookmarkBBBB",
type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
index: 2,
title: "D",
url: "http://example.com/d",
index: 1,
title: "B",
url: "http://example.com/b",
}, {
guid: "bookmarkEEEE",
type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
index: 3,
index: 2,
title: "E",
url: "http://example.com/e",
}, {
guid: "bookmarkDDDD",
type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
index: 3,
title: "D",
url: "http://example.com/d",
}],
}, "Menu children should be (B C D E)");
}, "Menu children should be (C B E D)");
deepEqual(await buf.fetchRemoteOrphans(), {
missingChildren: [],
missingParents: [],
missingParents: ["bookmarkDDDD"],
parentsWithGaps: [],
}, "Should not report any missing children");
}, "Should report missing parent for D");
await storeChangesInMirror(buf, changesToUpload);
}
await buf.finalize();
@ -463,9 +465,10 @@ add_task(async function test_new_orphan_without_local_parent() {
deepEqual(await buf.fetchUnmergedGuids(), [], "Should merge all items");
let idsToUpload = inspectChangeRecords(changesToUpload);
deepEqual(idsToUpload, {
updated: [],
updated: ["bookmarkBBBB", "bookmarkCCCC", "bookmarkDDDD", "unfiled"],
deleted: [],
}, "Should not reupload orphans (B C D)");
}, "Should reupload orphans (B C D)");
await storeChangesInMirror(buf, changesToUpload);
}
await assertLocalTree(PlacesUtils.bookmarks.unfiledGuid, {
@ -511,9 +514,10 @@ add_task(async function test_new_orphan_without_local_parent() {
deepEqual(await buf.fetchUnmergedGuids(), [], "Should merge all items");
let idsToUpload = inspectChangeRecords(changesToUpload);
deepEqual(idsToUpload, {
updated: [],
updated: ["bookmarkBBBB", "bookmarkCCCC", "bookmarkDDDD", "folderAAAAAA", "unfiled"],
deleted: [],
}, "Should not reupload orphan A");
}, "Should reupload A and its children");
await storeChangesInMirror(buf, changesToUpload);
}
await assertLocalTree(PlacesUtils.bookmarks.unfiledGuid, {
@ -563,17 +567,18 @@ add_task(async function test_new_orphan_without_local_parent() {
deepEqual(await buf.fetchUnmergedGuids(), [], "Should merge all items");
let idsToUpload = inspectChangeRecords(changesToUpload);
deepEqual(idsToUpload, {
updated: [],
updated: ["folderAAAAAA", "folderEEEEEE", "menu"],
deleted: [],
}, "Should not reupload orphan E");
}, "...");
await storeChangesInMirror(buf, changesToUpload);
}
// E is still in unfiled because we don't have a record for the menu.
await assertLocalTree(PlacesUtils.bookmarks.unfiledGuid, {
guid: PlacesUtils.bookmarks.unfiledGuid,
await assertLocalTree(PlacesUtils.bookmarks.menuGuid, {
guid: PlacesUtils.bookmarks.menuGuid,
type: PlacesUtils.bookmarks.TYPE_FOLDER,
index: 3,
title: UnfiledBookmarksTitle,
index: 0,
title: BookmarksMenuTitle,
children: [{
guid: "folderEEEEEE",
type: PlacesUtils.bookmarks.TYPE_FOLDER,
@ -605,7 +610,7 @@ add_task(async function test_new_orphan_without_local_parent() {
}],
}],
}],
}, "Should move A into E");
}, "Should move Menu > E > A");
info("Add Menu > E to remote");
await storeRecords(buf, [{
@ -806,9 +811,9 @@ add_task(async function test_move_into_orphaned() {
let idsToUpload = inspectChangeRecords(changesToUpload);
deepEqual(idsToUpload, {
updated: ["bookmarkIIII", "folderCCCCCC", "folderEEEEEE"],
updated: ["bookmarkAAAA", "bookmarkIIII", "folderCCCCCC", "folderEEEEEE", "menu"],
deleted: ["bookmarkDDDD"],
}, "Should upload records for (I C E); tombstone for D");
}, "Should upload records for (A I C E); tombstone for D");
await assertLocalTree(PlacesUtils.bookmarks.rootGuid, {
guid: PlacesUtils.bookmarks.rootGuid,
@ -962,9 +967,10 @@ add_task(async function test_new_orphan_with_local_parent() {
deepEqual(await buf.fetchUnmergedGuids(), [], "Should merge all items");
let idsToUpload = inspectChangeRecords(changesToUpload);
deepEqual(idsToUpload, {
updated: [],
updated: ["bookmarkCCCC", "bookmarkDDDD", "folderAAAAAA"],
deleted: [],
}, "Should not reupload orphans (C D)");
}, "Should reupload orphans (C D) and folder A");
await storeChangesInMirror(buf, changesToUpload);
}
await assertLocalTree(PlacesUtils.bookmarks.rootGuid, {
@ -994,6 +1000,18 @@ add_task(async function test_new_orphan_with_local_parent() {
index: 1,
title: "E",
url: "http://example.com/e",
}, {
guid: "bookmarkCCCC",
type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
index: 2,
title: "C (remote)",
url: "http://example.com/c-remote",
}, {
guid: "bookmarkDDDD",
type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
index: 3,
title: "D (remote)",
url: "http://example.com/d-remote",
}],
}],
}, {
@ -1006,26 +1024,13 @@ add_task(async function test_new_orphan_with_local_parent() {
type: PlacesUtils.bookmarks.TYPE_FOLDER,
index: 3,
title: UnfiledBookmarksTitle,
children: [{
guid: "bookmarkCCCC",
type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
index: 0,
title: "C (remote)",
url: "http://example.com/c-remote",
}, {
guid: "bookmarkDDDD",
type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
index: 1,
title: "D (remote)",
url: "http://example.com/d-remote",
}],
}, {
guid: PlacesUtils.bookmarks.mobileGuid,
type: PlacesUtils.bookmarks.TYPE_FOLDER,
index: 4,
title: MobileBookmarksTitle,
}],
}, "Should move (C D) to unfiled");
}, "Should move (C D) to end of A");
// The partial uploader returns and uploads A.
info("Add A to remote");
@ -1123,10 +1128,7 @@ add_task(async function test_tombstone_as_child() {
let changesToUpload = await buf.apply();
let idsToUpload = inspectChangeRecords(changesToUpload);
deepEqual(idsToUpload.deleted, [], "no new tombstones were created.");
// Note that we do not attempt to re-upload the folder with the correct
// list of children - but we might take some action in the future around
// this.
deepEqual(idsToUpload.updated, [], "parent is not re-uploaded");
deepEqual(idsToUpload.updated, ["folderAAAAAA"], "parent is re-uploaded");
await assertLocalTree(PlacesUtils.bookmarks.rootGuid, {
guid: PlacesUtils.bookmarks.rootGuid,
@ -1670,8 +1672,7 @@ add_task(async function test_partial_cycle() {
await PlacesTestUtils.markBookmarksAsSynced();
// Try to create a cycle: move A into B, and B into the menu, but don't upload
// a record for the menu. B is still a child of A locally. Since we ignore the
// `parentid`, we'll move (B A) into unfiled.
// a record for the menu.
info("Make remote changes: A > C");
await storeRecords(buf, [{
id: "folderAAAAAA",
@ -1687,60 +1688,8 @@ add_task(async function test_partial_cycle() {
children: ["folderAAAAAA"],
}]);
info("Apply remote");
let changesToUpload = await buf.apply();
deepEqual(await buf.fetchUnmergedGuids(), [], "Should merge all items");
let idsToUpload = inspectChangeRecords(changesToUpload);
deepEqual(idsToUpload, { updated: [], deleted: [] },
"Should not mark any local items for upload");
await assertLocalTree(PlacesUtils.bookmarks.rootGuid, {
guid: PlacesUtils.bookmarks.rootGuid,
type: PlacesUtils.bookmarks.TYPE_FOLDER,
index: 0,
title: "",
children: [{
guid: PlacesUtils.bookmarks.menuGuid,
type: PlacesUtils.bookmarks.TYPE_FOLDER,
index: 0,
title: BookmarksMenuTitle,
}, {
guid: PlacesUtils.bookmarks.toolbarGuid,
type: PlacesUtils.bookmarks.TYPE_FOLDER,
index: 1,
title: BookmarksToolbarTitle,
}, {
guid: PlacesUtils.bookmarks.unfiledGuid,
type: PlacesUtils.bookmarks.TYPE_FOLDER,
index: 3,
title: UnfiledBookmarksTitle,
children: [{
guid: "folderBBBBBB",
type: PlacesUtils.bookmarks.TYPE_FOLDER,
index: 0,
title: "B (remote)",
children: [{
guid: "folderAAAAAA",
type: PlacesUtils.bookmarks.TYPE_FOLDER,
index: 0,
title: "A (remote)",
children: [{
guid: "bookmarkCCCC",
type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
index: 0,
title: "C",
url: "http://example.com/c",
}],
}],
}],
}, {
guid: PlacesUtils.bookmarks.mobileGuid,
type: PlacesUtils.bookmarks.TYPE_FOLDER,
index: 4,
title: MobileBookmarksTitle,
}],
}, "Should move A and B to unfiled");
await Assert.rejects(buf.apply(), /Item folderBBBBBB can't contain itself/,
"Should abort merge if remote tree parents form `parentid` cycle");
await buf.finalize();
await PlacesUtils.bookmarks.eraseEverything();
@ -1789,43 +1738,8 @@ add_task(async function test_complete_cycle() {
children: ["folderAAAAAA"],
}]);
info("Apply remote");
let changesToUpload = await buf.apply();
deepEqual((await buf.fetchUnmergedGuids()).sort(), ["folderAAAAAA",
"folderBBBBBB", "folderCCCCCC", "folderDDDDDD"],
"Should leave items in circular subtree unmerged");
let idsToUpload = inspectChangeRecords(changesToUpload);
deepEqual(idsToUpload, { updated: [], deleted: [] },
"Should not mark any local items for upload");
await assertLocalTree(PlacesUtils.bookmarks.rootGuid, {
guid: PlacesUtils.bookmarks.rootGuid,
type: PlacesUtils.bookmarks.TYPE_FOLDER,
index: 0,
title: "",
children: [{
guid: PlacesUtils.bookmarks.menuGuid,
type: PlacesUtils.bookmarks.TYPE_FOLDER,
index: 0,
title: BookmarksMenuTitle,
}, {
guid: PlacesUtils.bookmarks.toolbarGuid,
type: PlacesUtils.bookmarks.TYPE_FOLDER,
index: 1,
title: BookmarksToolbarTitle,
}, {
guid: PlacesUtils.bookmarks.unfiledGuid,
type: PlacesUtils.bookmarks.TYPE_FOLDER,
index: 3,
title: UnfiledBookmarksTitle,
}, {
guid: PlacesUtils.bookmarks.mobileGuid,
type: PlacesUtils.bookmarks.TYPE_FOLDER,
index: 4,
title: MobileBookmarksTitle,
}],
}, "Should not be confused into creating a cycle");
await Assert.rejects(buf.apply(), /Item folderAAAAAA can't contain itself/,
"Should abort merge if remote tree parents form cycle through `children`");
await buf.finalize();
await PlacesUtils.bookmarks.eraseEverything();
@ -1833,6 +1747,8 @@ add_task(async function test_complete_cycle() {
});
add_task(async function test_invalid_guid() {
let now = new Date();
let buf = await openMirror("invalid_guid");
info("Set up empty mirror");
@ -1851,12 +1767,12 @@ add_task(async function test_invalid_guid() {
title: "A",
bmkUri: "http://example.com/a",
}, {
// Should be ignored.
id: "bad!guid~",
parentid: "menu",
type: "bookmark",
title: "Bad GUID",
bmkUri: "http://example.com/bad-guid",
title: "C",
bmkUri: "http://example.com/c",
dateAdded: now.getTime(),
}, {
id: "bookmarkBBBB",
parentid: "menu",
@ -1868,13 +1784,88 @@ add_task(async function test_invalid_guid() {
let changesToUpload = await buf.apply();
deepEqual(await buf.fetchUnmergedGuids(), [], "Should merge all items");
deepEqual(changesToUpload, {}, "Should not reupload menu with gaps");
let datesAdded = await promiseManyDatesAdded([PlacesUtils.bookmarks.menuGuid]);
let recordIdsToUpload = Object.keys(changesToUpload);
let newGuid = recordIdsToUpload.find(recordId =>
!["bad!guid~", "menu"].includes(recordId));
equal(recordIdsToUpload.length, 3,
"Should reupload menu, C, and tombstone for bad GUID");
deepEqual(changesToUpload["bad!guid~"], {
tombstone: true,
counter: 1,
synced: false,
cleartext: {
id: "bad!guid~",
deleted: true,
},
}, "Should upload tombstone for C's invalid GUID");
deepEqual(changesToUpload[newGuid], {
tombstone: false,
counter: 1,
synced: false,
cleartext: {
id: newGuid,
type: "bookmark",
parentid: "menu",
hasDupe: true,
parentName: BookmarksMenuTitle,
dateAdded: now.getTime(),
bmkUri: "http://example.com/c",
title: "C",
},
}, "Should reupload C with new GUID");
deepEqual(changesToUpload.menu, {
tombstone: false,
counter: 1,
synced: false,
cleartext: {
id: "menu",
type: "folder",
parentid: "places",
hasDupe: true,
parentName: "",
dateAdded: datesAdded.get(PlacesUtils.bookmarks.menuGuid),
title: BookmarksMenuTitle,
children: ["bookmarkAAAA", newGuid, "bookmarkBBBB"],
},
}, "Should reupload menu with new child GUID for C");
deepEqual(await buf.fetchRemoteOrphans(), {
missingChildren: [],
missingParents: [],
parentsWithGaps: [PlacesUtils.bookmarks.menuGuid],
}, "Should report gaps in menu");
parentsWithGaps: [],
}, "Should not report problems");
await assertLocalTree(PlacesUtils.bookmarks.menuGuid, {
guid: PlacesUtils.bookmarks.menuGuid,
type: PlacesUtils.bookmarks.TYPE_FOLDER,
index: 0,
title: BookmarksMenuTitle,
children: [{
guid: "bookmarkAAAA",
type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
index: 0,
title: "A",
url: "http://example.com/a",
}, {
guid: newGuid,
type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
index: 1,
title: "C",
url: "http://example.com/c",
}, {
guid: "bookmarkBBBB",
type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
index: 2,
title: "B",
url: "http://example.com/b",
}],
});
await buf.finalize();
await PlacesUtils.bookmarks.eraseEverything();

View File

@ -133,7 +133,7 @@ add_task(async function test_queries() {
});
// Bug 632287.
add_task(async function test_mismatched_but_compatible_folder_types() {
add_task(async function test_mismatched_folder_types() {
let buf = await openMirror("mismatched_types");
info("Set up mirror");
@ -261,12 +261,12 @@ add_task(async function test_incompatible_types() {
let recordTelemetryEvent = (object, method, value, extra) => {
// expecting to see an error for kind mismatches.
if (method == "apply" && value == "error" &&
extra && extra.why == "Can't merge different item kinds") {
extra && extra.why == "Can't merge local kind Bookmark and remote kind Folder") {
sawMismatchError = true;
}
};
try {
let buf = await openMirror("partial_queries", {recordTelemetryEvent});
let buf = await openMirror("incompatible_types", {recordTelemetryEvent});
await PlacesUtils.bookmarks.insertTree({
guid: PlacesUtils.bookmarks.menuGuid,
@ -297,7 +297,7 @@ add_task(async function test_incompatible_types() {
}], { needsMerge: true });
await PlacesTestUtils.markBookmarksAsSynced();
await Assert.rejects(buf.apply(), /Can't merge different item kinds/);
await Assert.rejects(buf.apply(), /Can't merge local kind Bookmark and remote kind Folder/);
Assert.ok(sawMismatchError, "saw expected mismatch event");
} finally {
await PlacesUtils.bookmarks.eraseEverything();

View File

@ -1169,7 +1169,40 @@ add_task(async function test_rewrite_tag_queries() {
let changesToUpload = await buf.apply();
deepEqual(await buf.fetchUnmergedGuids(), [], "Should merge all items");
deepEqual(changesToUpload, {}, "Should not reupload any local records");
deepEqual(changesToUpload, {
queryBBBBBBB: {
tombstone: false,
counter: 1,
synced: false,
cleartext: {
id: "queryBBBBBBB",
type: "query",
parentid: "toolbar",
hasDupe: true,
parentName: BookmarksToolbarTitle,
dateAdded: undefined,
bmkUri: "place:tag=taggy",
title: "Tagged stuff",
folderName: "taggy",
},
},
queryCCCCCCC: {
tombstone: false,
counter: 1,
synced: false,
cleartext: {
id: "queryCCCCCCC",
type: "query",
parentid: "toolbar",
hasDupe: true,
parentName: BookmarksToolbarTitle,
dateAdded: undefined,
bmkUri: "place:tag=kitty",
title: "Cats",
folderName: "kitty",
},
},
}, "Should reupload (E C) with rewritten URLs");
let bmWithTaggy = await PlacesUtils.bookmarks.fetch({tags: ["taggy"]});
equal(bmWithTaggy.url.href, "http://example.com/e",
@ -1384,11 +1417,11 @@ add_task(async function test_duplicate_url_rows() {
syncStatus: PlacesUtils.bookmarks.SYNC_STATUS.NEW });
await buf.db.executeCached(`
INSERT INTO items(guid, needsMerge, kind, title, urlId)
VALUES(:guid, 1, :kind, :remoteTitle,
INSERT INTO items(guid, parentGuid, needsMerge, kind, title, urlId)
VALUES(:guid, :parentGuid, 1, :kind, :remoteTitle,
(SELECT id FROM urls WHERE guid = :placeGuid))`,
{ guid, placeGuid, kind: SyncedBookmarksMirror.KIND.BOOKMARK,
remoteTitle });
{ guid, parentGuid, placeGuid,
kind: Ci.mozISyncedBookmarksMerger.KIND_BOOKMARK, remoteTitle });
await buf.db.executeCached(`
INSERT INTO structure(guid, parentGuid, position)

View File

@ -312,7 +312,7 @@ const Preferences = window.Preferences = (function() {
if (Preferences.type == "child" && window.opener &&
window.opener.Preferences &&
Services.scriptSecurityManager.isSystemPrincipal(window.opener.document.nodePrincipal)) {
window.opener.document.nodePrincipal.isSystemPrincipal) {
// Try to find the preference in the parent window.
const preference = window.opener.Preferences.get(this.name);

View File

@ -14,6 +14,7 @@ cubeb_pulse_rust = ["gkrust-shared/cubeb_pulse_rust"]
gecko_debug = ["gkrust-shared/gecko_debug"]
simd-accel = ["gkrust-shared/simd-accel"]
moz_memory = ["gkrust-shared/moz_memory"]
moz_places = ["gkrust-shared/moz_places"]
spidermonkey_rust = ["gkrust-shared/spidermonkey_rust"]
cranelift_x86 = ["gkrust-shared/cranelift_x86"]
cranelift_arm32 = ["gkrust-shared/cranelift_arm32"]

View File

@ -14,6 +14,7 @@ cubeb_pulse_rust = ["gkrust-shared/cubeb_pulse_rust"]
gecko_debug = ["gkrust-shared/gecko_debug"]
simd-accel = ["gkrust-shared/simd-accel"]
moz_memory = ["gkrust-shared/moz_memory"]
moz_places = ["gkrust-shared/moz_places"]
spidermonkey_rust = ["gkrust-shared/spidermonkey_rust"]
cranelift_x86 = ["gkrust-shared/cranelift_x86"]
cranelift_arm32 = ["gkrust-shared/cranelift_arm32"]

View File

@ -26,6 +26,9 @@ if (CONFIG['OS_ARCH'] == 'Linux' and CONFIG['OS_TARGET'] != 'Android') or CONFIG
if CONFIG['MOZ_MEMORY']:
gkrust_features += ['moz_memory']
if CONFIG['MOZ_PLACES']:
gkrust_features += ['moz_places']
if CONFIG['ENABLE_WASM_CRANELIFT']:
gkrust_features += ['spidermonkey_rust']
if CONFIG['JS_CODEGEN_X86'] or CONFIG['JS_CODEGEN_X64']:

View File

@ -34,6 +34,8 @@ jsrust_shared = { path = "../../../../js/src/rust/shared", optional = true }
arrayvec = "0.4"
cert_storage = { path = "../../../../security/manager/ssl/cert_storage" }
bitsdownload = { path = "../../../components/bitsdownload", optional = true }
storage = { path = "../../../../storage/rust" }
bookmark_sync = { path = "../../../components/places/bookmark_sync", optional = true }
[build-dependencies]
rustc_version = "0.2"
@ -48,6 +50,7 @@ cubeb_pulse_rust = ["cubeb-sys", "cubeb-pulse"]
gecko_debug = ["geckoservo/gecko_debug", "nsstring/gecko_debug"]
simd-accel = ["encoding_c/simd-accel", "encoding_glue/simd-accel"]
moz_memory = ["mp4parse_capi/mp4parse_fallible"]
moz_places = ["bookmark_sync"]
spidermonkey_rust = ["jsrust_shared"]
cranelift_x86 = ["jsrust_shared/cranelift_x86"]
cranelift_arm32 = ["jsrust_shared/cranelift_arm32"]

Some files were not shown because too many files have changed in this diff Show More