Bug 1577788 - Fix unique constraint errors when syncing bookmarks with tags. r=markh

Our `INSTEAD OF INSERT` trigger for local tags reused tag folders if
they already existed, but not existing tag entries. This caused us to
insert duplicate tag entries for items whose tags didn't change,
which, in turn, threw unique constraint violations when we tried to
insert rows into `itemsAdded` for tag entries that already existed.
This commit gives tag entries the same treatment as tag folders.
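
In outline, the fix gives the tag-entry insert the same
lookup-then-insert shape the trigger already used for tag folders. The
excerpt below is a condensed sketch of the trigger body after this
change, not standalone SQL: `NEW.*` refers to the row inserted into the
temp `localTags` view, `GENERATE_GUID()` is a SQL function that Places
registers on the connection, `${PlacesUtils.bookmarks.tagsGuid}` is
interpolated by the surrounding JavaScript, and the remaining selected
columns are elided.

  /* Reuse the GUID of an existing tag entry for this URL and tag if
     there is one; otherwise generate a fresh GUID. Before this fix, the
     trigger always generated a new GUID here, so re-applying unchanged
     tags created duplicate entries. */
  INSERT OR IGNORE INTO moz_bookmarks(guid, parent, position, type, fk,
                                      dateAdded, lastModified)
  SELECT IFNULL((SELECT b.guid FROM moz_bookmarks b
                 JOIN moz_bookmarks p ON p.id = b.parent
                 WHERE b.fk = NEW.placeId AND
                       p.title = NEW.tag AND
                       p.parent = (SELECT id FROM moz_bookmarks
                                   WHERE guid = '${PlacesUtils.bookmarks.tagsGuid}')),
                GENERATE_GUID()),
         ...;

  /* Only record a notification for the tag entry when the insert above
     actually added a row: CHANGES() is 0 if the entry already existed. */
  INSERT INTO itemsAdded(guid, isTagging)
  SELECT b.guid, 1
  FROM moz_bookmarks b
  JOIN moz_bookmarks p ON p.id = b.parent
  WHERE CHANGES() > 0 AND
        b.fk = NEW.placeId AND
        p.title = NEW.tag AND
        p.parent = (SELECT id FROM moz_bookmarks
                    WHERE guid = '${PlacesUtils.bookmarks.tagsGuid}');

The `CHANGES() > 0` guard on the second statement is what keeps
re-applied, unchanged tags from producing duplicate `itemsAdded` rows.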

This commit also improves debug logging during application, so we can
better pinpoint errors like this in the future.

Differential Revision: https://phabricator.services.mozilla.com/D44769

--HG--
extra : moz-landing-system : lando
Lina Cambridge 2019-09-08 23:55:58 +00:00
parent 908e629971
commit 1046b50b0b
4 changed files with 405 additions and 31 deletions


@ -1809,7 +1809,8 @@ async function initializeTempMirrorEntities(db) {
"CHANGES()" returns the number of rows affected by the INSERT above:
1 if we created the folder, or 0 if the folder already existed. */
INSERT INTO itemsAdded(guid, isTagging)
- SELECT b.guid, 1 FROM moz_bookmarks b
+ SELECT b.guid, 1
+ FROM moz_bookmarks b
JOIN moz_bookmarks p ON p.id = b.parent
WHERE CHANGES() > 0 AND
b.title = NEW.tag AND
@ -1819,7 +1820,15 @@ async function initializeTempMirrorEntities(db) {
ID creates a tag folder without tagging the URL. */
INSERT OR IGNORE INTO moz_bookmarks(guid, parent, position, type, fk,
dateAdded, lastModified)
- SELECT GENERATE_GUID(),
+ SELECT IFNULL((SELECT b.guid FROM moz_bookmarks b
+ JOIN moz_bookmarks p ON p.id = b.parent
+ WHERE b.fk = NEW.placeId AND
+ p.title = NEW.tag AND
+ p.parent = (SELECT id FROM moz_bookmarks
+ WHERE guid = '${
+ PlacesUtils.bookmarks.tagsGuid
+ }')),
+ GENERATE_GUID()),
(SELECT b.id FROM moz_bookmarks b
JOIN moz_bookmarks p ON p.id = b.parent
WHERE p.guid = '${PlacesUtils.bookmarks.tagsGuid}' AND
@ -1838,9 +1847,11 @@ async function initializeTempMirrorEntities(db) {
/* Record an item added notification for the tag entry. */
INSERT INTO itemsAdded(guid, isTagging)
- SELECT b.guid, 1 FROM moz_bookmarks b
+ SELECT b.guid, 1
+ FROM moz_bookmarks b
JOIN moz_bookmarks p ON p.id = b.parent
- WHERE b.fk = NEW.placeId AND
+ WHERE CHANGES() > 0 AND
+ b.fk = NEW.placeId AND
p.title = NEW.tag AND
p.parent = (SELECT id FROM moz_bookmarks
WHERE guid = '${PlacesUtils.bookmarks.tagsGuid}');


@ -428,13 +428,17 @@ impl<'s> dogear::Store for Store<'s> {
return Err(Error::MergeConflict);
}
self.controller.err_if_aborted()?;
debug!(self.driver, "Updating local items in Places");
- update_local_items_in_places(&tx, &self.driver, &ops, deletions)?;
+ update_local_items_in_places(&tx, &self.driver, &self.controller, &ops, deletions)?;
self.controller.err_if_aborted()?;
debug!(self.driver, "Staging items to upload");
- stage_items_to_upload(&tx, &ops.upload, &self.weak_uploads)?;
+ stage_items_to_upload(
+ &tx,
+ &self.driver,
+ &self.controller,
+ &ops.upload,
+ &self.weak_uploads,
+ )?;
cleanup(&tx)?;
tx.commit()?;
@ -458,10 +462,15 @@ impl<'s> dogear::Store for Store<'s> {
fn update_local_items_in_places<'t>(
db: &Conn,
driver: &Driver,
controller: &AbortController,
ops: &CompletionOps<'t>,
deletions: Vec<Deletion>,
) -> Result<()> {
- // Clean up any observer notifications left over from the last sync.
+ debug!(
+ driver,
+ "Cleaning up observer notifications left from last sync"
+ );
+ controller.err_if_aborted()?;
db.exec(
"DELETE FROM itemsAdded;
DELETE FROM guidsChanged;
@ -492,6 +501,7 @@ fn update_local_items_in_places<'t>(
repeat_sql_vars(chunk.len()),
))?;
for (index, op) in chunk.iter().enumerate() {
controller.err_if_aborted()?;
let remote_guid = nsString::from(&*op.remote_node().guid);
statement.bind_by_index(index as u32, remote_guid)?;
}
@ -500,6 +510,7 @@ fn update_local_items_in_places<'t>(
// Trigger frecency updates for all new origins.
debug!(driver, "Updating origins for new URLs");
controller.err_if_aborted()?;
db.exec("DELETE FROM moz_updateoriginsinsert_temp")?;
// Build a table of new and updated items.
@ -556,6 +567,8 @@ fn update_local_items_in_places<'t>(
now,
))?;
for (index, op) in chunk.iter().enumerate() {
controller.err_if_aborted()?;
let offset = (index * 3) as u32;
// In most cases, the merged and remote GUIDs are the same for new
@ -605,6 +618,8 @@ fn update_local_items_in_places<'t>(
})
))?;
for (index, op) in chunk.iter().enumerate() {
controller.err_if_aborted()?;
let offset = (index * 2) as u32;
let local_guid = nsString::from(&*op.local_node().guid);
@ -632,6 +647,8 @@ fn update_local_items_in_places<'t>(
})
))?;
for (index, op) in chunk.iter().enumerate() {
controller.err_if_aborted()?;
let offset = (index * 2) as u32;
let merged_guid = nsString::from(&*op.merged_node.guid);
@ -663,6 +680,7 @@ fn update_local_items_in_places<'t>(
})
))?;
for (index, d) in chunk.iter().enumerate() {
controller.err_if_aborted()?;
statement.bind_by_index(index as u32, nsString::from(d.guid.as_str()))?;
}
statement.execute()?;
@ -680,21 +698,24 @@ fn update_local_items_in_places<'t>(
)?;
debug!(driver, "Removing local items");
- remove_local_items(&db)?;
+ remove_local_items(db, driver, controller)?;
// Fires the `changeGuids` trigger.
debug!(driver, "Changing GUIDs");
controller.err_if_aborted()?;
db.exec("DELETE FROM changeGuidOps")?;
debug!(driver, "Applying remote items");
- apply_remote_items(&db)?;
+ apply_remote_items(db, driver, controller)?;
// Trigger frecency updates for all affected origins.
debug!(driver, "Updating origins for changed URLs");
controller.err_if_aborted()?;
db.exec("DELETE FROM moz_updateoriginsupdate_temp")?;
// Fires the `applyNewLocalStructure` trigger.
debug!(driver, "Applying new local structure");
controller.err_if_aborted()?;
db.exec("DELETE FROM applyNewLocalStructureOps")?;
// Reset the change counter for items that we don't need to upload.
@ -707,6 +728,7 @@ fn update_local_items_in_places<'t>(
repeat_sql_vars(chunk.len()),
))?;
for (index, op) in chunk.iter().enumerate() {
controller.err_if_aborted()?;
statement.bind_by_index(index as u32, nsString::from(&*op.merged_node.guid))?;
}
statement.execute()?;
@ -722,6 +744,7 @@ fn update_local_items_in_places<'t>(
repeat_sql_vars(chunk.len()),
))?;
for (index, op) in chunk.iter().enumerate() {
controller.err_if_aborted()?;
statement.bind_by_index(index as u32, nsString::from(&*op.merged_node.guid))?;
}
statement.execute()?;
@ -737,6 +760,7 @@ fn update_local_items_in_places<'t>(
repeat_sql_vars(chunk.len()),
))?;
for (index, op) in chunk.iter().enumerate() {
controller.err_if_aborted()?;
statement.bind_by_index(index as u32, nsString::from(op.guid().as_str()))?;
}
statement.execute()?;
@ -746,8 +770,9 @@ fn update_local_items_in_places<'t>(
}
/// Upserts all new and updated items from the `itemsToApply` table into Places.
- fn apply_remote_items(db: &Conn) -> Result<()> {
- // Record item added notifications for new items.
+ fn apply_remote_items(db: &Conn, driver: &Driver, controller: &AbortController) -> Result<()> {
+ debug!(driver, "Recording item added notifications for new items");
+ controller.err_if_aborted()?;
db.exec(
"INSERT INTO itemsAdded(guid, keywordChanged, level)
SELECT n.mergedGuid, n.newKeyword NOT NULL OR
@ -759,7 +784,11 @@ fn apply_remote_items(db: &Conn) -> Result<()> {
WHERE n.localId IS NULL",
)?;
- // Record item changed notifications for existing items.
+ debug!(
+ driver,
+ "Recording item changed notifications for existing items"
+ );
+ controller.err_if_aborted()?;
db.exec(
"INSERT INTO itemsChanged(itemId, oldTitle, oldPlaceId, keywordChanged,
level)
@ -777,6 +806,8 @@ fn apply_remote_items(db: &Conn) -> Result<()> {
// Remove all keywords from old and new URLs, and remove new keywords from
// all existing URLs. The `NOT NULL` conditions are important; they ensure
// that SQLite uses our partial indexes, instead of a table scan.
debug!(driver, "Removing old keywords");
controller.err_if_aborted()?;
db.exec(
"DELETE FROM moz_keywords
WHERE place_id IN (SELECT oldPlaceId FROM itemsToApply
@ -788,7 +819,8 @@ fn apply_remote_items(db: &Conn) -> Result<()> {
",
)?;
- // Remove existing tags.
+ debug!(driver, "Removing old tags");
+ controller.err_if_aborted()?;
db.exec(
"DELETE FROM localTags
WHERE placeId IN (SELECT oldPlaceId FROM itemsToApply
@ -797,10 +829,12 @@ fn apply_remote_items(db: &Conn) -> Result<()> {
WHERE newPlaceId NOT NULL)",
)?;
- // Insert new items, using -1 as a placeholder for the parent ID and
- // position. We'll update these later, when we apply the new local
+ // Insert and update items, using -1 for new items' parent IDs and
+ // positions. We'll update these later, when we apply the new local
// structure. This is a full table scan on `itemsToApply`. The no-op
// `WHERE` clause is necessary to avoid a parsing ambiguity.
debug!(driver, "Upserting new items");
controller.err_if_aborted()?;
db.exec(format!(
"INSERT INTO moz_bookmarks(id, guid, parent, position, type, fk, title,
dateAdded,
@ -835,6 +869,8 @@ fn apply_remote_items(db: &Conn) -> Result<()> {
// would scan `itemsToApply` twice. The `oldPlaceId <> newPlaceId` and
// `newPlaceId <> oldPlaceId` checks exclude items where the URL didn't
// change; we don't need to recalculate their frecencies.
debug!(driver, "Flagging frecencies for recalculation");
controller.err_if_aborted()?;
db.exec(
"UPDATE moz_places SET
frecency = -frecency
@ -849,7 +885,8 @@ fn apply_remote_items(db: &Conn) -> Result<()> {
)",
)?;
- // Insert new keywords for new URLs.
+ debug!(driver, "Inserting new keywords for new URLs");
+ controller.err_if_aborted()?;
db.exec(
"INSERT OR IGNORE INTO moz_keywords(keyword, place_id, post_data)
SELECT newKeyword, newPlaceId, ''
@ -857,7 +894,8 @@ fn apply_remote_items(db: &Conn) -> Result<()> {
WHERE newKeyword NOT NULL",
)?;
- // Insert new tags for new URLs.
+ debug!(driver, "Inserting new tags for new URLs");
+ controller.err_if_aborted()?;
db.exec(
"INSERT INTO localTags(tag, placeId, lastModifiedMicroseconds)
SELECT t.tag, n.newPlaceId, n.lastModifiedMicroseconds
@ -870,8 +908,9 @@ fn apply_remote_items(db: &Conn) -> Result<()> {
/// Removes items that are deleted on one or both sides from Places, and inserts
/// new tombstones for non-syncable items to delete remotely.
- fn remove_local_items(db: &Conn) -> Result<()> {
- // Record observer notifications for removed items.
+ fn remove_local_items(db: &Conn, driver: &Driver, controller: &AbortController) -> Result<()> {
+ debug!(driver, "Recording observer notifications for removed items");
+ controller.err_if_aborted()?;
db.exec(
"INSERT INTO itemsRemoved(itemId, parentId, position, type, placeId,
guid, parentGuid, level)
@ -882,7 +921,8 @@ fn remove_local_items(db: &Conn) -> Result<()> {
JOIN moz_bookmarks p ON p.id = b.parent",
)?;
- // Flag URL frecency for recalculation.
+ debug!(driver, "Flag URL frecencies for recalculation");
+ controller.err_if_aborted()?;
db.exec(
"UPDATE moz_places SET
frecency = -frecency
@ -891,24 +931,31 @@ fn remove_local_items(db: &Conn) -> Result<()> {
frecency > 0",
)?;
- // Remove annos for the deleted items. This can be removed in bug 1460577.
+ // This can be removed in bug 1460577.
+ debug!(driver, "Removing annos for deleted items");
+ controller.err_if_aborted()?;
db.exec(
"DELETE FROM moz_items_annos
WHERE item_id = (SELECT b.id FROM moz_bookmarks b
JOIN itemsToRemove d ON d.guid = b.guid)",
)?;
- // Don't reupload tombstones for items that are already deleted on the
- // server.
+ debug!(
+ driver,
+ "Removing tombstones for items already deleted remotely"
+ );
+ controller.err_if_aborted()?;
db.exec(
"DELETE FROM moz_bookmarks_deleted
WHERE guid IN (SELECT guid FROM itemsToRemove
WHERE NOT shouldUploadTombstone)",
)?;
- // Upload tombstones for non-syncable items. We can remove the
+ // Upload tombstones for non-syncable and invalid items. We can remove the
// `shouldUploadTombstone` check and persist tombstones unconditionally in
// bug 1343103.
debug!(driver, "Inserting tombstones for all deleted items");
controller.err_if_aborted()?;
db.exec(
"INSERT OR IGNORE INTO moz_bookmarks_deleted(guid, dateRemoved)
SELECT guid, dateRemovedMicroseconds
@ -916,7 +963,8 @@ fn remove_local_items(db: &Conn) -> Result<()> {
WHERE shouldUploadTombstone",
)?;
- // Remove the item from Places.
+ debug!(driver, "Removing deleted items from Places");
+ controller.err_if_aborted()?;
db.exec(
"DELETE FROM moz_bookmarks
WHERE guid IN (SELECT guid FROM itemsToRemove)",
@ -943,11 +991,18 @@ fn remove_local_items(db: &Conn) -> Result<()> {
/// items. The change counter in Places is the persistent record of items that
/// we need to upload, so, if upload is interrupted or fails, we'll stage the
/// items again on the next sync.
- fn stage_items_to_upload(db: &Conn, upload: &[Upload], weak_upload: &[nsString]) -> Result<()> {
- // Clean up staged items left over from the last sync.
+ fn stage_items_to_upload(
+ db: &Conn,
+ driver: &Driver,
+ controller: &AbortController,
+ upload: &[Upload],
+ weak_upload: &[nsString],
+ ) -> Result<()> {
+ debug!(driver, "Cleaning up staged items left from last sync");
+ controller.err_if_aborted()?;
db.exec("DELETE FROM itemsToUpload")?;
- // Stage explicit weak uploads such as repair responses.
+ debug!(driver, "Staging weak uploads");
for chunk in weak_upload.chunks(SQLITE_MAX_VARIABLE_NUMBER) {
let mut statement = db.prepare(format!(
"INSERT INTO itemsToUpload(id, guid, syncChangeCounter, parentGuid,
@ -960,6 +1015,7 @@ fn stage_items_to_upload(db: &Conn, upload: &[Upload], weak_upload: &[nsString])
repeat_sql_vars(chunk.len()),
))?;
for (index, guid) in chunk.iter().enumerate() {
controller.err_if_aborted()?;
statement.bind_by_index(index as u32, nsString::from(guid.as_ref()))?;
}
statement.execute()?;
@ -968,6 +1024,8 @@ fn stage_items_to_upload(db: &Conn, upload: &[Upload], weak_upload: &[nsString])
// Stage remotely changed items with older local creation dates. These are
// tracked "weakly": if the upload is interrupted or fails, we won't
// reupload the record on the next sync.
debug!(driver, "Staging items with older local dates added");
controller.err_if_aborted()?;
db.exec(format!(
"INSERT OR IGNORE INTO itemsToUpload(id, guid, syncChangeCounter,
parentGuid, parentTitle, dateAdded,
@ -979,7 +1037,7 @@ fn stage_items_to_upload(db: &Conn, upload: &[Upload], weak_upload: &[nsString])
UploadItemsFragment("b"),
))?;
- // Stage remaining locally changed items for upload.
+ debug!(driver, "Staging remaining locally changed items for upload");
for chunk in upload.chunks(SQLITE_MAX_VARIABLE_NUMBER) {
let mut statement = db.prepare(format!(
"INSERT OR IGNORE INTO itemsToUpload(id, guid, syncChangeCounter,
@ -993,6 +1051,7 @@ fn stage_items_to_upload(db: &Conn, upload: &[Upload], weak_upload: &[nsString])
repeat_sql_vars(chunk.len()),
))?;
for (index, op) in chunk.iter().enumerate() {
controller.err_if_aborted()?;
statement.bind_by_index(index as u32, nsString::from(&*op.merged_node.guid))?;
}
statement.execute()?;
@ -1000,6 +1059,8 @@ fn stage_items_to_upload(db: &Conn, upload: &[Upload], weak_upload: &[nsString])
// Record the child GUIDs of locally changed folders, which we use to
// populate the `children` array in the record.
debug!(driver, "Staging structure to upload");
controller.err_if_aborted()?;
db.exec(
"
INSERT INTO structureToUpload(guid, parentId, position)
@ -1009,6 +1070,8 @@ fn stage_items_to_upload(db: &Conn, upload: &[Upload], weak_upload: &[nsString])
)?;
// Stage tags for outgoing bookmarks.
debug!(driver, "Staging tags to upload");
controller.err_if_aborted()?;
db.exec(
"
INSERT INTO tagsToUpload(id, tag)
@ -1019,6 +1082,8 @@ fn stage_items_to_upload(db: &Conn, upload: &[Upload], weak_upload: &[nsString])
// Finally, stage tombstones for deleted items. Ignore conflicts if we have
// tombstones for undeleted items; Places Maintenance should clean these up.
controller.err_if_aborted()?;
debug!(driver, "Staging tombstones to upload");
db.exec(
"
INSERT OR IGNORE INTO itemsToUpload(guid, syncChangeCounter, isDeleted)


@ -212,6 +212,9 @@ async function fetchLocalTree(rootGuid) {
if (node.children) {
itemInfo.children = node.children.map(bookmarkNodeToInfo);
}
if (node.tags) {
itemInfo.tags = node.tags.split(",").sort();
}
return itemInfo;
}
let root = await PlacesUtils.promiseBookmarksTree(rootGuid);


@ -1485,6 +1485,301 @@ add_task(async function test_keywords_complex() {
await PlacesSyncUtils.bookmarks.reset();
});
add_task(async function test_tags_complex() {
let buf = await openMirror("tags_complex");
info("Set up mirror");
await PlacesUtils.bookmarks.insertTree({
guid: PlacesUtils.bookmarks.menuGuid,
children: [
{
guid: "bookmarkAAA1",
title: "A1",
url: "http://example.com/a",
tags: ["one", "two"],
},
{
guid: "bookmarkAAA2",
title: "A2",
url: "http://example.com/a",
tags: ["one", "two"],
},
{
guid: "bookmarkBBB1",
title: "B1",
url: "http://example.com/b",
tags: ["one"],
},
{
guid: "bookmarkBBB2",
title: "B2",
url: "http://example.com/b",
tags: ["one"],
},
{
guid: "bookmarkCCC1",
title: "C1",
url: "http://example.com/c",
tags: ["two", "three"],
},
{
guid: "bookmarkCCC2",
title: "C2",
url: "http://example.com/c",
tags: ["two", "three"],
},
],
});
await storeRecords(
buf,
shuffle([
{
id: "menu",
parentid: "places",
type: "folder",
children: [
"bookmarkAAA1",
"bookmarkAAA2",
"bookmarkBBB1",
"bookmarkBBB2",
"bookmarkCCC1",
"bookmarkCCC2",
],
},
{
id: "bookmarkAAA1",
parentid: "menu",
type: "bookmark",
title: "A",
bmkUri: "http://example.com/a",
tags: ["one", "two"],
},
{
id: "bookmarkAAA2",
parentid: "menu",
type: "bookmark",
title: "A",
bmkUri: "http://example.com/a",
tags: ["one", "two"],
},
{
id: "bookmarkBBB1",
parentid: "menu",
type: "bookmark",
title: "B1",
bmkUri: "http://example.com/b",
tags: ["one"],
},
{
id: "bookmarkBBB2",
parentid: "menu",
type: "bookmark",
title: "B2",
bmkUri: "http://example.com/b",
tags: ["one"],
},
{
id: "bookmarkCCC1",
parentid: "menu",
type: "bookmark",
title: "C1",
bmkUri: "http://example.com/c",
tags: ["two", "three"],
},
{
id: "bookmarkCCC2",
parentid: "menu",
type: "bookmark",
title: "C2",
bmkUri: "http://example.com/c",
tags: ["two", "three"],
},
]),
{ needsMerge: false }
);
await PlacesTestUtils.markBookmarksAsSynced();
info("Add tags for B locally");
PlacesUtils.tagging.tagURI(Services.io.newURI("http://example.com/b"), [
"four",
"five",
]);
info("Remove tag from C locally");
PlacesUtils.tagging.untagURI(Services.io.newURI("http://example.com/c"), [
"two",
]);
info("Update tags for A remotely");
await storeRecords(
buf,
shuffle([
{
id: "bookmarkAAA1",
parentid: "menu",
type: "bookmark",
title: "A1",
bmkUri: "http://example.com/a",
tags: ["one", "two", "four", "six"],
},
{
id: "bookmarkAAA2",
parentid: "menu",
type: "bookmark",
title: "A2",
bmkUri: "http://example.com/a",
tags: ["one", "two", "four", "six"],
},
])
);
info("Apply remote");
let changesToUpload = await buf.apply();
let datesAdded = await promiseManyDatesAdded([
"bookmarkBBB1",
"bookmarkBBB2",
"bookmarkCCC1",
"bookmarkCCC2",
]);
deepEqual(await buf.fetchUnmergedGuids(), [], "Should merge all items");
deepEqual(
changesToUpload,
{
bookmarkBBB1: {
counter: 2,
synced: false,
tombstone: false,
cleartext: {
id: "bookmarkBBB1",
type: "bookmark",
parentid: "menu",
hasDupe: true,
parentName: BookmarksMenuTitle,
dateAdded: datesAdded.get("bookmarkBBB1"),
bmkUri: "http://example.com/b",
title: "B1",
tags: ["five", "four", "one"],
},
},
bookmarkBBB2: {
counter: 2,
synced: false,
tombstone: false,
cleartext: {
id: "bookmarkBBB2",
type: "bookmark",
parentid: "menu",
hasDupe: true,
parentName: BookmarksMenuTitle,
dateAdded: datesAdded.get("bookmarkBBB2"),
bmkUri: "http://example.com/b",
title: "B2",
tags: ["five", "four", "one"],
},
},
bookmarkCCC1: {
counter: 1,
synced: false,
tombstone: false,
cleartext: {
id: "bookmarkCCC1",
type: "bookmark",
parentid: "menu",
hasDupe: true,
parentName: BookmarksMenuTitle,
dateAdded: datesAdded.get("bookmarkCCC1"),
bmkUri: "http://example.com/c",
title: "C1",
tags: ["three"],
},
},
bookmarkCCC2: {
counter: 1,
synced: false,
tombstone: false,
cleartext: {
id: "bookmarkCCC2",
type: "bookmark",
parentid: "menu",
hasDupe: true,
parentName: BookmarksMenuTitle,
dateAdded: datesAdded.get("bookmarkCCC2"),
bmkUri: "http://example.com/c",
title: "C2",
tags: ["three"],
},
},
},
"Should upload local records with new tags"
);
await assertLocalTree(
PlacesUtils.bookmarks.menuGuid,
{
guid: PlacesUtils.bookmarks.menuGuid,
type: PlacesUtils.bookmarks.TYPE_FOLDER,
index: 0,
title: BookmarksMenuTitle,
children: [
{
guid: "bookmarkAAA1",
type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
index: 0,
title: "A1",
url: "http://example.com/a",
tags: ["four", "one", "six", "two"],
},
{
guid: "bookmarkAAA2",
type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
index: 1,
title: "A2",
url: "http://example.com/a",
tags: ["four", "one", "six", "two"],
},
{
guid: "bookmarkBBB1",
type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
index: 2,
title: "B1",
url: "http://example.com/b",
tags: ["five", "four", "one"],
},
{
guid: "bookmarkBBB2",
type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
index: 3,
title: "B2",
url: "http://example.com/b",
tags: ["five", "four", "one"],
},
{
guid: "bookmarkCCC1",
type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
index: 4,
title: "C1",
url: "http://example.com/c",
tags: ["three"],
},
{
guid: "bookmarkCCC2",
type: PlacesUtils.bookmarks.TYPE_BOOKMARK,
index: 5,
title: "C2",
url: "http://example.com/c",
tags: ["three"],
},
],
},
"Should update local items with new tags"
);
await buf.finalize();
await PlacesUtils.bookmarks.eraseEverything();
await PlacesSyncUtils.bookmarks.reset();
});
add_task(async function test_tags() {
let buf = await openMirror("tags");