Bug 1368951 - add x-if-unmodified-since support to the sync test server. r=tcsc

MozReview-Commit-ID: FF7smwdeQ0k

--HG--
extra : rebase_source : 82f8706d6a27f1bf93b95b93572a2fa37b268b3d
Mark Hammond 2017-05-31 18:42:45 +10:00
parent dbdaad76f3
commit fa4625acc7
12 changed files with 117 additions and 22 deletions
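
Most of the test changes below exist for one reason: now that the test server enforces X-If-Unmodified-Since, an engine whose lastModified (which feeds the X-If-Unmodified-Since header it sends) is older than the server collection's timestamp gets a 412 on upload, so the tests keep the two in step before syncing. A minimal sketch of that recurring pattern, using only names that appear in this diff (the user/collection names and surrounding setup vary per test):

// Sketch only - assumes a SyncServer instance (`server`) and an engine that
// is about to upload, as in the tests changed below.
let collection = server.getCollection("foo", "clients");
// Make the X-If-Unmodified-Since the engine will send match the server's
// current X-Last-Modified so the new 412 check doesn't reject the upload.
engine.lastModified = collection.timestamp;
await engine._sync();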

View File

@ -45,6 +45,10 @@ this.initTestLogging = function initTestLogging(level) {
log.ownAppenders = [appender];
log.updateAppenders();
// SQLite logging is noisy in these tests - we make it quiet by default
// (although individual tests are free to bump it later)
Log.repository.getLogger("Sqlite").level = Log.Level.Info;
return logStats;
}

View File

@ -1261,11 +1261,16 @@ SyncEngine.prototype = {
}
}
// Mobile: check if we got the maximum that we requested; get the rest if so.
// History: check if we got the maximum that we requested; get the rest if so.
if (handled.length == newitems.limit) {
// XXX - this block appears to have no test coverage (e.g., throwing here,
// or commenting out the entire block, causes no tests to fail).
// See bug 1368951 comment 3 for some insightful analysis of why this
// might not be doing what we expect anyway, so it may be the case that
// this needs both fixing *and* tests.
let guidColl = new Collection(this.engineURL, null, this.service);
// Sort and limit so that on mobile we only get the last X records.
// Sort and limit so that we only get the last X records.
guidColl.limit = this.downloadLimit;
guidColl.newer = this.lastSync;
@ -1735,6 +1740,7 @@ SyncEngine.prototype = {
// Remove the key for future uses
delete this._delete[key];
this._log.trace("doing post-sync deletions", {key, val});
// Send a simple delete for the property
if (key != "ids" || val.length <= 100)
await doDelete(key, val);

View File

@ -249,6 +249,7 @@ ServerCollection.prototype = {
* @return the provided WBO.
*/
insertWBO: function insertWBO(wbo) {
this.timestamp = Math.max(this.timestamp, wbo.modified);
return this._wbos[wbo.id] = wbo;
},
@ -445,7 +446,7 @@ ServerCollection.prototype = {
if (nextOffset) {
response.setHeader("X-Weave-Next-Offset", "" + nextOffset);
}
response.setHeader("X-Last-Modified", "" + this.timestamp);
response.setHeader("X-Last-Modified", "" + self.timestamp);
break;
case "POST":
@ -466,16 +467,18 @@ ServerCollection.prototype = {
response.setHeader("X-Weave-Timestamp",
"" + new_timestamp(),
false);
response.setStatusLine(request.httpVersion, statusCode, status);
response.bodyOutputStream.write(body, body.length);
// Update the collection timestamp to the appropriate modified time.
// This is either a value set by the handler, or the current time.
if (request.method != "GET") {
this.timestamp = (response.newModified >= 0) ?
self.timestamp = (response.newModified >= 0) ?
response.newModified :
new_timestamp();
}
response.setHeader("X-Last-Modified", "" + self.timestamp, false);
response.setStatusLine(request.httpVersion, statusCode, status);
response.bodyOutputStream.write(body, body.length);
};
}
@ -492,6 +495,12 @@ function sync_httpd_setup(handlers) {
/*
* Track collection modified times. Return closures.
*
* XXX - DO NOT USE IN NEW TESTS
*
* This code has very limited and very hacky timestamp support - the test
* server now has more complete and correct support, so using this helper
* may cause strangeness wrt timestamp headers and 412 responses.
*/
function track_collections_helper() {
@ -942,6 +951,25 @@ SyncServer.prototype = {
}
let [, collection, wboID] = match;
let coll = this.getCollection(username, collection);
let checkXIUSFailure = () => {
if (req.hasHeader("x-if-unmodified-since")) {
let xius = parseFloat(req.getHeader("x-if-unmodified-since"));
// Sadly, the way our tests are set up, we often end up with an xius of
// zero (typically when syncing just one engine, so the date from
// info/collections isn't used) - so we allow that to work.
// Further, the Python server treats non-existing collections as
// having a timestamp of 0.
let collTimestamp = coll ? coll.timestamp : 0;
if (xius && xius < collTimestamp) {
this._log.info(`x-if-unmodified-since mismatch - request wants ${xius} but our collection has ${collTimestamp}`);
respond(412, "precondition failed", "precondition failed");
return true;
}
}
return false;
}
switch (req.method) {
case "GET": {
if (!coll) {
@ -949,9 +977,9 @@ SyncServer.prototype = {
respond(404, "Not found", "Not found");
return undefined;
}
// *cries inside*: Bug 687299 - now fixed, so apparently the real
// sync server *will* 404 in this case - bug 1347807 is to change
// this to a 404 and fix a handful of test failures it causes.
// *cries inside* - apparently the real sync server returned 200
// here for some time, then returned 404 for some time (bug 687299),
// and now is back to 200 (bug 963332).
respond(200, "OK", "[]");
return undefined;
}
@ -965,12 +993,14 @@ SyncServer.prototype = {
}
return wbo.handler()(req, resp);
}
// TODO: implement handling of X-If-Unmodified-Since for write verbs.
case "DELETE": {
if (!coll) {
respond(200, "OK", "{}");
return undefined;
}
if (checkXIUSFailure()) {
return undefined;
}
if (wboID) {
let wbo = coll.wbo(wboID);
if (wbo) {
@ -1011,11 +1041,35 @@ SyncServer.prototype = {
}
return undefined;
}
case "POST":
case "PUT":
// PUT and POST have slightly different XIUS semantics - for PUT,
// the check is against the item, whereas for POST it is against
// the collection. So first, a special case for PUT.
if (req.hasHeader("x-if-unmodified-since")) {
let xius = parseFloat(req.getHeader("x-if-unmodified-since"));
// treat an xius of zero as if it wasn't specified - this happens
// in some of our tests for a new collection.
if (xius > 0) {
let wbo = coll.wbo(wboID);
if (xius < wbo.modified) {
this._log.info(`x-if-unmodified-since mismatch - request wants ${xius} but wbo has ${wbo.modified}`);
respond(412, "precondition failed", "precondition failed");
return undefined;
}
wbo.handler()(req, resp);
coll.timestamp = resp.newModified;
return resp;
}
}
// fall through to post.
case "POST":
if (checkXIUSFailure()) {
return undefined;
}
if (!coll) {
coll = this.createCollection(username, collection);
}
if (wboID) {
let wbo = coll.wbo(wboID);
if (!wbo) {

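Taken together, the server changes in this file give the test server the following X-If-Unmodified-Since behaviour: DELETE and POST compare the header against the collection timestamp (a missing collection counts as timestamp 0, matching the Python server), PUT compares it against the individual WBO, a zero or absent header means no precondition, and insertWBO() only ever moves the collection timestamp forward. A compact, illustration-only restatement of that check (not the server's actual code):

// Illustration only - mirrors the checks added above, not a real server API.
// `xius` is the parsed X-If-Unmodified-Since header (0/NaN if absent);
// `serverTimestamp` is coll.timestamp for POST/DELETE (0 if the collection
// doesn't exist) or wbo.modified for PUT.
function shouldRespond412(xius, serverTimestamp) {
  // A zero or missing header means "no precondition" - many existing tests
  // sync a single engine and never learn a real timestamp.
  if (!xius) {
    return false;
  }
  // Reject the write if the server has changed since the client last looked.
  return xius < serverTimestamp;
}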
View File

@ -228,11 +228,12 @@ add_task(async function test_disabled_install_semantics() {
// are sane.
restartManager();
let collection = server.getCollection(USER, "addons");
engine.lastModified = collection.timestamp;
await engine._sync();
// The client should not upload a new record. The old record should be
// retained and unmodified.
let collection = server.getCollection(USER, "addons");
do_check_eq(1, collection.count());
let payload = collection.payloads()[0];

View File

@ -290,7 +290,8 @@ add_task(async function test_dupe_reparented_locally_changed_bookmark() {
};
let deltaSeconds = 500;
collection.insert(newGUID, encryptPayload(to_apply), Date.now() / 1000 + deltaSeconds);
let newWBO = collection.insert(newGUID, encryptPayload(to_apply), Date.now() / 1000 + deltaSeconds);
do_print(`new duplicate of ${bmk1_guid} is ${newGUID}`);
// Make a change to the bookmark that's a dupe, and set the modification
// time further in the future than the incoming record. This will cause
@ -303,7 +304,10 @@ add_task(async function test_dupe_reparented_locally_changed_bookmark() {
});
_("Syncing so new dupe record is processed");
engine.lastSync = engine.lastSync - 5;
// We need to take care to only sync the one new record - if we also see
// our local item as incoming, the test fails - bug 1368608.
engine.lastSync = newWBO.modified - 0.000001;
engine.lastModified = null;
await engine.sync();
// We should have logically deleted the dupe record.
@ -311,7 +315,7 @@ add_task(async function test_dupe_reparented_locally_changed_bookmark() {
ok(getServerRecord(collection, bmk1_guid).deleted);
// and physically removed from the local store.
await promiseNoLocalItem(bmk1_guid);
// The original folder still longer has the item
// The original folder still has the item
equal((await getFolderChildrenIDs(folder1_id)).length, 1);
// The second folder does not.
equal((await getFolderChildrenIDs(folder2_id)).length, 0);

View File

@ -750,7 +750,10 @@ add_task(async function test_sync_dateAdded() {
let bzguid = await PlacesUtils.promiseItemGuid(bzid);
// last sync did a POST, which doesn't advance its lastModified value.
// Next sync of the engine doesn't hit info/collections, so lastModified
// remains stale. Setting it to null side-steps that.
engine.lastModified = null;
await sync_engine_and_validate_telem(engine, false);
let newRecord2 = await store.createRecord(item2GUID);
@ -764,8 +767,8 @@ add_task(async function test_sync_dateAdded() {
item2.dateAdded += 10000;
collection.insert(item2GUID, encryptPayload(item2.cleartext), now / 1000 - 10);
engine.lastSync = now / 1000 - 20;
engine.lastModified = null;
await sync_engine_and_validate_telem(engine, false);
let newerRecord2 = await store.createRecord(item2GUID);

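The comment above spells out why a stale lastModified matters under the new server behaviour; the tests in this patch deal with it in one of two ways. A sketch of both, assuming (as the comment suggests, though this diff doesn't state it outright) that a null lastModified stops a stale X-If-Unmodified-Since from being sent:

// Option 1 (used above): forget the stale value entirely.
engine.lastModified = null;

// Option 2 (used in other tests in this patch, where a collection handle is
// available): track the server's X-Last-Modified exactly.
engine.lastModified = collection.timestamp;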
View File

@ -140,6 +140,7 @@ async function resolveConflict(engine, collection, timestamp, buildTree,
collection.insert(record.id, encryptPayload(record), timestamp);
}
engine.lastModified = collection.timestamp;
await sync_engine_and_validate_telem(engine, false);
let expectedTree = buildTree(guids);

View File

@ -482,10 +482,12 @@ add_task(async function test_repair_server_deleted() {
await validationPromise;
// Now we will reach into the server and create a tombstone for that bookmark
// but with a last-modified in the past - this way our sync isn't going to
// pick up the record.
server.insertWBO("foo", "bookmarks", new ServerWBO(bookmarkInfo.guid, encryptPayload({
id: bookmarkInfo.guid,
deleted: true,
}), Date.now() / 1000));
}), (Date.now() - 60000) / 1000));
// sync again - we should have a few problems...
_("Syncing again.");

View File

@ -13,8 +13,6 @@ Cu.import("resource://testing-common/services/sync/utils.js");
initTestLogging("Trace");
Log.repository.getLogger("Sync.Engine.Bookmarks").level = Log.Level.Trace;
// sqlite logging generates lots of noise and typically isn't helpful here.
Log.repository.getLogger("Sqlite").level = Log.Level.Error;
// Disable validation so that we don't try to automatically repair the server
// when we sync.

View File

@ -142,6 +142,7 @@ add_task(async function test_bad_hmac() {
deletedCollections = [];
deletedItems = [];
check_clients_count(1);
engine.lastModified = server.getCollection("foo", "clients").timestamp;
await engine._sync();
_("Old record was not deleted, new one uploaded.");
@ -162,6 +163,7 @@ add_task(async function test_bad_hmac() {
await uploadNewKeys();
// Sync once to upload a record.
engine.lastModified = server.getCollection("foo", "clients").timestamp;
await engine._sync();
check_clients_count(1);
@ -241,6 +243,7 @@ add_task(async function test_full_sync() {
_("First sync. 2 records downloaded; our record uploaded.");
strictEqual(engine.lastRecordUpload, 0);
engine.lastModified = server.getCollection("foo", "clients").timestamp;
await engine._sync();
ok(engine.lastRecordUpload > 0);
deepEqual(user.collection("clients").keys().sort(),
@ -292,6 +295,7 @@ add_task(async function test_sync() {
_("First sync. Client record is uploaded.");
equal(clientWBO(), undefined);
equal(engine.lastRecordUpload, 0);
engine.lastModified = server.getCollection("foo", "clients").timestamp;
await engine._sync();
ok(!!clientWBO().payload);
ok(engine.lastRecordUpload > 0);
@ -300,6 +304,7 @@ add_task(async function test_sync() {
engine.lastRecordUpload -= MORE_THAN_CLIENTS_TTL_REFRESH;
let lastweek = engine.lastRecordUpload;
clientWBO().payload = undefined;
engine.lastModified = server.getCollection("foo", "clients").timestamp;
await engine._sync();
ok(!!clientWBO().payload);
ok(engine.lastRecordUpload > lastweek);
@ -377,6 +382,7 @@ add_task(async function test_last_modified() {
let collection = user.collection("clients");
_("Sync to download the record");
engine.lastModified = server.getCollection("foo", "clients").timestamp;
await engine._sync();
equal(engine._store._remoteClients[activeID].serverLastModified, now - 10,
@ -386,6 +392,8 @@ add_task(async function test_last_modified() {
// set a new name to make sure we really did upload.
engine._store._remoteClients[activeID].name = "New name";
engine._modified.set(activeID, 0);
// The sync above also did a POST, so adjust our lastModified.
engine.lastModified = server.getCollection("foo", "clients").timestamp;
await engine._uploadOutgoing();
_("Local record should have updated timestamp");
@ -631,6 +639,7 @@ add_task(async function test_filter_duplicate_names() {
_("First sync");
strictEqual(engine.lastRecordUpload, 0);
engine.lastModified = server.getCollection("foo", "clients").timestamp;
await engine._sync();
ok(engine.lastRecordUpload > 0);
deepEqual(user.collection("clients").keys().sort(),
@ -1023,6 +1032,7 @@ add_task(async function test_merge_commands() {
try {
_("First sync. 2 records downloaded.");
strictEqual(engine.lastRecordUpload, 0);
engine.lastModified = server.getCollection("foo", "clients").timestamp;
await engine._sync();
_("Broadcast logout to all clients");
@ -1075,6 +1085,7 @@ add_task(async function test_duplicate_remote_commands() {
try {
_("First sync. 1 record downloaded.");
strictEqual(engine.lastRecordUpload, 0);
engine.lastModified = server.getCollection("foo", "clients").timestamp;
await engine._sync();
_("Send tab to client");
@ -1093,6 +1104,7 @@ add_task(async function test_duplicate_remote_commands() {
_("Send another tab to the desktop client");
await engine.sendCommand("displayURI", ["https://foobar.com", engine.localID, "Foo bar!"], desktopID);
engine.lastModified = server.getCollection("foo", "clients").timestamp;
await engine._sync();
let collection = server.getCollection("foo", "clients");
@ -1147,6 +1159,7 @@ add_task(async function test_upload_after_reboot() {
try {
_("First sync. 2 records downloaded.");
strictEqual(engine.lastRecordUpload, 0);
engine.lastModified = server.getCollection("foo", "clients").timestamp;
await engine._sync();
_("Send tab to client");
@ -1293,6 +1306,7 @@ add_task(async function test_keep_cleared_commands_after_reboot() {
commandsProcessed = 0;
engine._handleDisplayURIs = (uris) => { commandsProcessed = uris.length };
engine.lastModified = server.getCollection("foo", "clients").timestamp;
await engine._sync();
await engine.processIncomingCommands();
equal(commandsProcessed, 1, "We processed one command (the other were cleared)");
@ -1346,6 +1360,7 @@ add_task(async function test_deleted_commands() {
try {
_("First sync. 2 records downloaded.");
engine.lastModified = server.getCollection("foo", "clients").timestamp;
await engine._sync();
_("Delete a record on the server.");
@ -1354,6 +1369,7 @@ add_task(async function test_deleted_commands() {
_("Broadcast a command to all clients");
await engine.sendCommand("logout", []);
engine.lastModified = server.getCollection("foo", "clients").timestamp;
await engine._sync();
deepEqual(collection.keys().sort(), [activeID, engine.localID].sort(),
@ -1386,6 +1402,7 @@ add_task(async function test_send_uri_ack() {
let fakeSenderID = Utils.makeGUID();
_("Initial sync for empty clients collection");
engine.lastModified = server.getCollection("foo", "clients").timestamp;
await engine._sync();
let collection = server.getCollection("foo", "clients");
let ourPayload = JSON.parse(JSON.parse(collection.payload(engine.localID)).ciphertext);
@ -1400,6 +1417,7 @@ add_task(async function test_send_uri_ack() {
server.insertWBO("foo", "clients", new ServerWBO(engine.localID, encryptPayload(ourPayload), now));
_("Sync again");
engine.lastModified = server.getCollection("foo", "clients").timestamp;
await engine._sync();
compareCommands(engine.localCommands, [{
command: "displayURI",
@ -1463,6 +1481,7 @@ add_task(async function test_command_sync() {
try {
equal(collection.count(), 2, "2 remote records written");
engine.lastModified = server.getCollection("foo", "clients").timestamp;
await engine._sync();
equal(collection.count(), 3, "3 remote records written (+1 for the synced local record)");
@ -1526,6 +1545,7 @@ add_task(async function ensureSameFlowIDs() {
protocols: ["1.5"]
}), Date.now() / 1000));
engine.lastModified = server.getCollection("foo", "clients").timestamp;
await engine._sync();
await engine.sendCommand("wipeAll", []);
await engine._sync();
@ -1660,6 +1680,7 @@ add_task(async function test_other_clients_notified_on_first_sync() {
try {
engine.lastRecordUpload = 0;
_("First sync, should notify other clients");
engine.lastModified = server.getCollection("foo", "clients").timestamp;
await engine._sync();
equal(calls, 1);

View File

@ -19,8 +19,6 @@ const LoginInfo = Components.Constructor(
* read-only.
*/
Log.repository.getLogger("Sqlite").level = Log.Level.Error;
async function assertChildGuids(folderGuid, expectedChildGuids, message) {
let tree = await PlacesUtils.promiseBookmarksTree(folderGuid);
let childGuids = tree.children.map(child => child.guid);

View File

@ -466,6 +466,7 @@ add_task(async function test_processIncoming_reconcile_locally_deleted_dupe_new(
do_check_false((await engine._store.itemExists("DUPE_INCOMING")));
do_check_eq("DUPE_LOCAL", (await engine._findDupe({id: "DUPE_INCOMING"})));
engine.lastModified = server.getCollection(user, engine.name).timestamp;
await engine._sync();
// After the sync, the server's payload for the original ID should be marked
@ -539,6 +540,7 @@ add_task(async function test_processIncoming_reconcile_changed_dupe() {
do_check_true((await engine._store.itemExists("DUPE_LOCAL")));
do_check_eq("DUPE_LOCAL", (await engine._findDupe({id: "DUPE_INCOMING"})));
engine.lastModified = server.getCollection(user, engine.name).timestamp;
await engine._sync();
// The ID should have been changed to incoming.
@ -577,6 +579,7 @@ add_task(async function test_processIncoming_reconcile_changed_dupe_new() {
do_check_true((await engine._store.itemExists("DUPE_LOCAL")));
do_check_eq("DUPE_LOCAL", (await engine._findDupe({id: "DUPE_INCOMING"})));
engine.lastModified = server.getCollection(user, engine.name).timestamp;
await engine._sync();
// The ID should have been changed to incoming.