mirror of https://github.com/mozilla/gecko-dev.git
synced 2025-01-26 06:38:36 +00:00

Bug 1393659 - Fix inconsistent handling of max_post_bytes and max_request_bytes r=markh

MozReview-Commit-ID: 4jwpAYNuoQj
--HG--
extra : rebase_source : f32ecbd9740f7d8647fdf37ad6bbb53e332320ac

This commit is contained in:
parent 25eda66c1a
commit 6348cd2190
@@ -64,10 +64,6 @@ PASSWORDS_STORE_BATCH_SIZE: 50,
// (how many records are fetched at a time from the server when batching is used).
DEFAULT_DOWNLOAD_BATCH_SIZE: 1000,

// Default maximum size for a record payload
DEFAULT_MAX_RECORD_PAYLOAD_BYTES: 262144, // 256KB

// score thresholds for early syncs
SINGLE_USER_THRESHOLD: 1000,
MULTI_DEVICE_THRESHOLD: 300,
@@ -86,10 +82,10 @@ SCORE_UPDATE_DELAY: 100,
// observed spurious idle/back events and short enough to pre-empt user activity.
IDLE_OBSERVER_BACK_DELAY: 100,

// Max number of records or bytes to upload in a single POST - we'll do multiple POSTS if either
// MAX_UPLOAD_RECORDS or MAX_UPLOAD_BYTES is hit)
MAX_UPLOAD_RECORDS: 100,
MAX_UPLOAD_BYTES: 1024 * 1023, // just under 1MB
// Duplicate URI_LENGTH_MAX from Places (from nsNavHistory.h), used to discard
// tabs with huge uris during tab sync.
URI_LENGTH_MAX: 65536,

MAX_HISTORY_UPLOAD: 5000,
MAX_HISTORY_DOWNLOAD: 5000,
@@ -945,14 +945,6 @@ SyncEngine.prototype = {
Svc.Prefs.set(this.name + ".lastSyncLocal", value.toString());
},

get maxRecordPayloadBytes() {
let serverConfiguration = this.service.serverConfiguration;
if (serverConfiguration && serverConfiguration.max_record_payload_bytes) {
return serverConfiguration.max_record_payload_bytes;
}
return DEFAULT_MAX_RECORD_PAYLOAD_BYTES;
},

/*
* Returns a changeset for this sync. Engine implementations can override this
* method to bypass the tracker for certain or all changed items.
@@ -1679,13 +1671,6 @@ SyncEngine.prototype = {
this._log.trace("Outgoing: " + out);

out.encrypt(this.service.collectionKeys.keyForCollection(this.name));
let payloadLength = JSON.stringify(out.payload).length;
if (payloadLength > this.maxRecordPayloadBytes) {
if (this.allowSkippedRecord) {
this._modified.delete(id); // Do not attempt to sync that record again
}
throw new Error(`Payload too big: ${payloadLength} bytes`);
}
ok = true;
} catch (ex) {
this._log.warn("Error creating record", ex);
@@ -1704,6 +1689,7 @@ SyncEngine.prototype = {
++counts.failed;
if (!this.allowSkippedRecord) {
Observers.notify("weave:engine:sync:uploaded", counts, this.name);
this._log.warn(`Failed to enqueue record "${id}" (aborting)`, error);
throw error;
}
this._modified.delete(id);
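
Note on the two engines.js hunks above: the "Payload too big" check together with allowSkippedRecord is the heart of the oversized-record handling. The following standalone sketch (the helper name and its parameters are illustrative, not part of the patch) restates that behaviour: an over-limit record is dropped from the changed set when skipping is allowed, and the thrown error is either swallowed by the enclosing catch or aborts the whole upload.

// Sketch only - mirrors the "Payload too big" branch in _uploadOutgoing above.
function checkPayloadSize(record, modified, maxPayloadBytes, allowSkippedRecord) {
  let payloadLength = JSON.stringify(record.payload).length;
  if (payloadLength > maxPayloadBytes) {
    if (allowSkippedRecord) {
      // Forget the record so it is not retried on every sync.
      modified.delete(record.id);
    }
    // With allowSkippedRecord the caller counts a failure and moves on;
    // without it, the error aborts the whole upload.
    throw new Error(`Payload too big: ${payloadLength} bytes`);
  }
}
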
@@ -170,7 +170,7 @@ TabStore.prototype = {
continue;
}

if (current.url.length >= (MAX_UPLOAD_BYTES - 1000)) {
if (current.url.length > URI_LENGTH_MAX) {
this._log.trace("Skipping over-long URL.");
continue;
}
@@ -218,8 +218,9 @@ TabStore.prototype = {
// We use byteLength here because the data is not encrypted in ascii yet.
let size = new TextEncoder("utf-8").encode(JSON.stringify(tabs)).byteLength;
let origLength = tabs.length;
const maxPayloadSize = this.engine.service.getMaxRecordPayloadSize();
// See bug 535326 comment 8 for an explanation of the estimation
const MAX_TAB_SIZE = this.engine.maxRecordPayloadBytes / 4 * 3 - 1500;
const MAX_TAB_SIZE = maxPayloadSize / 4 * 3 - 1500;
if (size > MAX_TAB_SIZE) {
// Estimate a little more than the direct fraction to maximize packing
let cutoff = Math.ceil(tabs.length * MAX_TAB_SIZE / size);
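
Note: the estimation referenced above (bug 535326 comment 8) is, roughly, that encrypting and base64-encoding the payload inflates it by about 4/3, so only about three quarters of the payload limit is usable for the tabs JSON, minus a fixed allowance (~1500 bytes) for crypto and JSON overhead. A quick check with the 256 KiB default limit (the 4/3 and 1500 figures come from the line above; the rest is illustrative arithmetic):

// Worked example of the MAX_TAB_SIZE estimate at the 256 KiB default limit.
const maxPayloadSize = 262144;                      // 256 KiB
const MAX_TAB_SIZE = maxPayloadSize / 4 * 3 - 1500; // usable cleartext budget
console.log(MAX_TAB_SIZE);                          // 195108 bytes
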
@@ -791,35 +791,39 @@ Collection.prototype = {
return Resource.prototype.post.call(this, data);
}
let getConfig = (name, defaultVal) => {
// serverConfiguration is allowed to be missing during tests.
if (this._service.serverConfiguration && this._service.serverConfiguration.hasOwnProperty(name)) {
return this._service.serverConfiguration[name];
}
return defaultVal;
}
};

// On a server that does not support the batch API, we expect the /info/configuration
// endpoint to provide "max_record_payload_bytes" and "max_request_bytes" and limits.
// On a server that supports the batching API, we expect "max_record_payload_bytes"
// (as before), as well as "max_post_bytes", "max_post_records", "max_total_bytes" and
// "max_total_records". Much of the complexity here and in enqueue is attempting to
// handle both these cases simultaneously.
let config = {
max_post_bytes: getConfig("max_post_bytes", MAX_UPLOAD_BYTES),
max_post_records: getConfig("max_post_records", MAX_UPLOAD_RECORDS),
// Note that from the server's POV, max_post_bytes is the sum of payload
// lengths, but we treat it equivalently to max_request_bytes (which is
// payload + metadata lengths).
max_post_bytes: getConfig("max_post_bytes",
getConfig("max_request_bytes", 260 * 1024)),

max_post_records: getConfig("max_post_records", Infinity),

max_batch_bytes: getConfig("max_total_bytes", Infinity),
max_batch_records: getConfig("max_total_records", Infinity),
}
max_record_payload_bytes: getConfig("max_record_payload_bytes", 256 * 1024),
};

// Handle config edge cases
if (config.max_post_records <= 0) { config.max_post_records = MAX_UPLOAD_RECORDS; }
if (config.max_batch_records <= 0) { config.max_batch_records = Infinity; }
if (config.max_post_bytes <= 0) { config.max_post_bytes = MAX_UPLOAD_BYTES; }
if (config.max_batch_bytes <= 0) { config.max_batch_bytes = Infinity; }

// Max size of BSO payload is 256k. This assumes at most 4k of overhead,
// which sounds like plenty. If the server says it can't handle this, we
// might have valid records we can't sync, so we give up on syncing.
let requiredMax = 260 * 1024;
if (config.max_post_bytes < requiredMax) {
if (config.max_post_bytes <= config.max_record_payload_bytes) {
this._log.error("Server configuration max_post_bytes is too low", config);
throw new Error("Server configuration max_post_bytes is too low");
}

this._log.trace("new PostQueue created with config", config);
return new PostQueue(poster, timestamp, config, log, postCallback);
},
};
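
Note: to see how the fallbacks and the sanity check in this hunk fit together, here is a minimal standalone sketch. The function name and the plain serverConfiguration argument are illustrative only; in the patch this logic lives inside the Collection method shown above and reads this._service.serverConfiguration.

// Sketch of the config resolution above, outside of the Collection object.
function resolvePostQueueConfig(serverConfiguration) {
  let getConfig = (name, defaultVal) =>
    (serverConfiguration && serverConfiguration.hasOwnProperty(name))
      ? serverConfiguration[name]
      : defaultVal;

  let config = {
    // Batching servers advertise max_post_bytes; older servers only give
    // max_request_bytes, and 260 KiB is the last-resort default.
    max_post_bytes: getConfig("max_post_bytes",
                              getConfig("max_request_bytes", 260 * 1024)),
    max_post_records: getConfig("max_post_records", Infinity),
    max_batch_bytes: getConfig("max_total_bytes", Infinity),
    max_batch_records: getConfig("max_total_records", Infinity),
    max_record_payload_bytes: getConfig("max_record_payload_bytes", 256 * 1024),
  };

  if (config.max_post_bytes <= config.max_record_payload_bytes) {
    // A POST that cannot hold even one maximum-size record payload is unusable.
    throw new Error("Server configuration max_post_bytes is too low");
  }
  return config;
}

// e.g. a pre-batching server that only reports request/payload limits:
resolvePostQueueConfig({ max_request_bytes: 1024 * 1024,
                         max_record_payload_bytes: 256 * 1024 });
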
@@ -889,38 +893,42 @@ PostQueue.prototype = {
if (!jsonRepr) {
throw new Error("You must only call this with objects that explicitly support JSON");
}

let bytes = JSON.stringify(jsonRepr);

// Do a flush if we can't add this record without exceeding our single-request
// limits, or without exceeding the total limit for a single batch.
let newLength = this.queued.length + bytes.length + 2; // extras for leading "[" / "," and trailing "]"
// Tests sometimes return objects without payloads, and we just use the
// byte length for those cases.
let payloadLength = jsonRepr.payload ? jsonRepr.payload.length : bytes.length;
if (payloadLength > this.config.max_record_payload_bytes) {
return { enqueued: false, error: new Error("Single record too large to submit to server") };
}

let maxAllowedBytes = Math.min(256 * 1024, this.config.max_post_bytes);
// The `+ 2` is to account for the 2-byte (maximum) overhead (one byte for
// the leading comma or "[", which all records will have, and the other for
// the final trailing "]", only present for the last record).
let newLength = this.queued.length + bytes.length + 2;
let newRecordCount = this.numQueued + 1;

let postSizeExceeded = this.numQueued >= this.config.max_post_records ||
newLength >= maxAllowedBytes;
// Note that the max_post_records and max_batch_records server limits are
// inclusive (e.g. if the max_post_records == 100, it will allow a post with
// 100 records), but the byte limits are not. (See
// https://github.com/mozilla-services/server-syncstorage/issues/73)

let batchSizeExceeded = (this.numQueued + this.numAlreadyBatched) >= this.config.max_batch_records ||
// Have we exceeeded the maximum size or record count for a single POST?
let postSizeExceeded = newRecordCount > this.config.max_post_records ||
newLength >= this.config.max_post_bytes;

// Have we exceeded the maximum size or record count for the entire batch?
let batchSizeExceeded = (newRecordCount + this.numAlreadyBatched) > this.config.max_batch_records ||
(newLength + this.bytesAlreadyBatched) >= this.config.max_batch_bytes;

let singleRecordTooBig = bytes.length + 2 > maxAllowedBytes;

if (postSizeExceeded || batchSizeExceeded) {
this.log.trace(`PostQueue flushing due to postSizeExceeded=${postSizeExceeded}, batchSizeExceeded=${batchSizeExceeded}` +
`, max_batch_bytes: ${this.config.max_batch_bytes}, max_post_bytes: ${this.config.max_post_bytes}`);

if (singleRecordTooBig) {
return { enqueued: false, error: new Error("Single record too large to submit to server") };
}

this.log.trace("PostQueue flushing due to ", { postSizeExceeded, batchSizeExceeded });
// We need to write the queue out before handling this one, but we only
// commit the batch (and thus start a new one) if the batch is full.
// Note that if a single record is too big for the batch or post, then
// the batch may be empty, and so we don't flush in that case.
if (this.numQueued) {
await this.flush(batchSizeExceeded || singleRecordTooBig);
}
await this.flush(batchSizeExceeded);
}

// Either a ',' or a '[' depending on whether this is the first record.
this.queued += this.numQueued ? "," : "[";
this.queued += bytes;
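
Note: the comments above about inclusive record-count limits versus exclusive byte limits are the crux of this hunk. A simplified, self-contained sketch of the flush decision (the shouldFlush helper and its state argument are illustrative; the real logic lives in PostQueue.enqueue on this.numQueued, this.queued and friends):

// Sketch of the enqueue() flush decision above.
function shouldFlush(state, config, recordBytes) {
  let newLength = state.queuedBytes + recordBytes + 2; // leading "[" or "," plus trailing "]"
  let newRecordCount = state.numQueued + 1;

  // Record-count limits are inclusive (a limit of 100 allows a 100-record
  // POST); byte limits are exclusive.
  let postSizeExceeded = newRecordCount > config.max_post_records ||
                         newLength >= config.max_post_bytes;
  let batchSizeExceeded = (newRecordCount + state.numAlreadyBatched) > config.max_batch_records ||
                          (newLength + state.bytesAlreadyBatched) >= config.max_batch_bytes;

  // flush(true) commits the whole batch; flush(false) just starts a new POST.
  return { flushNeeded: postSizeExceeded || batchSizeExceeded,
           commitBatch: batchSizeExceeded };
}

// With max_post_records: 100 and 100 records already queued, the 101st record
// trips the POST limit but leaves the batch open:
shouldFlush({ queuedBytes: 4000, numQueued: 100, numAlreadyBatched: 0, bytesAlreadyBatched: 0 },
            { max_post_records: 100, max_post_bytes: 1024 * 1024,
              max_batch_records: Infinity, max_batch_bytes: Infinity },
            40); // -> { flushNeeded: true, commitBatch: false }
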
@@ -618,6 +618,16 @@ Sync11Service.prototype = {
}
},

getMaxRecordPayloadSize() {
let config = this.serverConfiguration;
if (!config || !config.max_record_payload_bytes) {
this._log.warn("No config or incomplete config in getMaxRecordPayloadSize."
+ " Are we running tests?");
return 256 * 1024;
}
return config.max_record_payload_bytes;
},

async verifyLogin(allow40XRecovery = true) {
if (!this.identity.username) {
this._log.warn("No username in verifyLogin.");
@@ -36,6 +36,7 @@ add_task(async function test_simple() {
max_post_records: 100,
max_batch_bytes: Infinity,
max_batch_records: Infinity,
max_record_payload_bytes: 1000,
}

const time = 11111111;
@@ -60,9 +61,10 @@ add_task(async function test_simple() {
add_task(async function test_max_post_bytes_no_batch() {
let config = {
max_post_bytes: 50,
max_post_records: 4,
max_post_records: Infinity,
max_batch_bytes: Infinity,
max_batch_records: Infinity,
max_record_payload_bytes: 50,
}

const time = 11111111;
@@ -76,7 +78,6 @@ add_task(async function test_max_post_bytes_no_batch() {
await pq.enqueue(makeRecord(20)); // total size now 43 bytes - "[" + record + "," + record + "]"
await pq.enqueue(makeRecord(20)); // this will exceed our byte limit, so will be in the 2nd POST.
await pq.flush(true);

deepEqual(stats.posts, [
{
nbytes: 43, // 43 for the first post
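
Note: the byte counts in the comments above follow directly from the JSON framing, assuming each makeRecord(20) serializes to exactly 20 bytes (a property of the test helper, which is not shown in this diff):

// Worked check of the 22/43-byte figures and the flush into a second POST.
const recordBytes = 20;
const onePost  = 1 + recordBytes + 1;                   // "[" + record + "]" = 22
const twoPosts = 1 + recordBytes + 1 + recordBytes + 1; // "[" + record + "," + record + "]" = 43
console.log(onePost, twoPosts);                         // 22 43
// A third 20-byte record would bring the request to 64 bytes, past the
// 50-byte max_post_bytes above, so it is flushed into a second POST.
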
@@ -93,42 +94,37 @@ add_task(async function test_max_post_bytes_no_batch() {
equal(pq.lastModified, time + 200);
});

// Similar to the above, but we've hit max_records instead of max_bytes.
add_task(async function test_max_post_records_no_batch() {

add_task(async function test_max_record_payload_bytes_no_batch() {
let config = {
max_post_bytes: 100,
max_post_records: 2,
max_post_records: Infinity,
max_batch_bytes: Infinity,
max_batch_records: Infinity,
max_record_payload_bytes: 50,
}

const time = 11111111;

function* responseGenerator() {
yield { success: true, status: 200, headers: { "x-weave-timestamp": time + 100, "x-last-modified": time + 100 } };
yield { success: true, status: 200, headers: { "x-weave-timestamp": time + 200, "x-last-modified": time + 200 } };
}

let { pq, stats } = makePostQueue(config, time, responseGenerator());
await pq.enqueue(makeRecord(20)); // total size now 22 bytes - "[" + record + "]"
await pq.enqueue(makeRecord(20)); // total size now 43 bytes - "[" + record + "," + record + "]"
await pq.enqueue(makeRecord(20)); // this will exceed our records limit, so will be in the 2nd POST.
await pq.enqueue(makeRecord(50)); // total size now 52 bytes - "[" + record + "]"
await pq.enqueue(makeRecord(46)); // total size now 99 bytes - "[" + record0 + "," + record1 + "]"

await pq.flush(true);

deepEqual(stats.posts, [
{
nbytes: 43, // 43 for the first post
commit: false,
nbytes: 99,
commit: true, // we know we aren't in a batch, so never commit.
batch: "true",
headers: [["x-if-unmodified-since", time]],
}, {
nbytes: 22,
commit: false, // we know we aren't in a batch, so never commit.
batch: null,
headers: [["x-if-unmodified-since", time + 100]],
}
]);
equal(pq.lastModified, time + 200);
equal(pq.lastModified, time + 100);
});

// Batch tests.
@@ -140,6 +136,7 @@ add_task(async function test_single_batch() {
max_post_records: 100,
max_batch_bytes: 2000,
max_batch_records: 200,
max_record_payload_bytes: 1000,
}
const time = 11111111;
function* responseGenerator() {
@@ -170,6 +167,7 @@ add_task(async function test_max_post_bytes_batch() {
max_post_records: 4,
max_batch_bytes: 5000,
max_batch_records: 100,
max_record_payload_bytes: 50,
}

const time = 11111111;
@@ -206,12 +204,13 @@ add_task(async function test_max_post_bytes_batch() {
});

// Test we do the right thing when the batch bytes limit is exceeded.
add_task(async function test_max_post_bytes_batch() {
add_task(async function test_max_batch_bytes_batch() {
let config = {
max_post_bytes: 50,
max_post_records: 20,
max_batch_bytes: 70,
max_batch_records: 100,
max_record_payload_bytes: 50,
}

const time0 = 11111111;
@@ -281,6 +280,7 @@ add_task(async function test_max_post_bytes_batch() {
max_post_records: 2,
max_batch_bytes: 5000,
max_batch_records: 100,
max_record_payload_bytes: 1000,
}

const time = 11111111;
@@ -323,6 +323,7 @@ add_task(async function test_huge_record() {
max_post_records: 100,
max_batch_bytes: 5000,
max_batch_records: 100,
max_record_payload_bytes: 50,
}

const time = 11111111;
@@ -373,6 +374,7 @@ add_task(async function test_max_records_batch() {
max_post_records: 3,
max_batch_bytes: 10000,
max_batch_records: 5,
max_record_payload_bytes: 1000,
}

const time0 = 11111111;
@@ -435,3 +437,47 @@ add_task(async function test_max_records_batch() {

equal(pq.lastModified, time1 + 200);
});

// Test we do the right thing when the batch byte limit is met but not exceeded.
add_task(async function test_packed_batch() {
let config = {
max_post_bytes: 54,
max_post_records: 4,
max_batch_bytes: 107,
max_batch_records: 100,
max_record_payload_bytes: 25,
}

const time = 11111111;
function* responseGenerator() {
yield { success: true, status: 202, obj: { batch: 1234 },
headers: { "x-last-modified": time, "x-weave-timestamp": time + 100 },
};
yield { success: true, status: 202, obj: { batch: 1234 },
headers: { "x-last-modified": time + 200, "x-weave-timestamp": time + 200 },
};
}

let { pq, stats } = makePostQueue(config, time, responseGenerator());
ok((await pq.enqueue(makeRecord(25))).enqueued); // total size now 27 bytes - "[" + record + "]"
ok((await pq.enqueue(makeRecord(25))).enqueued); // total size now 53 bytes - "[" + record + "," + record + "]"
ok((await pq.enqueue(makeRecord(25))).enqueued); // this will exceed our byte limit, so will be in the 2nd POST.
ok((await pq.enqueue(makeRecord(25))).enqueued);
await pq.flush(true);

deepEqual(stats.posts, [
{
nbytes: 53,
commit: false,
batch: "true",
headers: [["x-if-unmodified-since", time]],
}, {
nbytes: 53,
commit: true,
batch: 1234,
headers: [["x-if-unmodified-since", time]],
}
]);

equal(pq.lastModified, time + 200);
});
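
Note on the new test_packed_batch config: the 54/107 values are chosen so the byte limits are met but not exceeded, given that each makeRecord(25) serializes to 25 bytes (again a property of the test helper, not shown here):

// Arithmetic behind the "met but not exceeded" configuration above.
const rec = 25;
const postBytes  = 1 + rec + 1 + rec + 1; // "[" + record + "," + record + "]" = 53
const batchBytes = postBytes * 2;         // two such POSTs in one batch       = 106
console.log(postBytes < 54, batchBytes < 107); // true true
// Byte limits are exclusive, so 54 and 107 are the tightest values that still
// allow two records per POST and both POSTs in a single batch.
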
@@ -1344,73 +1344,6 @@ add_task(async function test_uploadOutgoing_failed() {
}
});

/* A couple of "functional" tests to ensure we split records into appropriate
POST requests. More comprehensive unit-tests for this "batching" are in
test_postqueue.js.
*/
add_task(async function test_uploadOutgoing_MAX_UPLOAD_RECORDS() {
_("SyncEngine._uploadOutgoing uploads in batches of MAX_UPLOAD_RECORDS");

let collection = new ServerCollection();

// Let's count how many times the client posts to the server
var noOfUploads = 0;
collection.post = (function(orig) {
return function(data, request) {
// This test doesn't arrange for batch semantics - so we expect the
// first request to come in with batch=true and the others to have no
// batch related headers at all (as the first response did not provide
// a batch ID)
if (noOfUploads == 0) {
do_check_eq(request.queryString, "batch=true");
} else {
do_check_eq(request.queryString, "");
}
noOfUploads++;
return orig.call(this, data, request);
};
}(collection.post));

// Create a bunch of records (and server side handlers)
let engine = makeRotaryEngine();
for (var i = 0; i < 234; i++) {
let id = "record-no-" + i;
engine._store.items[id] = "Record No. " + i;
engine._tracker.addChangedID(id, 0);
collection.insert(id);
}

let meta_global = Service.recordManager.set(engine.metaURL,
new WBORecord(engine.metaURL));
meta_global.payload.engines = {rotary: {version: engine.version,
syncID: engine.syncID}};

let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});

await SyncTestingInfrastructure(server);
try {

// Confirm initial environment.
do_check_eq(noOfUploads, 0);

await engine._syncStartup();
await engine._uploadOutgoing();

// Ensure all records have been uploaded.
for (i = 0; i < 234; i++) {
do_check_true(!!collection.payload("record-no-" + i));
}

// Ensure that the uploads were performed in batches of MAX_UPLOAD_RECORDS.
do_check_eq(noOfUploads, Math.ceil(234 / MAX_UPLOAD_RECORDS));

} finally {
await cleanAndGo(engine, server);
}
});

async function createRecordFailTelemetry(allowSkippedRecord) {
Service.identity.username = "foo";
let collection = new ServerCollection();
@@ -1502,13 +1435,13 @@ add_task(async function test_uploadOutgoing_createRecord_throws_dontAllowSkipRec
});

add_task(async function test_uploadOutgoing_largeRecords() {
_("SyncEngine._uploadOutgoing throws on records larger than MAX_UPLOAD_BYTES");
_("SyncEngine._uploadOutgoing throws on records larger than the max record payload size");

let collection = new ServerCollection();

let engine = makeRotaryEngine();
engine.allowSkippedRecord = false;
engine._store.items["large-item"] = "Y".repeat(MAX_UPLOAD_BYTES * 2);
engine._store.items["large-item"] = "Y".repeat(Service.getMaxRecordPayloadSize() * 2);
engine._tracker.addChangedID("large-item", 0);
collection.insert("large-item");
@@ -1655,6 +1588,10 @@ add_task(async function test_sync_partialUpload() {
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
let oldServerConfiguration = Service.serverConfiguration;
Service.serverConfiguration = {
max_post_records: 100
};
await SyncTestingInfrastructure(server);
generateNewKeys(Service.collectionKeys);
@@ -1708,8 +1645,8 @@ add_task(async function test_sync_partialUpload() {
let id = "record-no-" + i;
// Ensure failed records are back in the tracker:
// * records no. 23 and 42 were rejected by the server,
// * records no. 200 and higher couldn't be uploaded because we failed
// hard on the 3rd upload.
// * records after the third batch and higher couldn't be uploaded because
// we failed hard on the 3rd upload.
if ((i == 23) || (i == 42) || (i >= 200))
do_check_eq(engine._tracker.changedIDs[id], i);
else
@@ -1717,6 +1654,7 @@ add_task(async function test_sync_partialUpload() {
}

} finally {
Service.serverConfiguration = oldServerConfiguration;
await promiseClean(engine, server);
}
});
@@ -21,7 +21,7 @@ add_task(async function test_getOpenURLs() {
_("Test getOpenURLs.");
let [engine, store] = await getMocks();

let superLongURL = "http://" + (new Array(MAX_UPLOAD_BYTES).join("w")) + ".com/";
let superLongURL = "http://" + (new Array(URI_LENGTH_MAX).join("w")) + ".com/";
let urls = ["http://bar.com", "http://foo.com", "http://foobar.com", superLongURL];
function fourURLs() {
return urls.pop();
@@ -94,8 +94,9 @@ add_task(async function test_createRecord() {
store.getTabState = mockGetTabState;
store.shouldSkipWindow = mockShouldSkipWindow;
store.getWindowEnumerator = mockGetWindowEnumerator.bind(this, "http://foo.com", 1, 1);

let numtabs = 2600; // Note: this number is connected to DEFAULT_MAX_RECORD_PAYLOAD_BYTES
// This number is sensitive to our hard-coded default max record payload size
// in service.js (256 * 1024)
let numtabs = 2600;

store.getWindowEnumerator = mockGetWindowEnumerator.bind(this, "http://foo.com", 1, 1);
record = await store.createRecord("fake-guid");
@@ -31,7 +31,7 @@
"collection_repair.js": ["getRepairRequestor", "getAllRepairRequestors", "CollectionRepairRequestor", "getRepairResponder", "CollectionRepairResponder"],
"collection_validator.js": ["CollectionValidator", "CollectionProblemData"],
"Console.jsm": ["console", "ConsoleAPI"],
"constants.js": ["WEAVE_VERSION", "SYNC_API_VERSION", "USER_API_VERSION", "MISC_API_VERSION", "STORAGE_VERSION", "PREFS_BRANCH", "PWDMGR_HOST", "PWDMGR_PASSWORD_REALM", "PWDMGR_PASSPHRASE_REALM", "PWDMGR_KEYBUNDLE_REALM", "DEFAULT_KEYBUNDLE_NAME", "HMAC_INPUT", "SYNC_KEY_ENCODED_LENGTH", "SYNC_KEY_DECODED_LENGTH", "SYNC_KEY_HYPHENATED_LENGTH", "NO_SYNC_NODE_INTERVAL", "MAX_ERROR_COUNT_BEFORE_BACKOFF", "MAX_IGNORE_ERROR_COUNT", "MINIMUM_BACKOFF_INTERVAL", "MAXIMUM_BACKOFF_INTERVAL", "HMAC_EVENT_INTERVAL", "MASTER_PASSWORD_LOCKED_RETRY_INTERVAL", "DEFAULT_BLOCK_PERIOD", "DEFAULT_GUID_FETCH_BATCH_SIZE", "DEFAULT_MOBILE_GUID_FETCH_BATCH_SIZE", "DEFAULT_STORE_BATCH_SIZE", "HISTORY_STORE_BATCH_SIZE", "FORMS_STORE_BATCH_SIZE", "PASSWORDS_STORE_BATCH_SIZE", "ADDONS_STORE_BATCH_SIZE", "APPS_STORE_BATCH_SIZE", "DEFAULT_DOWNLOAD_BATCH_SIZE", "DEFAULT_MAX_RECORD_PAYLOAD_BYTES", "SINGLE_USER_THRESHOLD", "MULTI_DEVICE_THRESHOLD", "SCORE_INCREMENT_SMALL", "SCORE_INCREMENT_MEDIUM", "SCORE_INCREMENT_XLARGE", "SCORE_UPDATE_DELAY", "IDLE_OBSERVER_BACK_DELAY", "MAX_UPLOAD_RECORDS", "MAX_UPLOAD_BYTES", "MAX_HISTORY_UPLOAD", "MAX_HISTORY_DOWNLOAD", "NOTIFY_TAB_SENT_TTL_SECS", "STATUS_OK", "SYNC_FAILED", "LOGIN_FAILED", "SYNC_FAILED_PARTIAL", "CLIENT_NOT_CONFIGURED", "STATUS_DISABLED", "MASTER_PASSWORD_LOCKED", "LOGIN_SUCCEEDED", "SYNC_SUCCEEDED", "ENGINE_SUCCEEDED", "LOGIN_FAILED_NO_USERNAME", "LOGIN_FAILED_NO_PASSWORD", "LOGIN_FAILED_NO_PASSPHRASE", "LOGIN_FAILED_NETWORK_ERROR", "LOGIN_FAILED_SERVER_ERROR", "LOGIN_FAILED_INVALID_PASSPHRASE", "LOGIN_FAILED_LOGIN_REJECTED", "METARECORD_DOWNLOAD_FAIL", "VERSION_OUT_OF_DATE", "DESKTOP_VERSION_OUT_OF_DATE", "SETUP_FAILED_NO_PASSPHRASE", "CREDENTIALS_CHANGED", "ABORT_SYNC_COMMAND", "NO_SYNC_NODE_FOUND", "OVER_QUOTA", "PROLONGED_SYNC_FAILURE", "SERVER_MAINTENANCE", "RESPONSE_OVER_QUOTA", "ENGINE_UPLOAD_FAIL", "ENGINE_DOWNLOAD_FAIL", "ENGINE_UNKNOWN_FAIL", "ENGINE_APPLY_FAIL", "ENGINE_METARECORD_DOWNLOAD_FAIL", "ENGINE_METARECORD_UPLOAD_FAIL", "ENGINE_BATCH_INTERRUPTED", "JPAKE_ERROR_CHANNEL", "JPAKE_ERROR_NETWORK", "JPAKE_ERROR_SERVER", "JPAKE_ERROR_TIMEOUT", "JPAKE_ERROR_INTERNAL", "JPAKE_ERROR_INVALID", "JPAKE_ERROR_NODATA", "JPAKE_ERROR_KEYMISMATCH", "JPAKE_ERROR_WRONGMESSAGE", "JPAKE_ERROR_USERABORT", "JPAKE_ERROR_DELAYUNSUPPORTED", "INFO_COLLECTIONS", "INFO_COLLECTION_USAGE", "INFO_COLLECTION_COUNTS", "INFO_QUOTA", "kSyncNotConfigured", "kSyncMasterPasswordLocked", "kSyncWeaveDisabled", "kSyncNetworkOffline", "kSyncBackoffNotMet", "kFirstSyncChoiceNotMade", "FIREFOX_ID", "FENNEC_ID", "SEAMONKEY_ID", "TEST_HARNESS_ID", "MIN_PP_LENGTH", "MIN_PASS_LENGTH", "DEVICE_TYPE_DESKTOP", "DEVICE_TYPE_MOBILE", "SQLITE_MAX_VARIABLE_NUMBER"],
"constants.js": ["WEAVE_VERSION", "SYNC_API_VERSION", "USER_API_VERSION", "MISC_API_VERSION", "STORAGE_VERSION", "PREFS_BRANCH", "PWDMGR_HOST", "PWDMGR_PASSWORD_REALM", "PWDMGR_PASSPHRASE_REALM", "PWDMGR_KEYBUNDLE_REALM", "DEFAULT_KEYBUNDLE_NAME", "HMAC_INPUT", "SYNC_KEY_ENCODED_LENGTH", "SYNC_KEY_DECODED_LENGTH", "SYNC_KEY_HYPHENATED_LENGTH", "NO_SYNC_NODE_INTERVAL", "MAX_ERROR_COUNT_BEFORE_BACKOFF", "MAX_IGNORE_ERROR_COUNT", "MINIMUM_BACKOFF_INTERVAL", "MAXIMUM_BACKOFF_INTERVAL", "HMAC_EVENT_INTERVAL", "MASTER_PASSWORD_LOCKED_RETRY_INTERVAL", "DEFAULT_BLOCK_PERIOD", "DEFAULT_GUID_FETCH_BATCH_SIZE", "DEFAULT_MOBILE_GUID_FETCH_BATCH_SIZE", "DEFAULT_STORE_BATCH_SIZE", "HISTORY_STORE_BATCH_SIZE", "FORMS_STORE_BATCH_SIZE", "PASSWORDS_STORE_BATCH_SIZE", "ADDONS_STORE_BATCH_SIZE", "APPS_STORE_BATCH_SIZE", "DEFAULT_DOWNLOAD_BATCH_SIZE", "DEFAULT_MAX_RECORD_PAYLOAD_BYTES", "SINGLE_USER_THRESHOLD", "MULTI_DEVICE_THRESHOLD", "SCORE_INCREMENT_SMALL", "SCORE_INCREMENT_MEDIUM", "SCORE_INCREMENT_XLARGE", "SCORE_UPDATE_DELAY", "IDLE_OBSERVER_BACK_DELAY", "URI_LENGTH_MAX", "MAX_HISTORY_UPLOAD", "MAX_HISTORY_DOWNLOAD", "NOTIFY_TAB_SENT_TTL_SECS", "STATUS_OK", "SYNC_FAILED", "LOGIN_FAILED", "SYNC_FAILED_PARTIAL", "CLIENT_NOT_CONFIGURED", "STATUS_DISABLED", "MASTER_PASSWORD_LOCKED", "LOGIN_SUCCEEDED", "SYNC_SUCCEEDED", "ENGINE_SUCCEEDED", "LOGIN_FAILED_NO_USERNAME", "LOGIN_FAILED_NO_PASSWORD", "LOGIN_FAILED_NO_PASSPHRASE", "LOGIN_FAILED_NETWORK_ERROR", "LOGIN_FAILED_SERVER_ERROR", "LOGIN_FAILED_INVALID_PASSPHRASE", "LOGIN_FAILED_LOGIN_REJECTED", "METARECORD_DOWNLOAD_FAIL", "VERSION_OUT_OF_DATE", "DESKTOP_VERSION_OUT_OF_DATE", "SETUP_FAILED_NO_PASSPHRASE", "CREDENTIALS_CHANGED", "ABORT_SYNC_COMMAND", "NO_SYNC_NODE_FOUND", "OVER_QUOTA", "PROLONGED_SYNC_FAILURE", "SERVER_MAINTENANCE", "RESPONSE_OVER_QUOTA", "ENGINE_UPLOAD_FAIL", "ENGINE_DOWNLOAD_FAIL", "ENGINE_UNKNOWN_FAIL", "ENGINE_APPLY_FAIL", "ENGINE_METARECORD_DOWNLOAD_FAIL", "ENGINE_METARECORD_UPLOAD_FAIL", "ENGINE_BATCH_INTERRUPTED", "JPAKE_ERROR_CHANNEL", "JPAKE_ERROR_NETWORK", "JPAKE_ERROR_SERVER", "JPAKE_ERROR_TIMEOUT", "JPAKE_ERROR_INTERNAL", "JPAKE_ERROR_INVALID", "JPAKE_ERROR_NODATA", "JPAKE_ERROR_KEYMISMATCH", "JPAKE_ERROR_WRONGMESSAGE", "JPAKE_ERROR_USERABORT", "JPAKE_ERROR_DELAYUNSUPPORTED", "INFO_COLLECTIONS", "INFO_COLLECTION_USAGE", "INFO_COLLECTION_COUNTS", "INFO_QUOTA", "kSyncNotConfigured", "kSyncMasterPasswordLocked", "kSyncWeaveDisabled", "kSyncNetworkOffline", "kSyncBackoffNotMet", "kFirstSyncChoiceNotMade", "FIREFOX_ID", "FENNEC_ID", "SEAMONKEY_ID", "TEST_HARNESS_ID", "MIN_PP_LENGTH", "MIN_PASS_LENGTH", "DEVICE_TYPE_DESKTOP", "DEVICE_TYPE_MOBILE", "SQLITE_MAX_VARIABLE_NUMBER"],
"Constants.jsm": ["Roles", "Events", "Relations", "Filters", "States", "Prefilters"],
"ContactDB.jsm": ["ContactDB", "DB_NAME", "STORE_NAME", "SAVED_GETALL_STORE_NAME", "REVISION_STORE", "DB_VERSION"],
"content-server.jsm": ["init"],