Bug 1640791 - Expose allowSkippedRecord to a bridged engine and enable it for extension-storage. r=lina

Differential Revision: https://phabricator.services.mozilla.com/D76785
This commit is contained in:
Mark Hammond 2020-05-27 02:12:53 +00:00
parent 858e3093fb
commit 35a92088a5
3 changed files with 39 additions and 6 deletions

View File

@ -44,6 +44,23 @@ interface mozIBridgedSyncEngine : nsISupports {
// collection on the server, and upload our data as if on a first sync.
readonly attribute long storageVersion;
// Whether this engine tolerates skipped records, where a "skipped" record
// is one that would cause the server's published limits to be exceeded
// (for example, a single record where the payload is larger than the
// server accepts).
// If this returns true, we will just skip the record without even attempting
// to upload. If this is false, we'll abort the entire batch.
// If the engine allows this, it will need to detect this scenario by noticing
// the ID is not in the 'success' records reported to `setUploaded`.
// (Note that this is not to be confused with the fact servers can currently
// reject records as part of a POST - but we hope to remove this ability from
// the server API. Note also that this is not bullet-proof - if the count of
// records is high, it's possible that we will have committed a previous
// batch before we hit the relevant limits, so things might have been written.
// We hope to fix this by ensuring batch limits are such that this is
// impossible)
readonly attribute boolean allowSkippedRecord;
// Wires up the Sync logging machinery to the bridged engine. This can be
// `null`, in which case any logs from the engine will be discarded.
attribute mozIServicesLogger logger;

View File

@ -249,13 +249,21 @@ BridgedEngine.prototype = {
return this._bridge.storageVersion;
},
// Legacy engines allow sync to proceed if some records fail to upload. Since
// we've supported batch uploads on our server for a while, and we want to
// make them stricter (for example, failing the entire batch if a record can't
// be stored, instead of returning its ID in the `failed` response field), we
// require all bridged engines to opt out of partial syncs.
// Legacy engines allow sync to proceed if some records are too large to
// upload (eg, a payload that's bigger than the server's published limits).
// If this returns true, we will just skip the record without even attempting
// to upload. If this is false, we'll abort the entire batch.
// If the engine allows this, it will need to detect this scenario by noticing
// the ID is not in the 'success' records reported to `setUploaded`.
// (Note that this is not to be confused with the fact server's can currently
// reject records as part of a POST - but we hope to remove this ability from
// the server API. Note also that this is not bullet-proof - if the count of
// records is high, it's possible that we will have committed a previous
// batch before we hit the relevant limits, so things might have been written.
// We hope to fix this by ensuring batch limits are such that this is
// impossible)
get allowSkippedRecord() {
return false;
return this._bridge.allowSkippedRecord;
},
/**

View File

@ -298,6 +298,14 @@ impl StorageSyncArea {
Ok(STORAGE_VERSION.try_into().unwrap())
}
// It's possible that migration, or even merging, will result in records
// too large for the server. We tolerate that (and hope that the addons do
// too :)
// The `xpcom_method!` macro generates the XPCOM glue (`GetAllowSkippedRecord`)
// that forwards calls on the mozIBridgedSyncEngine `allowSkippedRecord`
// attribute to the Rust getter below.
xpcom_method!(get_allow_skipped_record => GetAllowSkippedRecord() -> bool);
// Always `true` for extension-storage: an oversized record is skipped
// rather than aborting the whole upload batch.
fn get_allow_skipped_record(&self) -> Result<bool> {
Ok(true)
}
xpcom_method!(
get_last_sync => GetLastSync(
callback: *const mozIBridgedSyncEngineCallback