Bug 1918194 - Part 1: Add background task extras to nsIIncrementalDownload MAR GETs. r=bytesized,bhearsum,necko-reviewers,application-update-reviewers,csadilek,jesup
This adds extra headers and query parameters to identify MAR GET requests as originating from browsing profiles or background task profiles.

This extracts a simple existing function for setting headers and then makes it available to `nsIIncrementalDownload`. The existing function is not the most pleasant API, but it exists and is much easier than working through the details of storing header name-value pairs until they are ready to be used. It's also the same API exposed by Windows BITS, so there's no additional manipulation required to support BITS.

Differential Revision: https://phabricator.services.mozilla.com/D221860
parent 8eadf5e644
commit 95f004a551
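
For illustration, here is a minimal standalone sketch of the extras this patch attaches to MAR GETs. It uses plain C++ with std::string rather than Gecko strings; the URL and task name are example values, and appending "?..." assumes the patch URL has no existing query string (the real code in the update service uses URL.searchParams to handle that):

#include <iostream>
#include <string>

struct Extras {
  std::string url;
  std::string extraHeaders;
};

static Extras WithExtras(const std::string& patchURL, bool isBackgroundTask,
                         const std::string& taskName) {
  const std::string mode = isBackgroundTask ? "1" : "0";
  // BITS-style header block: "Name: value\r\n" pairs ended by a bare "\r\n".
  std::string headers = "X-BackgroundTaskMode: " + mode + "\r\n";
  // Simplification: assume no existing query string on the patch URL.
  std::string url = patchURL + "?backgroundTaskMode=" + mode;
  if (isBackgroundTask) {
    headers += "X-BackgroundTaskName: " + taskName + "\r\n";
    url += "&backgroundTaskName=" + taskName;
  }
  headers += "\r\n";  // Terminate the block so the parser sees every pair.
  return {url, headers};
}

int main() {
  Extras e = WithExtras("https://example.com/update.mar", true,
                        "backgroundupdate");
  std::cout << e.url << "\n" << e.extraHeaders;
  // Prints the URL with backgroundTaskMode=1&backgroundTaskName=backgroundupdate
  // appended, followed by the two X-BackgroundTask* request headers.
}

The trailing bare "\r\n" matters: as shown in the parser below, a line without a colon ends the header block, so an unterminated final header would be dropped.
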
@@ -1410,34 +1410,14 @@ nsresult nsWebBrowserPersist::SaveURIInternal(
     // Headers
     if (aExtraHeaders) {
-      nsAutoCString oneHeader;
-      nsAutoCString headerName;
-      nsAutoCString headerValue;
-      int32_t crlf = 0;
-      int32_t colon = 0;
-      const char* kWhitespace = "\b\t\r\n ";
-      nsAutoCString extraHeaders(aExtraHeaders);
-      while (true) {
-        crlf = extraHeaders.Find("\r\n");
-        if (crlf == -1) break;
-        extraHeaders.Mid(oneHeader, 0, crlf);
-        extraHeaders.Cut(0, crlf + 2);
-        colon = oneHeader.Find(":");
-        if (colon == -1) break;  // Should have a colon
-        oneHeader.Left(headerName, colon);
-        colon++;
-        oneHeader.Mid(headerValue, colon, oneHeader.Length() - colon);
-        headerName.Trim(kWhitespace);
-        headerValue.Trim(kWhitespace);
-        // Add the header (merging if required)
-        rv = httpChannel->SetRequestHeader(headerName, headerValue, true);
+      rv = mozilla::net::AddExtraHeaders(httpChannel,
+                                         nsDependentCString(aExtraHeaders));
       if (NS_FAILED(rv)) {
         EndDownload(NS_ERROR_FAILURE);
         return NS_ERROR_FAILURE;
       }
-      }
     }
   }
   return SaveChannelInternal(inputChannel, aFile, aCalcFileExt);
 }
@@ -49,9 +49,10 @@ interface nsIIncrementalDownload : nsIRequest
    *        The amount of time to wait between fetching chunks.  Pass a
    *        negative to use the default interval, or 0 to fetch the remaining
    *        part of the file in one chunk.
+   * @param extraHeaders Additional headers to supply with the HTTP request.
    */
   void init(in nsIURI uri, in nsIFile destination, in long chunkSize,
-            in long intervalInSeconds);
+            in long intervalInSeconds, in ACString extraHeaders);
 
   /**
    * The URI being fetched.
@@ -149,6 +149,7 @@ class nsIncrementalDownload final : public nsIIncrementalDownload,
   nsCOMPtr<nsIChannel> mNewRedirectChannel;
   nsCString mPartialValidator;
   bool mCacheBust{false};
+  nsCString mExtraHeaders;
 
   // nsITimerCallback is implemented on a subclass so that the name attribute
   // doesn't conflict with the name attribute of the nsIRequest interface.
@@ -248,6 +249,11 @@ nsresult nsIncrementalDownload::ProcessTimeout() {
   rv = ClearRequestHeader(http);
   if (NS_FAILED(rv)) return rv;
 
+  if (!mExtraHeaders.IsEmpty()) {
+    rv = AddExtraHeaders(http, mExtraHeaders);
+    if (NS_FAILED(rv)) return rv;
+  }
+
   // Don't bother making a range request if we are just going to fetch the
   // entire document.
   if (mInterval || mCurrentSize != int64_t(0)) {
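
Note that nsIncrementalDownload opens a fresh HTTP channel for each chunk it fetches, so the stored mExtraHeaders block must be re-applied whenever a channel is created: here when the fetch timer fires, and again in AsyncOnChannelRedirect below when a redirect replaces the channel.
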
@@ -416,7 +422,7 @@ nsIncrementalDownload::SetLoadGroup(nsILoadGroup* loadGroup) {
 
 NS_IMETHODIMP
 nsIncrementalDownload::Init(nsIURI* uri, nsIFile* dest, int32_t chunkSize,
-                            int32_t interval) {
+                            int32_t interval, const nsACString& extraHeaders) {
   // Keep it simple: only allow initialization once
   NS_ENSURE_FALSE(mURI, NS_ERROR_ALREADY_INITIALIZED);
 
@@ -428,6 +434,9 @@ nsIncrementalDownload::Init(nsIURI* uri, nsIFile* dest, int32_t chunkSize,
 
   if (chunkSize > 0) mChunkSize = chunkSize;
   if (interval >= 0) mInterval = interval;
 
+  mExtraHeaders = extraHeaders;
+
   return NS_OK;
 }
 
@@ -843,6 +852,11 @@ nsIncrementalDownload::AsyncOnChannelRedirect(
   nsresult rv = ClearRequestHeader(newHttpChannel);
   if (NS_FAILED(rv)) return rv;
 
+  if (!mExtraHeaders.IsEmpty()) {
+    rv = AddExtraHeaders(newHttpChannel, mExtraHeaders);
+    if (NS_FAILED(rv)) return rv;
+  }
+
   // If we didn't have a Range header, then we must be doing a full download.
   nsAutoCString rangeVal;
   Unused << http->GetRequestHeader(rangeHdr, rangeVal);
@@ -4126,5 +4126,35 @@ bool IsCoepCredentiallessEnabled(bool aIsOriginTrialCoepCredentiallessEnabled) {
          aIsOriginTrialCoepCredentiallessEnabled;
 }
 
+nsresult AddExtraHeaders(nsIHttpChannel* aHttpChannel,
+                         const nsACString& aExtraHeaders,
+                         bool aMerge /* = true */) {
+  nsresult rv;
+  nsAutoCString oneHeader;
+  nsAutoCString headerName;
+  nsAutoCString headerValue;
+  int32_t crlf = 0;
+  int32_t colon = 0;
+  const char* kWhitespace = "\b\t\r\n ";
+  nsAutoCString extraHeaders(aExtraHeaders);
+  while (true) {
+    crlf = extraHeaders.Find("\r\n");
+    if (crlf == -1) break;
+    extraHeaders.Mid(oneHeader, 0, crlf);
+    extraHeaders.Cut(0, crlf + 2);
+    colon = oneHeader.Find(":");
+    if (colon == -1) break;  // Should have a colon.
+    oneHeader.Left(headerName, colon);
+    colon++;
+    oneHeader.Mid(headerValue, colon, oneHeader.Length() - colon);
+    headerName.Trim(kWhitespace);
+    headerValue.Trim(kWhitespace);
+    // Add the header (merging if required).
+    rv = aHttpChannel->SetRequestHeader(headerName, headerValue, aMerge);
+    NS_ENSURE_SUCCESS(rv, rv);
+  }
+  return NS_OK;
+}
+
 } // namespace net
 } // namespace mozilla
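
As a reference for the parsing contract above: the helper consumes "Name: value\r\n" pairs, trims whitespace, and stops either when no "\r\n" remains (so a trailing unterminated header is dropped) or at the first line without a colon (so the bare "\r\n" terminator ends the block, and the no-extras value "\r\n" yields zero headers). A standalone sketch of the same loop, with ParseExtraHeaders as a hypothetical name and std::string standing in for nsAutoCString:

#include <iostream>
#include <string>
#include <utility>
#include <vector>

std::vector<std::pair<std::string, std::string>> ParseExtraHeaders(
    std::string extra) {
  auto trim = [](const std::string& s) {
    const char* kWhitespace = "\b\t\r\n ";
    size_t first = s.find_first_not_of(kWhitespace);
    if (first == std::string::npos) return std::string();
    size_t last = s.find_last_not_of(kWhitespace);
    return s.substr(first, last - first + 1);
  };
  std::vector<std::pair<std::string, std::string>> headers;
  while (true) {
    size_t crlf = extra.find("\r\n");
    if (crlf == std::string::npos) break;  // No CRLF left: an unterminated
                                           // trailing header is dropped.
    std::string one = extra.substr(0, crlf);
    extra.erase(0, crlf + 2);
    size_t colon = one.find(':');
    if (colon == std::string::npos) break;  // Bare "\r\n": end of the block.
    headers.emplace_back(trim(one.substr(0, colon)),
                         trim(one.substr(colon + 1)));
  }
  return headers;
}

int main() {
  for (auto& [name, value] : ParseExtraHeaders(
           "X-BackgroundTaskMode: 1\r\nX-BackgroundTaskName: foo\r\n\r\n")) {
    std::cout << name << "=" << value << "\n";
  }
  // Prints X-BackgroundTaskMode=1 and X-BackgroundTaskName=foo.
  // The no-extras case, a lone "\r\n", parses to zero headers.
}
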
@@ -1188,6 +1188,10 @@ void CheckForBrokenChromeURL(nsILoadInfo* aLoadInfo, nsIURI* aURI);
 
 bool IsCoepCredentiallessEnabled(bool aIsOriginTrialCoepCredentiallessEnabled);
 
 void ParseSimpleURISchemes(const nsACString& schemeList);
 
+nsresult AddExtraHeaders(nsIHttpChannel* aHttpChannel,
+                         const nsACString& aExtraHeaders, bool aMerge = true);
+
 } // namespace net
 } // namespace mozilla
@@ -6142,6 +6142,63 @@ class Downloader {
     );
   }
 
+  /**
+   * Given a patch URL, return a URL possibly modified with extra query
+   * parameters and extra headers. The extras help identify whether this update
+   * is driven by a regular browsing Firefox or by a background update task.
+   *
+   * @param {string} [patchURL] Unmodified patch URL.
+   * @return { url, extraHeaders }
+   */
+  _maybeWithExtras(patchURL) {
+    let shouldAddExtras = true;
+    if (AppConstants.MOZ_APP_NAME !== "firefox") {
+      shouldAddExtras = false;
+    }
+    if (Services.policies) {
+      let policies = Services.policies.getActivePolicies();
+      if (policies) {
+        if ("AppUpdateURL" in policies) {
+          shouldAddExtras = false;
+        }
+      }
+    }
+
+    if (!shouldAddExtras) {
+      LOG("Downloader:_maybeWithExtras - Not adding extras");
+      return { url: patchURL, extraHeaders: "\r\n" };
+    }
+
+    LOG("Downloader:_maybeWithExtras - Adding extras");
+
+    let modeStr = lazy.gIsBackgroundTaskMode ? "1" : "0";
+    let extraHeaders = `X-BackgroundTaskMode: ${modeStr}\r\n`;
+    let extraParameters = [["backgroundTaskMode", modeStr]];
+
+    if (lazy.gIsBackgroundTaskMode) {
+      const bts = Cc["@mozilla.org/backgroundtasks;1"].getService(
+        Ci.nsIBackgroundTasks
+      );
+      extraHeaders += `X-BackgroundTaskName: ${bts.backgroundTaskName()}\r\n`;
+      extraParameters.push(["backgroundTaskName", bts.backgroundTaskName()]);
+    }
+
+    extraHeaders += "\r\n";
+
+    let url = patchURL;
+    let parsedUrl = URL.parse(url);
+    if (parsedUrl) {
+      for (let [p, v] of extraParameters) {
+        parsedUrl.searchParams.set(p, v);
+      }
+      url = parsedUrl.href;
+    } else {
+      LOG("Downloader:_maybeWithExtras - Failed to parse patch URL!");
+    }
+
+    return { url, extraHeaders };
+  }
+
   /**
    * Download and stage the given update.
    * @param update
@@ -6196,6 +6253,12 @@ class Downloader {
       canUseBits = this._canUseBits(this._patch);
     }
 
+    // When using Firefox and Mozilla's update server, add extra headers and
+    // extra query parameters identifying whether this request is on behalf of a
+    // regular browsing profile (0) or a background task (1). This helps
+    // understand bandwidth usage of background updates in production.
+    let { url, extraHeaders } = this._maybeWithExtras(this._patch.URL);
+
     if (!canUseBits) {
      this._pendingRequest = null;
@@ -6247,18 +6310,25 @@ class Downloader {
       LOG(
         "Downloader:downloadUpdate - Starting nsIIncrementalDownload with " +
           "url: " +
-          this._patch.URL +
+          url +
           ", path: " +
           patchFile.path +
           ", interval: " +
           interval
       );
-      let uri = Services.io.newURI(this._patch.URL);
+      let uri = Services.io.newURI(url);
 
       this._request = Cc[
         "@mozilla.org/network/incremental-download;1"
       ].createInstance(Ci.nsIIncrementalDownload);
-      this._request.init(uri, patchFile, DOWNLOAD_CHUNK_SIZE, interval);
+      this._request.init(
+        uri,
+        patchFile,
+        DOWNLOAD_CHUNK_SIZE,
+        interval,
+        extraHeaders
+      );
       this._request.start(this, null);
     } else {
       this._bitsActiveNotifications = this.hasDownloadListeners;