Bug 1752387: Don't use a timer for recording ServiceWorker running telemetry r=chutten
Differential Revision: https://phabricator.services.mozilla.com/D137600
commit c5706e1941
parent dda478593e
@@ -432,8 +432,6 @@ NS_IMPL_RELEASE(ServiceWorkerManager)
 NS_INTERFACE_MAP_BEGIN(ServiceWorkerManager)
   NS_INTERFACE_MAP_ENTRY(nsIServiceWorkerManager)
   NS_INTERFACE_MAP_ENTRY(nsIObserver)
-  NS_INTERFACE_MAP_ENTRY(nsITimerCallback)
-  NS_INTERFACE_MAP_ENTRY(nsINamed)
   NS_INTERFACE_MAP_ENTRY_AMBIGUOUS(nsISupports, nsIServiceWorkerManager)
 NS_INTERFACE_MAP_END
 
@@ -443,9 +441,6 @@ ServiceWorkerManager::ServiceWorkerManager()
 ServiceWorkerManager::~ServiceWorkerManager() {
   // The map will assert if it is not empty when destroyed.
   mRegistrationInfos.Clear();
-  if (mTelemetryTimer) {
-    mTelemetryTimer->Cancel();
-  }
 
   // This can happen if the browser is started up in ProfileManager mode, in
   // which case XPCOM will startup and shutdown, but there won't be any
@@ -510,22 +505,45 @@ void ServiceWorkerManager::Init(ServiceWorkerRegistrar* aRegistrar) {
   }
 
   mActor = static_cast<ServiceWorkerManagerChild*>(actor);
-  constexpr uint32_t period_ms = 10 * 1000;
-
-  NS_NewTimerWithCallback(getter_AddRefs(mTelemetryTimer), this, period_ms,
-                          nsITimer::TYPE_REPEATING_SLACK);
+  mTelemetryLastChange = TimeStamp::Now();
 }
 
-NS_IMETHODIMP
-ServiceWorkerManager::Notify(nsITimer* aTimer) {
-  ServiceWorkerPrivateImpl::ReportRunning();
-  return NS_OK;
-}
-
-// nsINamed implementation
-NS_IMETHODIMP ServiceWorkerManager::GetName(nsACString& aNameOut) {
-  aNameOut.AssignLiteral("ServiceWorkerManager");
-  return NS_OK;
-}
+void ServiceWorkerManager::RecordTelemetry(uint32_t aNumber, uint32_t aFetch) {
+  // Submit N value pairs to Telemetry for the time we were at those values
+  auto now = TimeStamp::Now();
+  // round down, with a minimum of 1 repeat.  In theory this gives
+  // inaccuracy if there are frequent changes, but that's uncommon.
+  uint32_t repeats = (uint32_t)((now - mTelemetryLastChange).ToMilliseconds()) /
+                     mTelemetryPeriodMs;
+  mTelemetryLastChange = now;
+  if (repeats == 0) {
+    repeats = 1;
+  }
+  nsCOMPtr<nsIRunnable> runnable = NS_NewRunnableFunction(
+      "ServiceWorkerTelemetryRunnable", [aNumber, aFetch, repeats]() {
+        LOG(("ServiceWorkers running: %u samples of %u/%u", repeats, aNumber,
+             aFetch));
+        // Don't allocate infinitely huge arrays if someone visits a SW site
+        // after a few months running.  1 month is about 500K repeats @ 5s
+        // sampling
+        uint32_t num_repeats = std::min(repeats, 1000000U);  // 4MB max
+        nsTArray<uint32_t> values;
+        uint32_t* array = values.AppendElements(num_repeats);
+        for (uint32_t i = 0; i < num_repeats; i++) {
+          array[i] = aNumber;
+        }
+        Telemetry::Accumulate(Telemetry::SERVICE_WORKER_RUNNING, "All"_ns,
+                              values);
+
+        for (uint32_t i = 0; i < num_repeats; i++) {
+          array[i] = aFetch;
+        }
+        Telemetry::Accumulate(Telemetry::SERVICE_WORKER_RUNNING, "Fetch"_ns,
+                              values);
+      });
+  NS_DispatchBackgroundTask(runnable.forget(), nsIEventTarget::DISPATCH_NORMAL);
+}
 
 RefPtr<GenericErrorResultPromise> ServiceWorkerManager::StartControllingClient(
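For orientation, the sampling arithmetic in RecordTelemetry above reduces to an elapsed-time division with a floor of one sample. The following is a minimal standalone sketch of that arithmetic, not Gecko code: SamplesFor and the main() driver are invented for illustration, and the 5000 ms period mirrors the mTelemetryPeriodMs constant added by this patch.

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Sketch of the repeats computation: elapsed time is divided by the
// sampling period, rounding down, with a minimum of one sample so that
// every state change records at least one datapoint.
static uint32_t SamplesFor(uint32_t aElapsedMs, uint32_t aPeriodMs) {
  return std::max(aElapsedMs / aPeriodMs, 1u);
}

int main() {
  // 23 s at the 5000 ms period -> floor(23000 / 5000) = 4 samples.
  printf("%u\n", SamplesFor(23000, 5000));
  // 1.2 s is shorter than one period -> clamped up to 1 sample.
  printf("%u\n", SamplesFor(1200, 5000));
  // The background runnable then accumulates that many copies of the
  // running count into each histogram key ("All" and "Fetch").
  return 0;
}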
@@ -700,7 +718,9 @@ void ServiceWorkerManager::MaybeFinishShutdown() {
   nsresult rv = NS_DispatchToMainThread(runnable);
   Unused << NS_WARN_IF(NS_FAILED(rv));
   mActor = nullptr;
-  ServiceWorkerPrivateImpl::CheckRunningShutdown();
+
+  // This also submits final telemetry
+  ServiceWorkerPrivateImpl::RunningShutdown();
 }
 
 class ServiceWorkerResolveWindowPromiseOnRegisterCallback final
@@ -101,8 +101,6 @@ class ServiceWorkerUpdateFinishCallback {
  * }
  */
 class ServiceWorkerManager final : public nsIServiceWorkerManager,
-                                   public nsITimerCallback,
-                                   public nsINamed,
                                    public nsIObserver {
   friend class GetRegistrationsRunnable;
   friend class GetRegistrationRunnable;
@@ -117,8 +115,6 @@ class ServiceWorkerManager final : public nsIServiceWorkerManager,
   NS_DECL_ISUPPORTS
   NS_DECL_NSISERVICEWORKERMANAGER
   NS_DECL_NSIOBSERVER
-  NS_DECL_NSITIMERCALLBACK
-  NS_DECL_NSINAMED
 
   // Return true if the given principal and URI matches a registered service
   // worker which handles fetch event.
@@ -272,6 +268,15 @@ class ServiceWorkerManager final : public nsIServiceWorkerManager,
       uint32_t aShutdownStateId,
       ServiceWorkerShutdownState::Progress aProgress) const;
 
+  // Record periodic telemetry on number of running ServiceWorkers.  When
+  // the number of running ServiceWorkers changes (or on shutdown),
+  // ServiceWorkerPrivateImpl will call RecordTelemetry with the number of
+  // running serviceworkers and those supporting Fetch.  We use
+  // mTelemetryLastChange to determine how many datapoints to inject into
+  // Telemetry, and dispatch a background runnable to call
+  // RecordTelemetryGap() and Accumulate them.
+  void RecordTelemetry(uint32_t aNumber, uint32_t aFetch);
+
  private:
   struct RegistrationDataPerPrincipal;
 
@@ -422,7 +427,8 @@ class ServiceWorkerManager final : public nsIServiceWorkerManager,
 
   nsTArray<UniquePtr<PendingReadyData>> mPendingReadyList;
 
-  nsCOMPtr<nsITimer> mTelemetryTimer;
+  const uint32_t mTelemetryPeriodMs = 5 * 1000;
+  TimeStamp mTelemetryLastChange;
 };
 
 }  // namespace dom
@@ -76,17 +76,6 @@ uint32_t ServiceWorkerPrivateImpl::sRunningServiceWorkersFetch = 0;
 uint32_t ServiceWorkerPrivateImpl::sRunningServiceWorkersMax = 0;
 uint32_t ServiceWorkerPrivateImpl::sRunningServiceWorkersFetchMax = 0;
 
-/*static*/ void ServiceWorkerPrivateImpl::ReportRunning() {
-  if (sRunningServiceWorkers > 0) {
-    LOG(("ServiceWorkers running %d (%d Fetch)", sRunningServiceWorkers,
-         sRunningServiceWorkersFetch));
-  }
-  Telemetry::Accumulate(Telemetry::SERVICE_WORKER_RUNNING, "All"_ns,
-                        sRunningServiceWorkers);
-  Telemetry::Accumulate(Telemetry::SERVICE_WORKER_RUNNING, "Fetch"_ns,
-                        sRunningServiceWorkersFetch);
-}
-
 ServiceWorkerPrivateImpl::RAIIActorPtrHolder::RAIIActorPtrHolder(
     already_AddRefed<RemoteWorkerControllerChild> aActor)
     : mActor(aActor) {
@@ -271,8 +260,13 @@ nsresult ServiceWorkerPrivateImpl::Initialize() {
   return NS_OK;
 }
 
+/* static */
 void ServiceWorkerPrivateImpl::UpdateRunning(int32_t aDelta,
                                              int32_t aFetchDelta) {
+  // Record values for time we were running at the current values
+  RefPtr<ServiceWorkerManager> manager(ServiceWorkerManager::GetInstance());
+  manager->RecordTelemetry(sRunningServiceWorkers, sRunningServiceWorkersFetch);
+
   MOZ_ASSERT(((int64_t)sRunningServiceWorkers) + aDelta >= 0);
   sRunningServiceWorkers += aDelta;
   if (sRunningServiceWorkers > sRunningServiceWorkersMax) {
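The ordering in UpdateRunning above matters: the interval spent at the previous counter values is recorded first, and only then is the delta applied. The toy model below walks through that record-then-apply sequence, including a zero-delta flush like the one RunningShutdown performs; it is a sketch with invented names (RunningModel, Update), not the Gecko implementation.

#include <cstdint>
#include <cstdio>

// Toy model of the record-then-apply ordering: each state change first
// credits the elapsed interval to the *previous* counter value, then
// applies the delta. A zero-delta call flushes the trailing interval.
struct RunningModel {
  uint32_t mRunning = 0;
  uint32_t mLastChangeMs = 0;
  static constexpr uint32_t kPeriodMs = 5000;  // mirrors mTelemetryPeriodMs

  void Update(uint32_t aNowMs, int32_t aDelta) {
    uint32_t samples = (aNowMs - mLastChangeMs) / kPeriodMs;
    if (samples == 0) {
      samples = 1;  // minimum of one datapoint per state change
    }
    printf("accumulate %u samples of value %u\n", samples, mRunning);
    mLastChangeMs = aNowMs;
    mRunning += aDelta;
  }
};

int main() {
  RunningModel m;
  m.Update(10000, +1);  // 2 samples of 0; then a worker starts
  m.Update(30000, -1);  // 4 samples of 1; then it stops
  m.Update(45000, 0);   // shutdown-style flush: 3 samples of 0
  return 0;
}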
@@ -53,23 +53,23 @@ class ServiceWorkerPrivateImpl final : public ServiceWorkerPrivate::Inner,
 
   RefPtr<GenericPromise> SetSkipWaitingFlag();
 
-  static void ReportRunning();
-
-  static void CheckRunningShutdown() {
+  static void RunningShutdown() {
+    // Force a final update of the number of running ServiceWorkers
+    UpdateRunning(0, 0);
     MOZ_ASSERT(sRunningServiceWorkers == 0);
     MOZ_ASSERT(sRunningServiceWorkersFetch == 0);
   }
 
+  /**
+   * Update Telemetry for # of running ServiceWorkers
+   */
+  static void UpdateRunning(int32_t aDelta, int32_t aFetchDelta);
+
  private:
   class RAIIActorPtrHolder;
 
   ~ServiceWorkerPrivateImpl();
 
-  /**
-   * Update Telemetry for # of running ServiceWorkers
-   */
-  void UpdateRunning(int32_t aDelta, int32_t aFetchDelta);
-
   /**
    * ServiceWorkerPrivate::Inner
    */
@@ -297,7 +297,40 @@ async function makeFileBlob(blobContents) {
   return fileBlob;
 }
 
+function getSWTelemetrySums() {
+  let telemetry = Cc["@mozilla.org/base/telemetry;1"].getService(
+    Ci.nsITelemetry
+  );
+  let keyedhistograms = telemetry.getSnapshotForKeyedHistograms("main", false)
+    .parent;
+  let keyedscalars = telemetry.getSnapshotForKeyedScalars("main", false).parent;
+  // We're not looking at the distribution of the histograms, just that they changed
+  return {
+    SERVICE_WORKER_RUNNING_All: keyedhistograms.SERVICE_WORKER_RUNNING
+      ? keyedhistograms.SERVICE_WORKER_RUNNING.All.sum
+      : 0,
+    SERVICE_WORKER_RUNNING_Fetch: keyedhistograms.SERVICE_WORKER_RUNNING
+      ? keyedhistograms.SERVICE_WORKER_RUNNING.Fetch.sum
+      : 0,
+    SERVICEWORKER_RUNNING_MAX_All: keyedscalars["serviceworker.running_max"]
+      ? keyedscalars["serviceworker.running_max"].All
+      : 0,
+    SERVICEWORKER_RUNNING_MAX_Fetch: keyedscalars["serviceworker.running_max"]
+      ? keyedscalars["serviceworker.running_max"].Fetch
+      : 0,
+  };
+}
+
 add_task(async function test() {
+  // Can't test telemetry without this since we may not be on the nightly channel
+  let oldCanRecord = Services.telemetry.canRecordExtended;
+  Services.telemetry.canRecordExtended = true;
+  registerCleanupFunction(() => {
+    Services.telemetry.canRecordExtended = oldCanRecord;
+  });
+
+  let initialSums = getSWTelemetrySums();
+
   // ## Isolated Privileged Process
   // Trigger a straightforward intercepted navigation with no request body that
   // returns a synthetic response.
@@ -328,4 +361,43 @@ add_task(async function test() {
     await do_test_sw(isolateUrl, isolateRemoteType, "synthetic", null);
     await do_test_sw(isolateUrl, isolateRemoteType, "synthetic", fileBlob);
   }
+  let telemetrySums = getSWTelemetrySums();
+  info(JSON.stringify(telemetrySums));
+  info(
+    "Initial Running All: " +
+      initialSums.SERVICE_WORKER_RUNNING_All +
+      ", Fetch: " +
+      initialSums.SERVICE_WORKER_RUNNING_Fetch
+  );
+  info(
+    "Initial Max Running All: " +
+      initialSums.SERVICEWORKER_RUNNING_MAX_All +
+      ", Fetch: " +
+      initialSums.SERVICEWORKER_RUNNING_MAX_Fetch
+  );
+  info(
+    "Running All: " +
+      telemetrySums.SERVICE_WORKER_RUNNING_All +
+      ", Fetch: " +
+      telemetrySums.SERVICE_WORKER_RUNNING_Fetch
+  );
+  info(
+    "Max Running All: " +
+      telemetrySums.SERVICEWORKER_RUNNING_MAX_All +
+      ", Fetch: " +
+      telemetrySums.SERVICEWORKER_RUNNING_MAX_Fetch
+  );
+  ok(
+    telemetrySums.SERVICE_WORKER_RUNNING_All >
+      initialSums.SERVICE_WORKER_RUNNING_All,
+    "ServiceWorker running count changed"
+  );
+  ok(
+    telemetrySums.SERVICE_WORKER_RUNNING_Fetch >
+      initialSums.SERVICE_WORKER_RUNNING_Fetch,
+    "ServiceWorker running count changed"
+  );
+  // We don't use ok()'s for MAX because MAX may have been set before we
+  // set canRecordExtended, and if so we won't record a new value unless
+  // the max increases again.
 });
@@ -14018,18 +14018,7 @@
     "keys": ["All", "Fetch"],
     "bug_numbers": [1740335],
     "alert_emails": ["sw-telemetry@mozilla.com", "rjesup@mozilla.com", "echuang@mozilla.com"],
-    "description": "The sampled number of simultaneous running ServiceWorkers in a session, sampled once each minute"
-  },
-  "SERVICE_WORKER_FETCH_RUNNING": {
-    "record_in_processes": ["main"],
-    "products": ["firefox", "fennec"],
-    "expires_in_version": "102",
-    "kind": "linear",
-    "high": 31,
-    "n_buckets": 30,
-    "bug_numbers": [1740335],
-    "alert_emails": ["sw-telemetry@mozilla.com", "rjesup@mozilla.com", "echuang@mozilla.com"],
-    "description": "The sampled number of simultaneous running ServiceWorkers handling Fetch in a session, sampled every minute"
+    "description": "The sampled number of simultaneous running ServiceWorkers in a session, sampled once each 10 seconds (lazily on state change)"
   },
   "GRAPHICS_SANITY_TEST": {
     "record_in_processes": ["main", "content"],