From 70fd734aff6da5ecc67e1539279c40427c47dd06 Mon Sep 17 00:00:00 2001
From: Andrea Marchesini <amarchesini@mozilla.com>
Date: Fri, 19 Aug 2016 08:38:58 +0200
Subject: [PATCH] Bug 1286895 - Reintroduce a limit on the number of dedicated JS web workers in Firefox, r=bkelly

---
 dom/workers/RuntimeService.cpp               | 125 ++++++++++++++++---
 dom/workers/RuntimeService.h                 |  25 +++-
 dom/workers/test/mochitest.ini               |   1 +
 dom/workers/test/test_bug1241485.html        |  82 ++++++++++++
 modules/libpref/init/all.js                  |   7 ++
 toolkit/components/telemetry/Histograms.json |  21 ++++
 6 files changed, 235 insertions(+), 26 deletions(-)
 create mode 100644 dom/workers/test/test_bug1241485.html

diff --git a/dom/workers/RuntimeService.cpp b/dom/workers/RuntimeService.cpp
index 11d9ce9f649c..98fef09d344e 100644
--- a/dom/workers/RuntimeService.cpp
+++ b/dom/workers/RuntimeService.cpp
@@ -90,6 +90,15 @@ using mozilla::Preferences;
 // Half the size of the actual C stack, to be safe.
 #define WORKER_CONTEXT_NATIVE_STACK_LIMIT 128 * sizeof(size_t) * 1024
 
+// The maximum value navigator.hardwareConcurrency may report, overridable
+// via pref.
+#define MAX_HARDWARE_CONCURRENCY 8
+
+// The maximum number of workers allowed per domain, overridable via pref.
+#define MAX_WORKERS_PER_DOMAIN 512
+
+static_assert(MAX_WORKERS_PER_DOMAIN >= 1,
+              "We should allow at least one worker per domain.");
+
 // The default number of seconds that close handlers will be allowed to run for
 // content workers.
 #define MAX_SCRIPT_RUN_TIME_SEC 10
@@ -101,6 +110,8 @@ using mozilla::Preferences;
 #define MAX_IDLE_THREADS 20
 
 #define PREF_WORKERS_PREFIX "dom.workers."
+#define PREF_WORKERS_MAX_PER_DOMAIN PREF_WORKERS_PREFIX "maxPerDomain"
+#define PREF_WORKERS_MAX_HARDWARE_CONCURRENCY "dom.maxHardwareConcurrency"
 
 #define PREF_MAX_SCRIPT_RUN_TIME_CONTENT "dom.max_script_run_time"
 #define PREF_MAX_SCRIPT_RUN_TIME_CHROME "dom.max_chrome_script_run_time"
@@ -137,6 +148,9 @@ namespace {
 
 const uint32_t kNoIndex = uint32_t(-1);
 
+uint32_t gMaxWorkersPerDomain = MAX_WORKERS_PER_DOMAIN;
+uint32_t gMaxHardwareConcurrency = MAX_HARDWARE_CONCURRENCY;
+
 // Does not hold an owning reference.
 RuntimeService* gRuntimeService = nullptr;
 
@@ -1357,6 +1371,7 @@ RuntimeService::RegisterWorker(WorkerPrivate* aWorkerPrivate)
   const bool isServiceWorker = aWorkerPrivate->IsServiceWorker();
   const bool isSharedWorker = aWorkerPrivate->IsSharedWorker();
+  const bool isDedicatedWorker = aWorkerPrivate->IsDedicatedWorker();
   if (isServiceWorker) {
     AssertIsOnMainThread();
     Telemetry::Accumulate(Telemetry::SERVICE_WORKER_SPAWN_ATTEMPTS, 1);
@@ -1378,6 +1393,13 @@ RuntimeService::RegisterWorker(WorkerPrivate* aWorkerPrivate)
     NS_ASSERTION(!sharedWorkerScriptSpec.IsEmpty(), "Empty spec!");
   }
 
+  bool exemptFromPerDomainMax = false;
+  if (isServiceWorker) {
+    AssertIsOnMainThread();
+    exemptFromPerDomainMax = Preferences::GetBool("dom.serviceWorkers.exemptFromPerDomainMax",
+                                                  false);
+  }
+
   const nsCString& domain = aWorkerPrivate->Domain();
 
   WorkerDomainInfo* domainInfo;
@@ -1393,14 +1415,34 @@ RuntimeService::RegisterWorker(WorkerPrivate* aWorkerPrivate)
       mDomainMap.Put(domain, domainInfo);
     }
 
-    if (parent) {
+    queued = gMaxWorkersPerDomain &&
+             domainInfo->ActiveWorkerCount() >= gMaxWorkersPerDomain &&
+             !domain.IsEmpty() &&
+             !exemptFromPerDomainMax;
+
+    if (queued) {
+      domainInfo->mQueuedWorkers.AppendElement(aWorkerPrivate);
+
+      // The worker spawn gets queued because we hit the max-workers-per-domain
+      // limit, so log a warning.
+      WorkerPrivate::ReportErrorToConsole("HittingMaxWorkersPerDomain2");
+
+      if (isServiceWorker) {
+        Telemetry::Accumulate(Telemetry::SERVICE_WORKER_SPAWN_GETS_QUEUED, 1);
+      } else if (isSharedWorker) {
+        Telemetry::Accumulate(Telemetry::SHARED_WORKER_SPAWN_GETS_QUEUED, 1);
+      } else if (isDedicatedWorker) {
+        Telemetry::Accumulate(Telemetry::DEDICATED_WORKER_SPAWN_GETS_QUEUED, 1);
+      }
+    }
+    else if (parent) {
       domainInfo->mChildWorkerCount++;
     }
     else if (isServiceWorker) {
-      domainInfo->mServiceWorkers.AppendElement(aWorkerPrivate);
+      domainInfo->mActiveServiceWorkers.AppendElement(aWorkerPrivate);
     }
     else {
-      domainInfo->mWorkers.AppendElement(aWorkerPrivate);
+      domainInfo->mActiveWorkers.AppendElement(aWorkerPrivate);
     }
 
     if (isSharedWorker) {
@@ -1514,26 +1556,50 @@ RuntimeService::UnregisterWorker(WorkerPrivate* aWorkerPrivate)
       NS_ERROR("Don't have an entry for this domain!");
     }
 
-    if (parent) {
+    // Remove the old worker from everywhere.
+    uint32_t index = domainInfo->mQueuedWorkers.IndexOf(aWorkerPrivate);
+    if (index != kNoIndex) {
+      // It was queued; remove it from the list.
+      domainInfo->mQueuedWorkers.RemoveElementAt(index);
+    }
+    else if (parent) {
       MOZ_ASSERT(domainInfo->mChildWorkerCount, "Must be non-zero!");
       domainInfo->mChildWorkerCount--;
     }
     else if (aWorkerPrivate->IsServiceWorker()) {
-      MOZ_ASSERT(domainInfo->mServiceWorkers.Contains(aWorkerPrivate),
+      MOZ_ASSERT(domainInfo->mActiveServiceWorkers.Contains(aWorkerPrivate),
                  "Don't know about this worker!");
-      domainInfo->mServiceWorkers.RemoveElement(aWorkerPrivate);
+      domainInfo->mActiveServiceWorkers.RemoveElement(aWorkerPrivate);
     }
     else {
-      MOZ_ASSERT(domainInfo->mWorkers.Contains(aWorkerPrivate),
+      MOZ_ASSERT(domainInfo->mActiveWorkers.Contains(aWorkerPrivate),
                  "Don't know about this worker!");
-      domainInfo->mWorkers.RemoveElement(aWorkerPrivate);
+      domainInfo->mActiveWorkers.RemoveElement(aWorkerPrivate);
     }
 
     if (aWorkerPrivate->IsSharedWorker()) {
       RemoveSharedWorker(domainInfo, aWorkerPrivate);
     }
 
+    // See if there's a queued worker we can schedule.
+    if (domainInfo->ActiveWorkerCount() < gMaxWorkersPerDomain &&
+        !domainInfo->mQueuedWorkers.IsEmpty()) {
+      queuedWorker = domainInfo->mQueuedWorkers[0];
+      domainInfo->mQueuedWorkers.RemoveElementAt(0);
+
+      if (queuedWorker->GetParent()) {
+        domainInfo->mChildWorkerCount++;
+      }
+      else if (queuedWorker->IsServiceWorker()) {
+        domainInfo->mActiveServiceWorkers.AppendElement(queuedWorker);
+      }
+      else {
+        domainInfo->mActiveWorkers.AppendElement(queuedWorker);
+      }
+    }
+
     if (domainInfo->HasNoWorkers()) {
+      MOZ_ASSERT(domainInfo->mQueuedWorkers.IsEmpty());
       mDomainMap.Remove(domain);
     }
   }
@@ -1814,6 +1880,15 @@ RuntimeService::Init()
     NS_WARNING("Failed to register timeout cache!");
   }
 
+  int32_t maxPerDomain = Preferences::GetInt(PREF_WORKERS_MAX_PER_DOMAIN,
+                                             MAX_WORKERS_PER_DOMAIN);
+  gMaxWorkersPerDomain = std::max(0, maxPerDomain);
+
+  int32_t maxHardwareConcurrency =
+    Preferences::GetInt(PREF_WORKERS_MAX_HARDWARE_CONCURRENCY,
+                        MAX_HARDWARE_CONCURRENCY);
+  gMaxHardwareConcurrency = std::max(0, maxHardwareConcurrency);
+
   rv = InitOSFileConstants();
   if (NS_FAILED(rv)) {
     return rv;
@@ -2005,18 +2080,26 @@ RuntimeService::AddAllTopLevelWorkersToArray(nsTArray<WorkerPrivate*>& aWorkers)
     WorkerDomainInfo* aData = iter.UserData();
 
 #ifdef DEBUG
-    for (uint32_t index = 0; index < aData->mWorkers.Length(); index++) {
-      MOZ_ASSERT(!aData->mWorkers[index]->GetParent(),
+    for (uint32_t index = 0; index < aData->mActiveWorkers.Length(); index++) {
+      MOZ_ASSERT(!aData->mActiveWorkers[index]->GetParent(),
                  "Shouldn't have a parent in this list!");
     }
-    for (uint32_t index = 0; index < aData->mServiceWorkers.Length(); index++) {
-      MOZ_ASSERT(!aData->mServiceWorkers[index]->GetParent(),
+    for (uint32_t index = 0; index < aData->mActiveServiceWorkers.Length(); index++) {
+      MOZ_ASSERT(!aData->mActiveServiceWorkers[index]->GetParent(),
                  "Shouldn't have a parent in this list!");
     }
 #endif
 
-    aWorkers.AppendElements(aData->mWorkers);
-    aWorkers.AppendElements(aData->mServiceWorkers);
+    aWorkers.AppendElements(aData->mActiveWorkers);
+    aWorkers.AppendElements(aData->mActiveServiceWorkers);
+
+    // These might not be top-level workers...
+    for (uint32_t index = 0; index < aData->mQueuedWorkers.Length(); index++) {
+      WorkerPrivate* worker = aData->mQueuedWorkers[index];
+      if (!worker->GetParent()) {
+        aWorkers.AppendElement(worker);
+      }
+    }
   }
 }
 
@@ -2041,7 +2124,7 @@ RuntimeService::CancelWorkersForWindow(nsPIDOMWindowInner* aWindow)
 {
   AssertIsOnMainThread();
 
-  nsTArray<WorkerPrivate*> workers;
+  AutoTArray<WorkerPrivate*, MAX_IDLE_THREADS> workers;
   GetWorkersForWindow(aWindow, workers);
 
   if (!workers.IsEmpty()) {
@@ -2063,7 +2146,7 @@ RuntimeService::FreezeWorkersForWindow(nsPIDOMWindowInner* aWindow)
   AssertIsOnMainThread();
   MOZ_ASSERT(aWindow);
 
-  nsTArray<WorkerPrivate*> workers;
+  AutoTArray<WorkerPrivate*, MAX_IDLE_THREADS> workers;
   GetWorkersForWindow(aWindow, workers);
 
   for (uint32_t index = 0; index < workers.Length(); index++) {
@@ -2077,7 +2160,7 @@ RuntimeService::ThawWorkersForWindow(nsPIDOMWindowInner* aWindow)
   AssertIsOnMainThread();
   MOZ_ASSERT(aWindow);
 
-  nsTArray<WorkerPrivate*> workers;
+  AutoTArray<WorkerPrivate*, MAX_IDLE_THREADS> workers;
   GetWorkersForWindow(aWindow, workers);
 
   for (uint32_t index = 0; index < workers.Length(); index++) {
@@ -2091,7 +2174,7 @@ RuntimeService::SuspendWorkersForWindow(nsPIDOMWindowInner* aWindow)
   AssertIsOnMainThread();
   MOZ_ASSERT(aWindow);
 
-  nsTArray<WorkerPrivate*> workers;
+  AutoTArray<WorkerPrivate*, MAX_IDLE_THREADS> workers;
   GetWorkersForWindow(aWindow, workers);
 
   for (uint32_t index = 0; index < workers.Length(); index++) {
@@ -2105,7 +2188,7 @@ RuntimeService::ResumeWorkersForWindow(nsPIDOMWindowInner* aWindow)
   AssertIsOnMainThread();
   MOZ_ASSERT(aWindow);
 
-  nsTArray<WorkerPrivate*> workers;
+  AutoTArray<WorkerPrivate*, MAX_IDLE_THREADS> workers;
   GetWorkersForWindow(aWindow, workers);
 
   for (uint32_t index = 0; index < workers.Length(); index++) {
@@ -2382,7 +2465,9 @@ RuntimeService::ClampedHardwareConcurrency() const
     if (numberOfProcessors <= 0) {
      numberOfProcessors = 1; // Must be one there somewhere
     }
-    clampedHardwareConcurrency = numberOfProcessors;
+    uint32_t clampedValue = std::min(uint32_t(numberOfProcessors),
+                                     gMaxHardwareConcurrency);
+    clampedHardwareConcurrency.compareExchange(0, clampedValue);
   }
 
   return clampedHardwareConcurrency;
diff --git a/dom/workers/RuntimeService.h b/dom/workers/RuntimeService.h
index e8bfdf6ad500..2de91c810a1b 100644
--- a/dom/workers/RuntimeService.h
+++ b/dom/workers/RuntimeService.h
@@ -42,21 +42,34 @@ class RuntimeService final : public nsIObserver
   struct WorkerDomainInfo
   {
     nsCString mDomain;
-    nsTArray<WorkerPrivate*> mWorkers;
-    nsTArray<WorkerPrivate*> mServiceWorkers;
+    nsTArray<WorkerPrivate*> mActiveWorkers;
+    nsTArray<WorkerPrivate*> mActiveServiceWorkers;
+    nsTArray<WorkerPrivate*> mQueuedWorkers;
     nsClassHashtable<nsCStringHashKey, SharedWorkerInfo> mSharedWorkerInfos;
     uint32_t mChildWorkerCount;
 
     WorkerDomainInfo()
-      : mWorkers(1), mChildWorkerCount(0)
+      : mActiveWorkers(1), mChildWorkerCount(0)
     { }
 
+    uint32_t
+    ActiveWorkerCount() const
+    {
+      return mActiveWorkers.Length() +
+             mChildWorkerCount;
+    }
+
+    uint32_t
+    ActiveServiceWorkerCount() const
+    {
+      return mActiveServiceWorkers.Length();
+    }
+
     bool
     HasNoWorkers() const
     {
-      return mWorkers.IsEmpty() &&
-             mServiceWorkers.IsEmpty() &&
-             !mChildWorkerCount;
+      return ActiveWorkerCount() == 0 &&
+             ActiveServiceWorkerCount() == 0;
     }
   };
diff --git a/dom/workers/test/mochitest.ini b/dom/workers/test/mochitest.ini
index 57625bc6da3f..d7dc1292c800 100644
--- a/dom/workers/test/mochitest.ini
+++ b/dom/workers/test/mochitest.ini
@@ -144,6 +144,7 @@ support-files =
 [test_bug1132395.html]
 skip-if = true # bug 1176225
 [test_bug1132924.html]
+[test_bug1241485.html]
 [test_chromeWorker.html]
 [test_clearTimeouts.html]
 [test_close.html]
diff --git a/dom/workers/test/test_bug1241485.html b/dom/workers/test/test_bug1241485.html
new file mode 100644
index 000000000000..9695fe2e7bec
--- /dev/null
+++ b/dom/workers/test/test_bug1241485.html
@@ -0,0 +1,82 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <title>Test for Bug 1241485</title>
+  <script src="/tests/SimpleTest/SimpleTest.js"></script>
+  <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css">
+</head>
+<body>
+<a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=1241485">Mozilla Bug 1241485</a>
+<script class="testbody" type="text/javascript">
+</script>
+</body>
+</html>
diff --git a/modules/libpref/init/all.js b/modules/libpref/init/all.js
index 5135954efcb3..0388069b561a 100644
--- a/modules/libpref/init/all.js
+++ b/modules/libpref/init/all.js
@@ -141,6 +141,10 @@ pref("dom.select_events.enabled", true);
 // Whether or not Web Workers are enabled.
 pref("dom.workers.enabled", true);
 
+// The number of workers per domain allowed to run concurrently.
+// We're going for effectively infinite, while preventing abuse.
+pref("dom.workers.maxPerDomain", 512);
+
 pref("dom.serviceWorkers.enabled", false);
 
 // The amount of time (milliseconds) service workers keep running after each event.
@@ -5512,6 +5516,9 @@ pref("media.seekToNextFrame.enabled", false);
 pref("media.seekToNextFrame.enabled", true);
 #endif
 
+// The maximum number of cores that navigator.hardwareConcurrency may report.
+pref("dom.maxHardwareConcurrency", 16);
+
 // Shut down the osfile worker if it's no longer needed.
 #if !defined(RELEASE_BUILD)
 pref("osfile.reset_worker_delay", 30000);
diff --git a/toolkit/components/telemetry/Histograms.json b/toolkit/components/telemetry/Histograms.json
index b5ba44ba2400..4e6de4aae8e0 100644
--- a/toolkit/components/telemetry/Histograms.json
+++ b/toolkit/components/telemetry/Histograms.json
@@ -8478,6 +8478,27 @@
     "kind": "count",
     "description": "Count ServiceWorkers that really did get a thread created for them. File bugs in Core::DOM in case of a Telemetry regression."
   },
+  "SERVICE_WORKER_SPAWN_GETS_QUEUED": {
+    "alert_emails": ["amarchesini@mozilla.com"],
+    "bug_numbers": [1286895],
+    "expires_in_version": "never",
+    "kind": "count",
+    "description": "Tracks whether a ServiceWorker spawn gets queued due to hitting the max-workers-per-domain limit. File bugs in Core::DOM in case of a Telemetry regression."
+  },
+  "SHARED_WORKER_SPAWN_GETS_QUEUED": {
+    "alert_emails": ["amarchesini@mozilla.com"],
+    "bug_numbers": [1286895],
+    "expires_in_version": "never",
+    "kind": "count",
+    "description": "Tracks whether a SharedWorker spawn gets queued due to hitting the max-workers-per-domain limit. File bugs in Core::DOM in case of a Telemetry regression."
+  },
+  "DEDICATED_WORKER_SPAWN_GETS_QUEUED": {
+    "alert_emails": ["amarchesini@mozilla.com"],
+    "bug_numbers": [1286895],
+    "expires_in_version": "never",
+    "kind": "count",
+    "description": "Tracks whether a DedicatedWorker spawn gets queued due to hitting the max-workers-per-domain limit. File bugs in Core::DOM in case of a Telemetry regression."
+  },
   "SERVICE_WORKER_REGISTRATIONS": {
     "expires_in_version": "50",
     "kind": "count",
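
Reviewer note: a minimal sketch (not part of the patch) of the behavior the
queueing logic above produces from a page's point of view. Workers spawned
past the dom.workers.maxPerDomain limit are queued rather than failed, and
start once an active worker for the same domain goes away. The limit of 2 is
hypothetical and assumes the pref has been lowered first (e.g. via
SpecialPowers.pushPrefEnv in a mochitest); the inline worker script is
likewise illustrative.

// Each worker posts one message as soon as it actually starts running.
const scriptURL = URL.createObjectURL(
  new Blob(["postMessage('started');"], { type: "application/javascript" }));

const first = new Worker(scriptURL);
const second = new Worker(scriptURL);
const third = new Worker(scriptURL); // over the limit of 2: queued, not started

third.onmessage = () => {
  // Reached only after a slot frees up below.
  console.log("queued worker finally started");
};

first.onmessage = () => {
  // Terminating an active worker triggers UnregisterWorker(), which dequeues
  // the oldest entry in mQueuedWorkers and schedules it, so queued workers
  // start in FIFO order.
  first.terminate();
};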
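
Similarly, a small illustration (again hypothetical, not from the patch) of
the clamp in RuntimeService::ClampedHardwareConcurrency(): whatever
PR_GetNumberOfProcessors() reports, content never observes more cores than
dom.maxHardwareConcurrency allows, 16 by default.

// On a 32-core machine with the default pref, this reports 16.
const cores = navigator.hardwareConcurrency;
console.assert(cores >= 1 && cores <= 16,
               "hardwareConcurrency is clamped to dom.maxHardwareConcurrency");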