Bug 1280241 - Remove the limit of number of workers per domain, r=khuey
commit da33656040
parent 5ef86607af
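In short, this patch drops the hard cap on concurrently running workers per domain: MAX_WORKERS_PER_DOMAIN and the dom.workers.maxPerDomain pref go away, worker spawns are no longer parked in mQueuedWorkers once a domain is at its limit, the SERVICE_WORKER_SPAWN_GETS_QUEUED / SHARED_WORKER_SPAWN_GETS_QUEUED telemetry probes are removed, the per-domain bookkeeping arrays are renamed from mActiveWorkers / mActiveServiceWorkers to mWorkers / mServiceWorkers, and the mochitest that asserted the old limit is deleted. A minimal, illustrative sketch of the observable difference follows; the worker count and the blob script are arbitrary and not part of the patch:

```js
// Before this patch, once a domain had dom.workers.maxPerDomain (default 50)
// live workers, further `new Worker(...)` calls were queued and only started
// when an earlier worker exited; after it, each worker starts right away.
const url = URL.createObjectURL(new Blob(["postMessage('running');"]));
const workers = [];
for (let i = 0; i < 100; i++) {        // 100 is an arbitrary example count
  const w = new Worker(url);
  w.onmessage = () => console.log("worker " + i + " is running");
  workers.push(w);
}
```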
@@ -94,12 +94,6 @@ using mozilla::Preferences;
 // Half the size of the actual C stack, to be safe.
 #define WORKER_CONTEXT_NATIVE_STACK_LIMIT 128 * sizeof(size_t) * 1024
 
-// The maximum number of threads to use for workers, overridable via pref.
-#define MAX_WORKERS_PER_DOMAIN 10
-
-static_assert(MAX_WORKERS_PER_DOMAIN >= 1,
-              "We should allow at least one worker per domain.");
-
 // The default number of seconds that close handlers will be allowed to run for
 // content workers.
 #define MAX_SCRIPT_RUN_TIME_SEC 10
@@ -111,7 +105,6 @@ static_assert(MAX_WORKERS_PER_DOMAIN >= 1,
 #define MAX_IDLE_THREADS 20
 
 #define PREF_WORKERS_PREFIX "dom.workers."
-#define PREF_WORKERS_MAX_PER_DOMAIN PREF_WORKERS_PREFIX "maxPerDomain"
 
 #define PREF_MAX_SCRIPT_RUN_TIME_CONTENT "dom.max_script_run_time"
 #define PREF_MAX_SCRIPT_RUN_TIME_CHROME "dom.max_chrome_script_run_time"
@@ -148,8 +141,6 @@ namespace {
 
 const uint32_t kNoIndex = uint32_t(-1);
 
-uint32_t gMaxWorkersPerDomain = MAX_WORKERS_PER_DOMAIN;
-
 // Does not hold an owning reference.
 RuntimeService* gRuntimeService = nullptr;
 
@@ -1417,13 +1408,6 @@ RuntimeService::RegisterWorker(WorkerPrivate* aWorkerPrivate)
     NS_ASSERTION(!sharedWorkerScriptSpec.IsEmpty(), "Empty spec!");
   }
 
-  bool exemptFromPerDomainMax = false;
-  if (isServiceWorker) {
-    AssertIsOnMainThread();
-    exemptFromPerDomainMax = Preferences::GetBool("dom.serviceWorkers.exemptFromPerDomainMax",
-                                                  false);
-  }
-
   const nsCString& domain = aWorkerPrivate->Domain();
 
   WorkerDomainInfo* domainInfo;
@@ -1439,31 +1423,14 @@ RuntimeService::RegisterWorker(WorkerPrivate* aWorkerPrivate)
       mDomainMap.Put(domain, domainInfo);
     }
 
-    queued = gMaxWorkersPerDomain &&
-             domainInfo->ActiveWorkerCount() >= gMaxWorkersPerDomain &&
-             !domain.IsEmpty() &&
-             !exemptFromPerDomainMax;
-
-    if (queued) {
-      domainInfo->mQueuedWorkers.AppendElement(aWorkerPrivate);
-
-      // Worker spawn gets queued due to hitting max workers per domain
-      // limit so let's log a warning.
-      WorkerPrivate::ReportErrorToConsole("HittingMaxWorkersPerDomain2");
-
-      if (isServiceWorker || isSharedWorker) {
-        Telemetry::Accumulate(isSharedWorker ? Telemetry::SHARED_WORKER_SPAWN_GETS_QUEUED
-                                             : Telemetry::SERVICE_WORKER_SPAWN_GETS_QUEUED, 1);
-      }
-    }
-    else if (parent) {
+    if (parent) {
       domainInfo->mChildWorkerCount++;
     }
     else if (isServiceWorker) {
-      domainInfo->mActiveServiceWorkers.AppendElement(aWorkerPrivate);
+      domainInfo->mServiceWorkers.AppendElement(aWorkerPrivate);
     }
     else {
-      domainInfo->mActiveWorkers.AppendElement(aWorkerPrivate);
+      domainInfo->mWorkers.AppendElement(aWorkerPrivate);
     }
 
     if (isSharedWorker) {
@@ -1577,50 +1544,26 @@ RuntimeService::UnregisterWorker(WorkerPrivate* aWorkerPrivate)
       NS_ERROR("Don't have an entry for this domain!");
     }
 
-    // Remove old worker from everywhere.
-    uint32_t index = domainInfo->mQueuedWorkers.IndexOf(aWorkerPrivate);
-    if (index != kNoIndex) {
-      // Was queued, remove from the list.
-      domainInfo->mQueuedWorkers.RemoveElementAt(index);
-    }
-    else if (parent) {
+    if (parent) {
       MOZ_ASSERT(domainInfo->mChildWorkerCount, "Must be non-zero!");
       domainInfo->mChildWorkerCount--;
     }
     else if (aWorkerPrivate->IsServiceWorker()) {
-      MOZ_ASSERT(domainInfo->mActiveServiceWorkers.Contains(aWorkerPrivate),
+      MOZ_ASSERT(domainInfo->mServiceWorkers.Contains(aWorkerPrivate),
                  "Don't know about this worker!");
-      domainInfo->mActiveServiceWorkers.RemoveElement(aWorkerPrivate);
+      domainInfo->mServiceWorkers.RemoveElement(aWorkerPrivate);
     }
     else {
-      MOZ_ASSERT(domainInfo->mActiveWorkers.Contains(aWorkerPrivate),
+      MOZ_ASSERT(domainInfo->mWorkers.Contains(aWorkerPrivate),
                  "Don't know about this worker!");
-      domainInfo->mActiveWorkers.RemoveElement(aWorkerPrivate);
+      domainInfo->mWorkers.RemoveElement(aWorkerPrivate);
     }
 
     if (aWorkerPrivate->IsSharedWorker()) {
       RemoveSharedWorker(domainInfo, aWorkerPrivate);
     }
 
-    // See if there's a queued worker we can schedule.
-    if (domainInfo->ActiveWorkerCount() < gMaxWorkersPerDomain &&
-        !domainInfo->mQueuedWorkers.IsEmpty()) {
-      queuedWorker = domainInfo->mQueuedWorkers[0];
-      domainInfo->mQueuedWorkers.RemoveElementAt(0);
-
-      if (queuedWorker->GetParent()) {
-        domainInfo->mChildWorkerCount++;
-      }
-      else if (queuedWorker->IsServiceWorker()) {
-        domainInfo->mActiveServiceWorkers.AppendElement(queuedWorker);
-      }
-      else {
-        domainInfo->mActiveWorkers.AppendElement(queuedWorker);
-      }
-    }
-
     if (domainInfo->HasNoWorkers()) {
-      MOZ_ASSERT(domainInfo->mQueuedWorkers.IsEmpty());
       mDomainMap.Remove(domain);
     }
   }
@@ -1901,10 +1844,6 @@ RuntimeService::Init()
     NS_WARNING("Failed to register timeout cache!");
   }
 
-  int32_t maxPerDomain = Preferences::GetInt(PREF_WORKERS_MAX_PER_DOMAIN,
-                                             MAX_WORKERS_PER_DOMAIN);
-  gMaxWorkersPerDomain = std::max(0, maxPerDomain);
-
   rv = InitOSFileConstants();
   if (NS_FAILED(rv)) {
     return rv;
@@ -2096,26 +2035,18 @@ RuntimeService::AddAllTopLevelWorkersToArray(nsTArray<WorkerPrivate*>& aWorkers)
     WorkerDomainInfo* aData = iter.UserData();
 
 #ifdef DEBUG
-    for (uint32_t index = 0; index < aData->mActiveWorkers.Length(); index++) {
-      MOZ_ASSERT(!aData->mActiveWorkers[index]->GetParent(),
+    for (uint32_t index = 0; index < aData->mWorkers.Length(); index++) {
+      MOZ_ASSERT(!aData->mWorkers[index]->GetParent(),
                  "Shouldn't have a parent in this list!");
     }
-    for (uint32_t index = 0; index < aData->mActiveServiceWorkers.Length(); index++) {
-      MOZ_ASSERT(!aData->mActiveServiceWorkers[index]->GetParent(),
+    for (uint32_t index = 0; index < aData->mServiceWorkers.Length(); index++) {
+      MOZ_ASSERT(!aData->mServiceWorkers[index]->GetParent(),
                  "Shouldn't have a parent in this list!");
     }
 #endif
 
-    aWorkers.AppendElements(aData->mActiveWorkers);
-    aWorkers.AppendElements(aData->mActiveServiceWorkers);
-
-    // These might not be top-level workers...
-    for (uint32_t index = 0; index < aData->mQueuedWorkers.Length(); index++) {
-      WorkerPrivate* worker = aData->mQueuedWorkers[index];
-      if (!worker->GetParent()) {
-        aWorkers.AppendElement(worker);
-      }
-    }
+    aWorkers.AppendElements(aData->mWorkers);
+    aWorkers.AppendElements(aData->mServiceWorkers);
   }
 }
 
@@ -2140,7 +2071,7 @@ RuntimeService::CancelWorkersForWindow(nsPIDOMWindowInner* aWindow)
 {
   AssertIsOnMainThread();
 
-  AutoTArray<WorkerPrivate*, MAX_WORKERS_PER_DOMAIN> workers;
+  nsTArray<WorkerPrivate*> workers;
   GetWorkersForWindow(aWindow, workers);
 
   if (!workers.IsEmpty()) {
@@ -2162,7 +2093,7 @@ RuntimeService::FreezeWorkersForWindow(nsPIDOMWindowInner* aWindow)
   AssertIsOnMainThread();
   MOZ_ASSERT(aWindow);
 
-  AutoTArray<WorkerPrivate*, MAX_WORKERS_PER_DOMAIN> workers;
+  nsTArray<WorkerPrivate*> workers;
   GetWorkersForWindow(aWindow, workers);
 
   for (uint32_t index = 0; index < workers.Length(); index++) {
@@ -2176,7 +2107,7 @@ RuntimeService::ThawWorkersForWindow(nsPIDOMWindowInner* aWindow)
   AssertIsOnMainThread();
   MOZ_ASSERT(aWindow);
 
-  AutoTArray<WorkerPrivate*, MAX_WORKERS_PER_DOMAIN> workers;
+  nsTArray<WorkerPrivate*> workers;
   GetWorkersForWindow(aWindow, workers);
 
   for (uint32_t index = 0; index < workers.Length(); index++) {
@@ -2190,7 +2121,7 @@ RuntimeService::SuspendWorkersForWindow(nsPIDOMWindowInner* aWindow)
   AssertIsOnMainThread();
   MOZ_ASSERT(aWindow);
 
-  AutoTArray<WorkerPrivate*, MAX_WORKERS_PER_DOMAIN> workers;
+  nsTArray<WorkerPrivate*> workers;
   GetWorkersForWindow(aWindow, workers);
 
   for (uint32_t index = 0; index < workers.Length(); index++) {
@@ -2204,7 +2135,7 @@ RuntimeService::ResumeWorkersForWindow(nsPIDOMWindowInner* aWindow)
   AssertIsOnMainThread();
   MOZ_ASSERT(aWindow);
 
-  AutoTArray<WorkerPrivate*, MAX_WORKERS_PER_DOMAIN> workers;
+  nsTArray<WorkerPrivate*> workers;
  GetWorkersForWindow(aWindow, workers);
 
  for (uint32_t index = 0; index < workers.Length(); index++) {
@@ -2481,9 +2412,7 @@ RuntimeService::ClampedHardwareConcurrency() const
     if (numberOfProcessors <= 0) {
       numberOfProcessors = 1; // Must be one there somewhere
     }
-    uint32_t clampedValue = std::min(uint32_t(numberOfProcessors),
-                                     gMaxWorkersPerDomain);
-    clampedHardwareConcurrency.compareExchange(0, clampedValue);
+    clampedHardwareConcurrency = numberOfProcessors;
   }
 
   return clampedHardwareConcurrency;
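The ClampedHardwareConcurrency() hunk above also stops clamping the cached value to gMaxWorkersPerDomain, so the concurrency hint exposed to content (this getter appears to back navigator.hardwareConcurrency) now simply reflects the detected processor count. A hedged sketch of how a page might consume that hint; the cap of 8, the fallback of 4, and the "worker.js" script are placeholder assumptions, not part of the patch:

```js
// Size a worker pool from the (now unclamped) concurrency hint.
const poolSize = Math.min(navigator.hardwareConcurrency || 4, 8); // fallback and cap are arbitrary
const pool = [];
for (let i = 0; i < poolSize; i++) {
  pool.push(new Worker("worker.js")); // hypothetical worker script
}
```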
@@ -42,34 +42,21 @@ class RuntimeService final : public nsIObserver
   struct WorkerDomainInfo
   {
     nsCString mDomain;
-    nsTArray<WorkerPrivate*> mActiveWorkers;
-    nsTArray<WorkerPrivate*> mActiveServiceWorkers;
-    nsTArray<WorkerPrivate*> mQueuedWorkers;
+    nsTArray<WorkerPrivate*> mWorkers;
+    nsTArray<WorkerPrivate*> mServiceWorkers;
     nsClassHashtable<nsCStringHashKey, SharedWorkerInfo> mSharedWorkerInfos;
     uint32_t mChildWorkerCount;
 
     WorkerDomainInfo()
-    : mActiveWorkers(1), mChildWorkerCount(0)
+    : mWorkers(1), mChildWorkerCount(0)
     { }
 
-    uint32_t
-    ActiveWorkerCount() const
-    {
-      return mActiveWorkers.Length() +
-             mChildWorkerCount;
-    }
-
-    uint32_t
-    ActiveServiceWorkerCount() const
-    {
-      return mActiveServiceWorkers.Length();
-    }
-
     bool
     HasNoWorkers() const
     {
-      return ActiveWorkerCount() == 0 &&
-             ActiveServiceWorkerCount() == 0;
+      return mWorkers.IsEmpty() &&
+             mServiceWorkers.IsEmpty() &&
+             !mChildWorkerCount;
     }
   };
 
@@ -166,7 +166,6 @@ support-files =
 [test_bug1132395.html]
 skip-if = true # bug 1176225
 [test_bug1132924.html]
-[test_bug1241485.html]
 [test_chromeWorker.html]
 [test_clearTimeouts.html]
 [test_close.html]
@@ -1,82 +0,0 @@
-<!DOCTYPE HTML>
-<html>
-<!--
-https://bugzilla.mozilla.org/show_bug.cgi?id=1241485
--->
-<head>
-  <meta charset="utf-8">
-  <title>Test for Bug 1241485</title>
-  <script type="application/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
-  <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>
-  <script type="application/javascript">
-
-  /** Test for Bug 1241485 **/
-  SimpleTest.waitForExplicitFinish();
-  SimpleTest.requestCompleteLog();
-  SimpleTest.requestFlakyTimeout("requestFlakyTimeout is silly.");
-
-  var limit = SpecialPowers.Services.prefs.getIntPref("dom.workers.maxPerDomain");
-  var workers = new Array();
-  var workerToWait = null;
-  var url = URL.createObjectURL(new Blob(["postMessage('loaded');"]));
-  var timeouts = new Array();
-
-  function addTimeout(fn, time) {
-    timeouts.push(setTimeout(fn, time));
-  }
-
-  function createWorker() {
-    workerToWait = new Worker(url);
-    workerToWait.onmessage = function(e) {
-      if (!workers) {
-        // finish() has been called already.
-        return;
-      }
-      workers.push(workerToWait);
-      info(workers.length + " workers");
-      addTimeout(createWorker, 0);
-      if (workers.length == limit) {
-        // Just give the worker creation loop some more time to try to
-        // create more workers to check that we don't go over the limit.
-        addTimeout(finish, 250);
-      }
-    };
-  }
-
-  function test() {
-    info("Expecting no more than " + limit + " workers.");
-    // Make sure we finish at some point, even if creating workers takes
-    // lots of time.
-    addTimeout(finish, 10000);
-    addTimeout(createWorker, 0);
-  }
-
-  function finish() {
-    for (var i = 0; i < timeouts.length; ++i) {
-      clearTimeout(timeouts[i]);
-    }
-
-    if (workerToWait) {
-      workerToWait.onmessage = null;
-    }
-
-    ok(workers.length <= limit, "Too many workers created!");
-
-    workers = null;
-    SpecialPowers.gc();
-
-    SimpleTest.finish();
-  }
-
-  </script>
-</head>
-<body onload="test();">
-<a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=1241485">Mozilla Bug 1241485</a>
-<p id="display"></p>
-<div id="content" style="display: none">
-
-</div>
-<pre id="test">
-</pre>
-</body>
-</html>
@@ -138,8 +138,6 @@ pref("dom.select_events.enabled", false);
 
 // Whether or not Web Workers are enabled.
 pref("dom.workers.enabled", true);
-// The number of workers per domain allowed to run concurrently.
-pref("dom.workers.maxPerDomain", 50);
 
 pref("dom.serviceWorkers.enabled", false);
 
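With the all.js hunk above, the dom.workers.maxPerDomain pref (default 50) no longer ships. Chrome-privileged code that still reads it, the way the deleted mochitest did through SpecialPowers, has to tolerate the missing default; a hedged sketch assuming Services.jsm is available:

```js
// getIntPref throws if neither a default nor a user value exists, so guard it.
let maxPerDomain = 50; // the old default, used here only as a fallback
try {
  maxPerDomain = Services.prefs.getIntPref("dom.workers.maxPerDomain");
} catch (e) {
  // Pref removed by this patch; keep the fallback.
}
```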
@@ -9567,16 +9567,6 @@
     "kind": "count",
     "description": "Count ServiceWorkers that really did get a thread created for them. File bugs in Core::DOM in case of a Telemetry regression."
   },
-  "SERVICE_WORKER_SPAWN_GETS_QUEUED": {
-    "expires_in_version": "50",
-    "kind": "count",
-    "description": "Tracking whether a ServiceWorker spawn gets queued due to hitting max workers per domain limit. File bugs in Core::DOM in case of a Telemetry regression."
-  },
-  "SHARED_WORKER_SPAWN_GETS_QUEUED": {
-    "expires_in_version": "50",
-    "kind": "count",
-    "description": "Tracking whether a SharedWorker spawn gets queued due to hitting max workers per domain limit. File bugs in Core::DOM in case of a Telemetry regression."
-  },
   "SERVICE_WORKER_REGISTRATIONS": {
     "expires_in_version": "50",
     "kind": "count",
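Since the queueing branch in RegisterWorker is gone, nothing accumulates into SERVICE_WORKER_SPAWN_GETS_QUEUED or SHARED_WORKER_SPAWN_GETS_QUEUED any more, so their histogram definitions are dropped as well. For reference, a sketch of how such a count probe could be inspected from privileged JS while it still existed; the exact Telemetry JS surface used here is an assumption:

```js
// Assumes Services.jsm and a build in which the probe is still defined.
const h = Services.telemetry.getHistogramById("SERVICE_WORKER_SPAWN_GETS_QUEUED");
console.log(h.snapshot().sum); // total spawns that hit the per-domain limit
```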