Bug 1373088 - Don't use a timeout for compositor process startup when debugging child process. r=milan

--HG--
extra : rebase_source : 39e5c564fc7b0c2f7e94535a9c7b17bfd03914e3
Author: domfarolino@gmail.com
Date:   2017-06-19 13:12:00 -04:00
Parent: 1115881728
Commit: af4e81298b
2 changed files with 8 additions and 2 deletions

GPUProcessHost.cpp

@@ -39,7 +39,7 @@ GPUProcessHost::Launch()
MOZ_ASSERT(mLaunchPhase == LaunchPhase::Unlaunched);
MOZ_ASSERT(!mGPUChild);
#if defined(XP_WIN) && defined(MOZ_SANDBOX)
mSandboxLevel = Preferences::GetInt("security.sandbox.gpu.level");
#endif
@@ -62,6 +62,12 @@ GPUProcessHost::WaitForLaunch()
int32_t timeoutMs = gfxPrefs::GPUProcessTimeoutMs();
// If either of the following environment variables is set, we can effectively
// ignore the timeout, as we can guarantee the compositor process will be terminated.
if (PR_GetEnv("MOZ_DEBUG_CHILD_PROCESS") || PR_GetEnv("MOZ_DEBUG_CHILD_PAUSE")) {
timeoutMs = 0;
}
// Our caller expects the connection to be finished after we return, so we
// immediately set up the IPDL actor and fire callbacks. The IO thread will
// still dispatch a notification to the main thread - we'll just ignore it.
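
For context, a minimal sketch of how the adjusted timeoutMs is presumably consumed right after this hunk. The WaitUntilConnected/InitAfterConnect calls and the meaning of a zero timeout are reconstructed from the surrounding WaitForLaunch() implementation and are not part of this diff.

// Sketch only (not in this diff): a timeout of 0 is assumed to mean
// "wait indefinitely", which is why the debug environment variables
// zero it out instead of picking a larger value.
bool result = GeckoChildProcessHost::WaitUntilConnected(timeoutMs);
InitAfterConnect(result);   // fires the launch callbacks our caller expects
return result;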

gfxPrefs.h

@@ -551,7 +551,7 @@ private:
DECL_GFX_PREF(Once, "layers.force-shmem-tiles", ForceShmemTiles, bool, false);
DECL_GFX_PREF(Once, "layers.gpu-process.enabled", GPUProcessEnabled, bool, false);
DECL_GFX_PREF(Once, "layers.gpu-process.force-enabled", GPUProcessForceEnabled, bool, false);
DECL_GFX_PREF(Once, "layers.gpu-process.timeout_ms", GPUProcessTimeoutMs, int32_t, 5000);
DECL_GFX_PREF(Once, "layers.gpu-process.startup_timeout_ms", GPUProcessTimeoutMs, int32_t, 5000);
DECL_GFX_PREF(Live, "layers.gpu-process.max_restarts", GPUProcessMaxRestarts, int32_t, 1);
DECL_GFX_PREF(Once, "layers.gpu-process.allow-software", GPUProcessAllowSoftware, bool, false);
// Note: This pref will only be used if it is less than layers.gpu-process.max_restarts.
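
For reference, a minimal sketch of why the rename is safe at call sites: only the pref string changes here, while the accessor generated by DECL_GFX_PREF keeps the GPUProcessTimeoutMs name used in the hunk above. The snippet below is illustrative and not part of this commit.

// Illustrative only: callers keep compiling unchanged because they go through
// the generated accessor, which now reads the renamed pref string.
int32_t timeoutMs = gfxPrefs::GPUProcessTimeoutMs();
// before: backed by "layers.gpu-process.timeout_ms"
// after:  backed by "layers.gpu-process.startup_timeout_ms"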