mirror of
https://github.com/mozilla/gecko-dev.git
synced 2024-11-24 13:21:05 +00:00
Bug 1406872 - Remove perf monitoring code - r=jandem,Gijs
This patch removes the StopWatch code that was used in the first version of about:performance, and not being used anymore. Differential Revision: https://phabricator.services.mozilla.com/D7453 --HG-- extra : moz-landing-system : lando
This commit is contained in:
parent
03398dac7a
commit
c9ddafea1f
@ -154,9 +154,6 @@ var whitelist = [
|
||||
{file: "resource://gre/modules/PerfMeasurement.jsm"},
|
||||
// Bug 1356045
|
||||
{file: "chrome://global/content/test-ipc.xul"},
|
||||
// Bug 1356036
|
||||
{file: "resource://gre/modules/PerformanceWatcher-content.js"},
|
||||
{file: "resource://gre/modules/PerformanceWatcher.jsm"},
|
||||
// Bug 1378173 (warning: still used by devtools)
|
||||
{file: "resource://gre/modules/Promise.jsm"},
|
||||
// Still used by WebIDE, which is going away but not entirely gone.
|
||||
|
@ -1,41 +0,0 @@
|
||||
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
|
||||
* vim: set ts=4 sw=2 et tw=80:
|
||||
*
|
||||
* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#include "jsfriendapi.h"
|
||||
#include "nsContentUtils.h"
|
||||
#include "CPOWTimer.h"
|
||||
|
||||
#include "jsapi.h"
|
||||
|
||||
CPOWTimer::CPOWTimer(JSContext* cx MOZ_GUARD_OBJECT_NOTIFIER_PARAM_IN_IMPL)
|
||||
: cx_(nullptr), startInterval_(0) {
|
||||
MOZ_GUARD_OBJECT_NOTIFIER_INIT;
|
||||
if (!js::GetStopwatchIsMonitoringCPOW(cx)) {
|
||||
return;
|
||||
}
|
||||
cx_ = cx;
|
||||
startInterval_ = JS_Now();
|
||||
}
|
||||
CPOWTimer::~CPOWTimer() {
|
||||
if (!cx_) {
|
||||
// Monitoring was off when we started the timer.
|
||||
return;
|
||||
}
|
||||
|
||||
if (!js::GetStopwatchIsMonitoringCPOW(cx_)) {
|
||||
// Monitoring has been deactivated while we were in the timer.
|
||||
return;
|
||||
}
|
||||
|
||||
const int64_t endInterval = JS_Now();
|
||||
if (endInterval <= startInterval_) {
|
||||
// Do not assume monotonicity.
|
||||
return;
|
||||
}
|
||||
|
||||
js::AddCPOWPerformanceDelta(cx_, endInterval - startInterval_);
|
||||
}
|
@ -1,44 +0,0 @@
|
||||
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
|
||||
* vim: set ts=4 sw=2 et tw=80:
|
||||
*
|
||||
* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#ifndef CPOWTIMER_H
|
||||
#define CPOWTIMER_H
|
||||
|
||||
#include "prinrval.h"
|
||||
#include "jsapi.h"
|
||||
|
||||
/**
|
||||
* A stopwatch measuring the duration of a CPOW call.
|
||||
*
|
||||
* As the process is consuming neither user time nor system time
|
||||
* during a CPOW call, we measure such durations using wallclock time.
|
||||
*
|
||||
* This stopwatch is active iff JSRuntime::stopwatch.isActive is set.
|
||||
* Upon destruction, update JSRuntime::stopwatch.data.totalCPOWTime.
|
||||
*/
|
||||
class MOZ_RAII CPOWTimer final {
|
||||
public:
|
||||
explicit inline CPOWTimer(JSContext* cx MOZ_GUARD_OBJECT_NOTIFIER_PARAM);
|
||||
~CPOWTimer();
|
||||
|
||||
private:
|
||||
MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
|
||||
|
||||
/**
|
||||
* The context in which this timer was created, or `nullptr` if
|
||||
* CPOW monitoring was off when the timer was created.
|
||||
*/
|
||||
JSContext* cx_;
|
||||
|
||||
/**
|
||||
* The instant at which the stopwatch was started. Undefined
|
||||
* if CPOW monitoring was off when the timer was created.
|
||||
*/
|
||||
int64_t startInterval_;
|
||||
};
|
||||
|
||||
#endif
|
@ -12,7 +12,6 @@
|
||||
#include "jsfriendapi.h"
|
||||
#include "js/CharacterEncoding.h"
|
||||
#include "xpcprivate.h"
|
||||
#include "CPOWTimer.h"
|
||||
#include "WrapperFactory.h"
|
||||
|
||||
#include "nsIDocShellTreeItem.h"
|
||||
@ -149,7 +148,6 @@ const CPOWProxyHandler CPOWProxyHandler::singleton;
|
||||
return failRetVal; \
|
||||
} \
|
||||
{ \
|
||||
CPOWTimer timer(cx); \
|
||||
return owner->call args; \
|
||||
}
|
||||
|
||||
|
@ -5,7 +5,6 @@
|
||||
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
UNIFIED_SOURCES += [
|
||||
'CPOWTimer.cpp',
|
||||
'JavaScriptChild.cpp',
|
||||
'JavaScriptParent.cpp',
|
||||
'JavaScriptShared.cpp',
|
||||
|
@ -18,10 +18,6 @@
|
||||
struct JSStructuredCloneReader;
|
||||
struct JSStructuredCloneWriter;
|
||||
|
||||
namespace js {
|
||||
struct JS_PUBLIC_API PerformanceGroup;
|
||||
} // namespace js
|
||||
|
||||
struct JSPrincipals {
|
||||
/* Don't call "destroy"; use reference counting macros below. */
|
||||
mozilla::Atomic<int32_t, mozilla::SequentiallyConsistent,
|
||||
|
164
js/src/jsapi.h
164
js/src/jsapi.h
@ -4116,170 +4116,6 @@ namespace js {
|
||||
|
||||
class AutoStopwatch;
|
||||
|
||||
/**
|
||||
* Abstract base class for a representation of the performance of a
|
||||
* component. Embeddings interested in performance monitoring should
|
||||
* provide a concrete implementation of this class, as well as the
|
||||
* relevant callbacks (see below).
|
||||
*/
|
||||
struct JS_PUBLIC_API PerformanceGroup {
|
||||
PerformanceGroup();
|
||||
|
||||
// The current iteration of the event loop.
|
||||
uint64_t iteration() const;
|
||||
|
||||
// `true` if an instance of `AutoStopwatch` is already monitoring
|
||||
// the performance of this performance group for this iteration
|
||||
// of the event loop, `false` otherwise.
|
||||
bool isAcquired(uint64_t it) const;
|
||||
|
||||
// `true` if a specific instance of `AutoStopwatch` is already monitoring
|
||||
// the performance of this performance group for this iteration
|
||||
// of the event loop, `false` otherwise.
|
||||
bool isAcquired(uint64_t it, const AutoStopwatch* owner) const;
|
||||
|
||||
// Mark that an instance of `AutoStopwatch` is monitoring
|
||||
// the performance of this group for a given iteration.
|
||||
void acquire(uint64_t it, const AutoStopwatch* owner);
|
||||
|
||||
// Mark that no `AutoStopwatch` is monitoring the
|
||||
// performance of this group for the iteration.
|
||||
void release(uint64_t it, const AutoStopwatch* owner);
|
||||
|
||||
// The number of cycles spent in this group during this iteration
|
||||
// of the event loop. Note that cycles are not a reliable measure,
|
||||
// especially over short intervals. See Stopwatch.* for a more
|
||||
// complete discussion on the imprecision of cycle measurement.
|
||||
uint64_t recentCycles(uint64_t iteration) const;
|
||||
void addRecentCycles(uint64_t iteration, uint64_t cycles);
|
||||
|
||||
// The number of times this group has been activated during this
|
||||
// iteration of the event loop.
|
||||
uint64_t recentTicks(uint64_t iteration) const;
|
||||
void addRecentTicks(uint64_t iteration, uint64_t ticks);
|
||||
|
||||
// The number of microseconds spent doing CPOW during this
|
||||
// iteration of the event loop.
|
||||
uint64_t recentCPOW(uint64_t iteration) const;
|
||||
void addRecentCPOW(uint64_t iteration, uint64_t CPOW);
|
||||
|
||||
// Get rid of any data that pretends to be recent.
|
||||
void resetRecentData();
|
||||
|
||||
// `true` if new measures should be added to this group, `false`
|
||||
// otherwise.
|
||||
bool isActive() const;
|
||||
void setIsActive(bool);
|
||||
|
||||
// `true` if this group has been used in the current iteration,
|
||||
// `false` otherwise.
|
||||
bool isUsedInThisIteration() const;
|
||||
void setIsUsedInThisIteration(bool);
|
||||
|
||||
protected:
|
||||
// An implementation of `delete` for this object. Must be provided
|
||||
// by the embedding.
|
||||
virtual void Delete() = 0;
|
||||
|
||||
private:
|
||||
// The number of cycles spent in this group during this iteration
|
||||
// of the event loop. Note that cycles are not a reliable measure,
|
||||
// especially over short intervals. See Runtime.cpp for a more
|
||||
// complete discussion on the imprecision of cycle measurement.
|
||||
uint64_t recentCycles_;
|
||||
|
||||
// The number of times this group has been activated during this
|
||||
// iteration of the event loop.
|
||||
uint64_t recentTicks_;
|
||||
|
||||
// The number of microseconds spent doing CPOW during this
|
||||
// iteration of the event loop.
|
||||
uint64_t recentCPOW_;
|
||||
|
||||
// The current iteration of the event loop. If necessary,
|
||||
// may safely overflow.
|
||||
uint64_t iteration_;
|
||||
|
||||
// `true` if new measures should be added to this group, `false`
|
||||
// otherwise.
|
||||
bool isActive_;
|
||||
|
||||
// `true` if this group has been used in the current iteration,
|
||||
// `false` otherwise.
|
||||
bool isUsedInThisIteration_;
|
||||
|
||||
// The stopwatch currently monitoring the group,
|
||||
// or `nullptr` if none. Used ony for comparison.
|
||||
const AutoStopwatch* owner_;
|
||||
|
||||
public:
|
||||
// Compatibility with RefPtr<>
|
||||
void AddRef();
|
||||
void Release();
|
||||
uint64_t refCount_;
|
||||
};
|
||||
|
||||
using PerformanceGroupVector =
|
||||
mozilla::Vector<RefPtr<js::PerformanceGroup>, 8, SystemAllocPolicy>;
|
||||
|
||||
/**
|
||||
* Commit any Performance Monitoring data.
|
||||
*
|
||||
* Until `FlushMonitoring` has been called, all PerformanceMonitoring data is
|
||||
* invisible to the outside world and can cancelled with a call to
|
||||
* `ResetMonitoring`.
|
||||
*/
|
||||
extern JS_PUBLIC_API bool FlushPerformanceMonitoring(JSContext*);
|
||||
|
||||
/**
|
||||
* Cancel any measurement that hasn't been committed.
|
||||
*/
|
||||
extern JS_PUBLIC_API void ResetPerformanceMonitoring(JSContext*);
|
||||
|
||||
/**
|
||||
* Cleanup any memory used by performance monitoring.
|
||||
*/
|
||||
extern JS_PUBLIC_API void DisposePerformanceMonitoring(JSContext*);
|
||||
|
||||
/**
|
||||
* Turn on/off stopwatch-based CPU monitoring.
|
||||
*
|
||||
* `SetStopwatchIsMonitoringCPOW` or `SetStopwatchIsMonitoringJank`
|
||||
* may return `false` if monitoring could not be activated, which may
|
||||
* happen if we are out of memory.
|
||||
*/
|
||||
extern JS_PUBLIC_API bool SetStopwatchIsMonitoringCPOW(JSContext*, bool);
|
||||
extern JS_PUBLIC_API bool GetStopwatchIsMonitoringCPOW(JSContext*);
|
||||
extern JS_PUBLIC_API bool SetStopwatchIsMonitoringJank(JSContext*, bool);
|
||||
extern JS_PUBLIC_API bool GetStopwatchIsMonitoringJank(JSContext*);
|
||||
|
||||
// Extract the CPU rescheduling data.
|
||||
extern JS_PUBLIC_API void GetPerfMonitoringTestCpuRescheduling(JSContext*,
|
||||
uint64_t* stayed,
|
||||
uint64_t* moved);
|
||||
|
||||
/**
|
||||
* Add a number of microseconds to the time spent waiting on CPOWs
|
||||
* since process start.
|
||||
*/
|
||||
extern JS_PUBLIC_API void AddCPOWPerformanceDelta(JSContext*, uint64_t delta);
|
||||
|
||||
typedef bool (*StopwatchStartCallback)(uint64_t, void*);
|
||||
extern JS_PUBLIC_API bool SetStopwatchStartCallback(JSContext*,
|
||||
StopwatchStartCallback,
|
||||
void*);
|
||||
|
||||
typedef bool (*StopwatchCommitCallback)(uint64_t, PerformanceGroupVector&,
|
||||
void*);
|
||||
extern JS_PUBLIC_API bool SetStopwatchCommitCallback(JSContext*,
|
||||
StopwatchCommitCallback,
|
||||
void*);
|
||||
|
||||
typedef bool (*GetGroupsCallback)(JSContext*, PerformanceGroupVector&, void*);
|
||||
extern JS_PUBLIC_API bool SetGetPerformanceGroupsCallback(JSContext*,
|
||||
GetGroupsCallback,
|
||||
void*);
|
||||
|
||||
/**
|
||||
* Hint that we expect a crash. Currently, the only thing that cares is the
|
||||
* breakpad injector, which (if loaded) will suppress minidump generation.
|
||||
|
@ -305,7 +305,6 @@ UNIFIED_SOURCES += [
|
||||
'vm/SharedArrayObject.cpp',
|
||||
'vm/SharedImmutableStringsCache.cpp',
|
||||
'vm/Stack.cpp',
|
||||
'vm/Stopwatch.cpp',
|
||||
'vm/StringType.cpp',
|
||||
'vm/SymbolType.cpp',
|
||||
'vm/TaggedProto.cpp',
|
||||
|
@ -51,7 +51,6 @@
|
||||
#include "vm/Opcodes.h"
|
||||
#include "vm/Scope.h"
|
||||
#include "vm/Shape.h"
|
||||
#include "vm/Stopwatch.h"
|
||||
#include "vm/StringType.h"
|
||||
#include "vm/TraceLogging.h"
|
||||
|
||||
@ -398,10 +397,6 @@ bool js::RunScript(JSContext* cx, RunState& state) {
|
||||
return false;
|
||||
}
|
||||
|
||||
#if defined(MOZ_HAVE_RDTSC)
|
||||
js::AutoStopwatch stopwatch(cx);
|
||||
#endif // defined(MOZ_HAVE_RDTSC)
|
||||
|
||||
GeckoProfilerEntryMarker marker(cx, state.script());
|
||||
|
||||
state.script()->ensureNonLazyCanonicalFunction();
|
||||
|
@ -53,8 +53,7 @@ Realm::Realm(Compartment* comp, const JS::RealmOptions& options)
|
||||
global_(nullptr),
|
||||
objects_(zone_),
|
||||
randomKeyGenerator_(runtime_->forkRandomKeyGenerator()),
|
||||
wasm(runtime_),
|
||||
performanceMonitoring(runtime_) {
|
||||
wasm(runtime_) {
|
||||
MOZ_ASSERT_IF(creationOptions_.mergeable(),
|
||||
creationOptions_.invisibleToDebugger());
|
||||
|
||||
|
@ -409,8 +409,6 @@ class JS::Realm : public JS::shadow::Realm {
|
||||
js::ArraySpeciesLookup arraySpeciesLookup;
|
||||
js::PromiseLookup promiseLookup;
|
||||
|
||||
js::PerformanceGroupHolder performanceMonitoring;
|
||||
|
||||
js::UniquePtr<js::ScriptCountsMap> scriptCountsMap;
|
||||
js::UniquePtr<js::ScriptNameMap> scriptNameMap;
|
||||
js::UniquePtr<js::DebugScriptMap> debugScriptMap;
|
||||
@ -641,18 +639,6 @@ class JS::Realm : public JS::shadow::Realm {
|
||||
*/
|
||||
JSPrincipals* principals() { return principals_; }
|
||||
void setPrincipals(JSPrincipals* principals) {
|
||||
if (principals_ == principals) {
|
||||
return;
|
||||
}
|
||||
|
||||
// If we change principals, we need to unlink immediately this
|
||||
// realm from its PerformanceGroup. For one thing, the performance data
|
||||
// we collect should not be improperly associated with a group to which
|
||||
// we do not belong anymore. For another thing, we use `principals()` as
|
||||
// part of the key to map realms to a `PerformanceGroup`, so if we do
|
||||
// not unlink now, this will be too late once we have updated
|
||||
// `principals_`.
|
||||
performanceMonitoring.unlink();
|
||||
principals_ = principals;
|
||||
}
|
||||
|
||||
|
@ -156,7 +156,6 @@ JSRuntime::JSRuntime(JSRuntime* parentRuntime)
|
||||
autoWritableJitCodeActive_(false),
|
||||
oomCallback(nullptr),
|
||||
debuggerMallocSizeOf(ReturnZeroSize),
|
||||
performanceMonitoring_(),
|
||||
stackFormat_(parentRuntime ? js::StackFormat::Default
|
||||
: js::StackFormat::SpiderMonkey),
|
||||
wasmInstances(mutexid::WasmRuntimeInstances),
|
||||
|
@ -52,7 +52,6 @@
|
||||
#include "vm/Scope.h"
|
||||
#include "vm/SharedImmutableStringsCache.h"
|
||||
#include "vm/Stack.h"
|
||||
#include "vm/Stopwatch.h"
|
||||
#include "vm/SymbolType.h"
|
||||
#include "wasm/WasmTypes.h"
|
||||
|
||||
@ -921,14 +920,6 @@ struct JSRuntime : public js::MallocProvider<JSRuntime> {
|
||||
/* Last time at which an animation was played for this runtime. */
|
||||
js::MainThreadData<mozilla::TimeStamp> lastAnimationTime;
|
||||
|
||||
private:
|
||||
js::MainThreadData<js::PerformanceMonitoring> performanceMonitoring_;
|
||||
|
||||
public:
|
||||
js::PerformanceMonitoring& performanceMonitoring() {
|
||||
return performanceMonitoring_.ref();
|
||||
}
|
||||
|
||||
private:
|
||||
/* The stack format for the current runtime. Only valid on non-child
|
||||
* runtimes. */
|
||||
|
@ -1,595 +0,0 @@
|
||||
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
|
||||
* vim: set ts=8 sts=2 et sw=2 tw=80:
|
||||
* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#include "vm/Stopwatch.h"
|
||||
|
||||
#include "mozilla/ArrayUtils.h"
|
||||
#include "mozilla/IntegerTypeTraits.h"
|
||||
#include "mozilla/Unused.h"
|
||||
|
||||
#if defined(XP_WIN)
|
||||
#include <processthreadsapi.h>
|
||||
#endif // defined(XP_WIN)
|
||||
|
||||
#include "gc/PublicIterators.h"
|
||||
#include "util/Windows.h"
|
||||
#include "vm/Realm.h"
|
||||
#include "vm/Runtime.h"
|
||||
|
||||
namespace js {
|
||||
|
||||
bool PerformanceMonitoring::addRecentGroup(PerformanceGroup* group) {
|
||||
if (group->isUsedInThisIteration()) {
|
||||
return true;
|
||||
}
|
||||
|
||||
group->setIsUsedInThisIteration(true);
|
||||
return recentGroups_.append(group);
|
||||
}
|
||||
|
||||
void PerformanceMonitoring::reset() {
|
||||
// All ongoing measures are dependent on the current iteration#.
|
||||
// By incrementing it, we mark all data as stale. Stale data will
|
||||
// be overwritten progressively during the execution.
|
||||
++iteration_;
|
||||
recentGroups_.clear();
|
||||
|
||||
// Every so often, we will be rescheduled to another CPU. If this
|
||||
// happens, we may end up with an entirely unsynchronized
|
||||
// timestamp counter. If we do not reset
|
||||
// `highestTimestampCounter_`, we could end up ignoring entirely
|
||||
// valid sets of measures just because we are on a CPU that has a
|
||||
// lower RDTSC.
|
||||
highestTimestampCounter_ = 0;
|
||||
}
|
||||
|
||||
void PerformanceMonitoring::start() {
|
||||
if (!isMonitoringJank_) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (iteration_ == startedAtIteration_) {
|
||||
// The stopwatch is already started for this iteration.
|
||||
return;
|
||||
}
|
||||
|
||||
startedAtIteration_ = iteration_;
|
||||
if (stopwatchStartCallback) {
|
||||
stopwatchStartCallback(iteration_, stopwatchStartClosure);
|
||||
}
|
||||
}
|
||||
|
||||
// Commit the data that has been collected during the iteration
|
||||
// into the actual `PerformanceData`.
|
||||
//
|
||||
// We use the proportion of cycles-spent-in-group over
|
||||
// cycles-spent-in-toplevel-group as an approximation to allocate
|
||||
// system (kernel) time and user (CPU) time to each group. Note
|
||||
// that cycles are not an exact measure:
|
||||
//
|
||||
// 1. if the computer has gone to sleep, the clock may be reset to 0;
|
||||
// 2. if the process is moved between CPUs/cores, it may end up on a CPU
|
||||
// or core with an unsynchronized clock;
|
||||
// 3. the mapping between clock cycles and walltime varies with the current
|
||||
// frequency of the CPU;
|
||||
// 4. other threads/processes using the same CPU will also increment
|
||||
// the counter.
|
||||
//
|
||||
// ** Effect of 1. (computer going to sleep)
|
||||
//
|
||||
// We assume that this will happen very seldom. Since the final numbers
|
||||
// are bounded by the CPU time and Kernel time reported by `getresources`,
|
||||
// the effect will be contained to a single iteration of the event loop.
|
||||
//
|
||||
// ** Effect of 2. (moving between CPUs/cores)
|
||||
//
|
||||
// On platforms that support it, we only measure the number of cycles
|
||||
// if we start and end execution of a group on the same
|
||||
// CPU/core. While there is a small window (a few cycles) during which
|
||||
// the thread can be migrated without us noticing, we expect that this
|
||||
// will happen rarely enough that this won't affect the statistics
|
||||
// meaningfully.
|
||||
//
|
||||
// On other platforms, assuming that the probability of jumping
|
||||
// between CPUs/cores during a given (real) cycle is constant, and
|
||||
// that the distribution of differences between clocks is even, the
|
||||
// probability that the number of cycles reported by a measure is
|
||||
// modified by X cycles should be a gaussian distribution, with groups
|
||||
// with longer execution having a larger amplitude than groups with
|
||||
// shorter execution. Since we discard measures that result in a
|
||||
// negative number of cycles, this distribution is actually skewed
|
||||
// towards over-estimating the number of cycles of groups that already
|
||||
// have many cycles and under-estimating the number of cycles that
|
||||
// already have fewer cycles.
|
||||
//
|
||||
// Since the final numbers are bounded by the CPU time and Kernel time
|
||||
// reported by `getresources`, we accept this bias.
|
||||
//
|
||||
// ** Effect of 3. (mapping between clock cycles and walltime)
|
||||
//
|
||||
// Assuming that this is evenly distributed, we expect that this will
|
||||
// eventually balance out.
|
||||
//
|
||||
// ** Effect of 4. (cycles increase with system activity)
|
||||
//
|
||||
// Assuming that, within an iteration of the event loop, this happens
|
||||
// unformly over time, this will skew towards over-estimating the number
|
||||
// of cycles of groups that already have many cycles and under-estimating
|
||||
// the number of cycles that already have fewer cycles.
|
||||
//
|
||||
// Since the final numbers are bounded by the CPU time and Kernel time
|
||||
// reported by `getresources`, we accept this bias.
|
||||
//
|
||||
// ** Big picture
|
||||
//
|
||||
// Computing the number of cycles is fast and should be accurate
|
||||
// enough in practice. Alternatives (such as calling `getresources`
|
||||
// all the time or sampling from another thread) are very expensive
|
||||
// in system calls and/or battery and not necessarily more accurate.
|
||||
bool PerformanceMonitoring::commit() {
|
||||
#if defined(MOZ_HAVE_RDTSC)
|
||||
// Maximal initialization size, in elements for the vector of groups.
|
||||
static const size_t MAX_GROUPS_INIT_CAPACITY = 1024;
|
||||
|
||||
if (!isMonitoringJank_) {
|
||||
// Either we have not started monitoring or monitoring has
|
||||
// been cancelled during the iteration.
|
||||
return true;
|
||||
}
|
||||
|
||||
if (startedAtIteration_ != iteration_) {
|
||||
// No JS code has been monitored during this iteration.
|
||||
return true;
|
||||
}
|
||||
|
||||
// The move operation is generally constant time, unless
|
||||
// `recentGroups_.length()` is very small, in which case
|
||||
// it's fast just because it's small.
|
||||
PerformanceGroupVector recentGroups(std::move(recentGroups_));
|
||||
recentGroups_ = PerformanceGroupVector(); // Reconstruct after `Move`.
|
||||
|
||||
bool success = true;
|
||||
if (stopwatchCommitCallback) {
|
||||
success = stopwatchCommitCallback(iteration_, recentGroups,
|
||||
stopwatchCommitClosure);
|
||||
}
|
||||
|
||||
// Heuristic: we expect to have roughly the same number of groups as in
|
||||
// the previous iteration.
|
||||
const size_t capacity =
|
||||
std::min(recentGroups.capacity(), MAX_GROUPS_INIT_CAPACITY);
|
||||
success = recentGroups_.reserve(capacity) && success;
|
||||
|
||||
// Reset immediately, to make sure that we're not hit by the end
|
||||
// of a nested event loop (which would cause `commit` to be called
|
||||
// twice in succession).
|
||||
reset();
|
||||
return success;
|
||||
#else
|
||||
// The AutoStopwatch is only executed if `MOZ_HAVE_RDTSC`.
|
||||
return false;
|
||||
#endif // defined(MOZ_HAVE_RDTSC)
|
||||
}
|
||||
|
||||
uint64_t PerformanceMonitoring::monotonicReadTimestampCounter() {
|
||||
#if defined(MOZ_HAVE_RDTSC)
|
||||
const uint64_t hardware = ReadTimestampCounter();
|
||||
if (highestTimestampCounter_ < hardware) {
|
||||
highestTimestampCounter_ = hardware;
|
||||
}
|
||||
return highestTimestampCounter_;
|
||||
#else
|
||||
return 0;
|
||||
#endif // defined(MOZ_HAVE_RDTSC)
|
||||
}
|
||||
|
||||
void PerformanceMonitoring::dispose(JSRuntime* rt) {
|
||||
reset();
|
||||
for (RealmsIter r(rt); !r.done(); r.next()) {
|
||||
r->performanceMonitoring.unlink();
|
||||
}
|
||||
}
|
||||
|
||||
PerformanceGroupHolder::~PerformanceGroupHolder() { unlink(); }
|
||||
|
||||
void PerformanceGroupHolder::unlink() {
|
||||
initialized_ = false;
|
||||
groups_.clear();
|
||||
}
|
||||
|
||||
const PerformanceGroupVector* PerformanceGroupHolder::getGroups(JSContext* cx) {
|
||||
if (initialized_) {
|
||||
return &groups_;
|
||||
}
|
||||
|
||||
if (!runtime_->performanceMonitoring().getGroupsCallback) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
if (!runtime_->performanceMonitoring().getGroupsCallback(
|
||||
cx, groups_, runtime_->performanceMonitoring().getGroupsClosure)) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
initialized_ = true;
|
||||
return &groups_;
|
||||
}
|
||||
|
||||
AutoStopwatch::AutoStopwatch(
|
||||
JSContext* cx MOZ_GUARD_OBJECT_NOTIFIER_PARAM_IN_IMPL)
|
||||
: cx_(cx),
|
||||
iteration_(0),
|
||||
isMonitoringJank_(false),
|
||||
isMonitoringCPOW_(false),
|
||||
cyclesStart_(0),
|
||||
CPOWTimeStart_(0) {
|
||||
MOZ_GUARD_OBJECT_NOTIFIER_INIT;
|
||||
|
||||
JS::Compartment* compartment = cx_->compartment();
|
||||
if (MOZ_UNLIKELY(compartment->gcState.scheduledForDestruction)) {
|
||||
return;
|
||||
}
|
||||
|
||||
JSRuntime* runtime = cx_->runtime();
|
||||
iteration_ = runtime->performanceMonitoring().iteration();
|
||||
|
||||
const PerformanceGroupVector* groups =
|
||||
cx_->realm()->performanceMonitoring.getGroups(cx);
|
||||
if (!groups) {
|
||||
// Either the embedding has not provided any performance
|
||||
// monitoring logistics or there was an error that prevents
|
||||
// performance monitoring.
|
||||
return;
|
||||
}
|
||||
for (auto group = groups->begin(); group < groups->end(); group++) {
|
||||
auto acquired = acquireGroup(*group);
|
||||
if (acquired) {
|
||||
if (!groups_.append(acquired)) {
|
||||
MOZ_CRASH();
|
||||
}
|
||||
}
|
||||
}
|
||||
if (groups_.length() == 0) {
|
||||
// We are not in charge of monitoring anything.
|
||||
return;
|
||||
}
|
||||
|
||||
// Now that we are sure that JS code is being executed,
|
||||
// initialize the stopwatch for this iteration, lazily.
|
||||
runtime->performanceMonitoring().start();
|
||||
enter();
|
||||
}
|
||||
|
||||
AutoStopwatch::~AutoStopwatch() {
|
||||
if (groups_.length() == 0) {
|
||||
// We are not in charge of monitoring anything.
|
||||
return;
|
||||
}
|
||||
|
||||
JS::Compartment* compartment = cx_->compartment();
|
||||
if (MOZ_UNLIKELY(compartment->gcState.scheduledForDestruction)) {
|
||||
return;
|
||||
}
|
||||
|
||||
JSRuntime* runtime = cx_->runtime();
|
||||
if (MOZ_UNLIKELY(iteration_ !=
|
||||
runtime->performanceMonitoring().iteration())) {
|
||||
// We have entered a nested event loop at some point.
|
||||
// Any information we may have is obsolete.
|
||||
return;
|
||||
}
|
||||
|
||||
mozilla::Unused << exit(); // Sadly, there is nothing we can do about an
|
||||
// error at this point.
|
||||
|
||||
for (auto group = groups_.begin(); group < groups_.end(); group++) {
|
||||
releaseGroup(*group);
|
||||
}
|
||||
}
|
||||
|
||||
void AutoStopwatch::enter() {
|
||||
JSRuntime* runtime = cx_->runtime();
|
||||
|
||||
if (runtime->performanceMonitoring().isMonitoringCPOW()) {
|
||||
CPOWTimeStart_ = runtime->performanceMonitoring().totalCPOWTime;
|
||||
isMonitoringCPOW_ = true;
|
||||
}
|
||||
|
||||
if (runtime->performanceMonitoring().isMonitoringJank()) {
|
||||
cyclesStart_ = this->getCycles(runtime);
|
||||
cpuStart_ = this->getCPU();
|
||||
isMonitoringJank_ = true;
|
||||
}
|
||||
}
|
||||
|
||||
bool AutoStopwatch::exit() {
|
||||
JSRuntime* runtime = cx_->runtime();
|
||||
|
||||
uint64_t cyclesDelta = 0;
|
||||
if (isMonitoringJank_ &&
|
||||
runtime->performanceMonitoring().isMonitoringJank()) {
|
||||
// We were monitoring jank when we entered and we still are.
|
||||
|
||||
// If possible, discard results when we don't end on the
|
||||
// same CPU as we started. Note that we can be
|
||||
// rescheduled to another CPU beween `getCycles()` and
|
||||
// `getCPU()`. We hope that this will happen rarely
|
||||
// enough that the impact on our statistics will remain
|
||||
// limited.
|
||||
const cpuid_t cpuEnd = this->getCPU();
|
||||
if (isSameCPU(cpuStart_, cpuEnd)) {
|
||||
const uint64_t cyclesEnd = getCycles(runtime);
|
||||
cyclesDelta = cyclesEnd -
|
||||
cyclesStart_; // Always >= 0 by definition of `getCycles`.
|
||||
}
|
||||
// Temporary disable untested code path.
|
||||
#if 0 // WINVER >= 0x600
|
||||
updateTelemetry(cpuStart_, cpuEnd);
|
||||
#elif defined(__linux__)
|
||||
updateTelemetry(cpuStart_, cpuEnd);
|
||||
#endif // WINVER >= 0x600 || _linux__
|
||||
}
|
||||
|
||||
uint64_t CPOWTimeDelta = 0;
|
||||
if (isMonitoringCPOW_ &&
|
||||
runtime->performanceMonitoring().isMonitoringCPOW()) {
|
||||
// We were monitoring CPOW when we entered and we still are.
|
||||
const uint64_t CPOWTimeEnd = runtime->performanceMonitoring().totalCPOWTime;
|
||||
CPOWTimeDelta = getDelta(CPOWTimeEnd, CPOWTimeStart_);
|
||||
}
|
||||
return addToGroups(cyclesDelta, CPOWTimeDelta);
|
||||
}
|
||||
|
||||
void AutoStopwatch::updateTelemetry(const cpuid_t& cpuStart_,
|
||||
const cpuid_t& cpuEnd) {
|
||||
JSRuntime* runtime = cx_->runtime();
|
||||
|
||||
if (isSameCPU(cpuStart_, cpuEnd)) {
|
||||
runtime->performanceMonitoring().testCpuRescheduling.stayed += 1;
|
||||
} else {
|
||||
runtime->performanceMonitoring().testCpuRescheduling.moved += 1;
|
||||
}
|
||||
}
|
||||
|
||||
PerformanceGroup* AutoStopwatch::acquireGroup(PerformanceGroup* group) {
|
||||
MOZ_ASSERT(group);
|
||||
|
||||
if (group->isAcquired(iteration_)) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
if (!group->isActive()) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
group->acquire(iteration_, this);
|
||||
return group;
|
||||
}
|
||||
|
||||
void AutoStopwatch::releaseGroup(PerformanceGroup* group) {
|
||||
MOZ_ASSERT(group);
|
||||
group->release(iteration_, this);
|
||||
}
|
||||
|
||||
bool AutoStopwatch::addToGroups(uint64_t cyclesDelta, uint64_t CPOWTimeDelta) {
|
||||
JSRuntime* runtime = cx_->runtime();
|
||||
|
||||
for (auto group = groups_.begin(); group < groups_.end(); ++group) {
|
||||
if (!addToGroup(runtime, cyclesDelta, CPOWTimeDelta, *group)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool AutoStopwatch::addToGroup(JSRuntime* runtime, uint64_t cyclesDelta,
|
||||
uint64_t CPOWTimeDelta,
|
||||
PerformanceGroup* group) {
|
||||
MOZ_ASSERT(group);
|
||||
MOZ_ASSERT(group->isAcquired(iteration_, this));
|
||||
|
||||
if (!runtime->performanceMonitoring().addRecentGroup(group)) {
|
||||
return false;
|
||||
}
|
||||
group->addRecentTicks(iteration_, 1);
|
||||
group->addRecentCycles(iteration_, cyclesDelta);
|
||||
group->addRecentCPOW(iteration_, CPOWTimeDelta);
|
||||
return true;
|
||||
}
|
||||
|
||||
uint64_t AutoStopwatch::getDelta(const uint64_t end,
|
||||
const uint64_t start) const {
|
||||
if (start >= end) {
|
||||
return 0;
|
||||
}
|
||||
return end - start;
|
||||
}
|
||||
|
||||
uint64_t AutoStopwatch::getCycles(JSRuntime* runtime) const {
|
||||
return runtime->performanceMonitoring().monotonicReadTimestampCounter();
|
||||
}
|
||||
|
||||
cpuid_t inline AutoStopwatch::getCPU() const {
|
||||
// Temporary disable untested code path.
|
||||
#if 0 // defined(XP_WIN) && WINVER >= _WIN32_WINNT_VISTA
|
||||
PROCESSOR_NUMBER proc;
|
||||
GetCurrentProcessorNumberEx(&proc);
|
||||
|
||||
cpuid_t result(proc.Group, proc.Number);
|
||||
return result;
|
||||
#else
|
||||
return {};
|
||||
#endif // defined(XP_WIN)
|
||||
}
|
||||
|
||||
bool inline AutoStopwatch::isSameCPU(const cpuid_t& a, const cpuid_t& b) const {
|
||||
// Temporary disable untested code path.
|
||||
#if 0 // defined(XP_WIN) && WINVER >= _WIN32_WINNT_VISTA
|
||||
return a.group_ == b.group_ && a.number_ == b.number_;
|
||||
#else
|
||||
return true;
|
||||
#endif
|
||||
}
|
||||
|
||||
PerformanceGroup::PerformanceGroup()
|
||||
: recentCycles_(0),
|
||||
recentTicks_(0),
|
||||
recentCPOW_(0),
|
||||
iteration_(0),
|
||||
isActive_(false),
|
||||
isUsedInThisIteration_(false),
|
||||
owner_(nullptr),
|
||||
refCount_(0) {}
|
||||
|
||||
uint64_t PerformanceGroup::iteration() const { return iteration_; }
|
||||
|
||||
bool PerformanceGroup::isAcquired(uint64_t it) const {
|
||||
return owner_ != nullptr && iteration_ == it;
|
||||
}
|
||||
|
||||
bool PerformanceGroup::isAcquired(uint64_t it,
|
||||
const AutoStopwatch* owner) const {
|
||||
return owner_ == owner && iteration_ == it;
|
||||
}
|
||||
|
||||
void PerformanceGroup::acquire(uint64_t it, const AutoStopwatch* owner) {
|
||||
if (iteration_ != it) {
|
||||
// Any data that pretends to be recent is actually bound
|
||||
// to an older iteration and therefore stale.
|
||||
resetRecentData();
|
||||
}
|
||||
iteration_ = it;
|
||||
owner_ = owner;
|
||||
}
|
||||
|
||||
void PerformanceGroup::release(uint64_t it, const AutoStopwatch* owner) {
|
||||
if (iteration_ != it) {
|
||||
return;
|
||||
}
|
||||
|
||||
MOZ_ASSERT(owner == owner_ || owner_ == nullptr);
|
||||
owner_ = nullptr;
|
||||
}
|
||||
|
||||
void PerformanceGroup::resetRecentData() {
|
||||
recentCycles_ = 0;
|
||||
recentTicks_ = 0;
|
||||
recentCPOW_ = 0;
|
||||
isUsedInThisIteration_ = false;
|
||||
}
|
||||
|
||||
uint64_t PerformanceGroup::recentCycles(uint64_t iteration) const {
|
||||
MOZ_ASSERT(iteration == iteration_);
|
||||
return recentCycles_;
|
||||
}
|
||||
|
||||
void PerformanceGroup::addRecentCycles(uint64_t iteration, uint64_t cycles) {
|
||||
MOZ_ASSERT(iteration == iteration_);
|
||||
recentCycles_ += cycles;
|
||||
}
|
||||
|
||||
uint64_t PerformanceGroup::recentTicks(uint64_t iteration) const {
|
||||
MOZ_ASSERT(iteration == iteration_);
|
||||
return recentTicks_;
|
||||
}
|
||||
|
||||
void PerformanceGroup::addRecentTicks(uint64_t iteration, uint64_t ticks) {
|
||||
MOZ_ASSERT(iteration == iteration_);
|
||||
recentTicks_ += ticks;
|
||||
}
|
||||
|
||||
uint64_t PerformanceGroup::recentCPOW(uint64_t iteration) const {
|
||||
MOZ_ASSERT(iteration == iteration_);
|
||||
return recentCPOW_;
|
||||
}
|
||||
|
||||
void PerformanceGroup::addRecentCPOW(uint64_t iteration, uint64_t CPOW) {
|
||||
MOZ_ASSERT(iteration == iteration_);
|
||||
recentCPOW_ += CPOW;
|
||||
}
|
||||
|
||||
bool PerformanceGroup::isActive() const { return isActive_; }
|
||||
|
||||
void PerformanceGroup::setIsActive(bool value) { isActive_ = value; }
|
||||
|
||||
void PerformanceGroup::setIsUsedInThisIteration(bool value) {
|
||||
isUsedInThisIteration_ = value;
|
||||
}
|
||||
bool PerformanceGroup::isUsedInThisIteration() const {
|
||||
return isUsedInThisIteration_;
|
||||
}
|
||||
|
||||
void PerformanceGroup::AddRef() { ++refCount_; }
|
||||
|
||||
void PerformanceGroup::Release() {
|
||||
MOZ_ASSERT(refCount_ > 0);
|
||||
--refCount_;
|
||||
if (refCount_ > 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
JS::AutoSuppressGCAnalysis nogc;
|
||||
this->Delete();
|
||||
}
|
||||
|
||||
JS_PUBLIC_API bool SetStopwatchStartCallback(JSContext* cx,
|
||||
StopwatchStartCallback cb,
|
||||
void* closure) {
|
||||
cx->runtime()->performanceMonitoring().setStopwatchStartCallback(cb, closure);
|
||||
return true;
|
||||
}
|
||||
|
||||
JS_PUBLIC_API bool SetStopwatchCommitCallback(JSContext* cx,
|
||||
StopwatchCommitCallback cb,
|
||||
void* closure) {
|
||||
cx->runtime()->performanceMonitoring().setStopwatchCommitCallback(cb,
|
||||
closure);
|
||||
return true;
|
||||
}
|
||||
|
||||
JS_PUBLIC_API bool SetGetPerformanceGroupsCallback(JSContext* cx,
|
||||
GetGroupsCallback cb,
|
||||
void* closure) {
|
||||
cx->runtime()->performanceMonitoring().setGetGroupsCallback(cb, closure);
|
||||
return true;
|
||||
}
|
||||
|
||||
JS_PUBLIC_API bool FlushPerformanceMonitoring(JSContext* cx) {
|
||||
return cx->runtime()->performanceMonitoring().commit();
|
||||
}
|
||||
JS_PUBLIC_API void ResetPerformanceMonitoring(JSContext* cx) {
|
||||
return cx->runtime()->performanceMonitoring().reset();
|
||||
}
|
||||
JS_PUBLIC_API void DisposePerformanceMonitoring(JSContext* cx) {
|
||||
return cx->runtime()->performanceMonitoring().dispose(cx->runtime());
|
||||
}
|
||||
|
||||
JS_PUBLIC_API bool SetStopwatchIsMonitoringJank(JSContext* cx, bool value) {
|
||||
return cx->runtime()->performanceMonitoring().setIsMonitoringJank(value);
|
||||
}
|
||||
JS_PUBLIC_API bool GetStopwatchIsMonitoringJank(JSContext* cx) {
|
||||
return cx->runtime()->performanceMonitoring().isMonitoringJank();
|
||||
}
|
||||
|
||||
JS_PUBLIC_API bool SetStopwatchIsMonitoringCPOW(JSContext* cx, bool value) {
|
||||
return cx->runtime()->performanceMonitoring().setIsMonitoringCPOW(value);
|
||||
}
|
||||
JS_PUBLIC_API bool GetStopwatchIsMonitoringCPOW(JSContext* cx) {
|
||||
return cx->runtime()->performanceMonitoring().isMonitoringCPOW();
|
||||
}
|
||||
|
||||
JS_PUBLIC_API void GetPerfMonitoringTestCpuRescheduling(JSContext* cx,
|
||||
uint64_t* stayed,
|
||||
uint64_t* moved) {
|
||||
*stayed = cx->runtime()->performanceMonitoring().testCpuRescheduling.stayed;
|
||||
*moved = cx->runtime()->performanceMonitoring().testCpuRescheduling.moved;
|
||||
}
|
||||
|
||||
JS_PUBLIC_API void AddCPOWPerformanceDelta(JSContext* cx, uint64_t delta) {
|
||||
cx->runtime()->performanceMonitoring().totalCPOWTime += delta;
|
||||
}
|
||||
|
||||
} // namespace js
|
@ -1234,11 +1234,6 @@ void XPCJSContext::BeforeProcessTask(bool aMightBlock) {
|
||||
mSlowScriptSecondHalf = false;
|
||||
mSlowScriptActualWait = mozilla::TimeDuration();
|
||||
mTimeoutAccumulated = false;
|
||||
|
||||
// As we may be entering a nested event loop, we need to
|
||||
// cancel any ongoing performance measurement.
|
||||
js::ResetPerformanceMonitoring(Context());
|
||||
|
||||
CycleCollectedJSContext::BeforeProcessTask(aMightBlock);
|
||||
}
|
||||
|
||||
@ -1250,13 +1245,7 @@ void XPCJSContext::AfterProcessTask(uint32_t aNewRecursionDepth) {
|
||||
// Call cycle collector occasionally.
|
||||
MOZ_ASSERT(NS_IsMainThread());
|
||||
nsJSContext::MaybePokeCC();
|
||||
|
||||
CycleCollectedJSContext::AfterProcessTask(aNewRecursionDepth);
|
||||
|
||||
// Now that we are certain that the event is complete,
|
||||
// we can flush any ongoing performance measurement.
|
||||
js::FlushPerformanceMonitoring(Context());
|
||||
|
||||
mozilla::jsipc::AfterProcessTask();
|
||||
}
|
||||
|
||||
|
@ -53,12 +53,6 @@
|
||||
#include "nsTerminator.h"
|
||||
#endif
|
||||
|
||||
#define MOZ_HAS_PERFSTATS
|
||||
|
||||
#if defined(MOZ_HAS_PERFSTATS)
|
||||
#include "nsPerformanceStats.h"
|
||||
#endif // defined (MOZ_HAS_PERFSTATS)
|
||||
|
||||
#if defined(ENABLE_TESTS)
|
||||
#include "geckoview/TelemetryGeckoViewTesting.h"
|
||||
#endif
|
||||
@ -69,10 +63,6 @@ using namespace mozilla;
|
||||
|
||||
NS_GENERIC_FACTORY_CONSTRUCTOR_INIT(nsAppStartup, Init)
|
||||
|
||||
#if defined(MOZ_HAS_PERFSTATS)
|
||||
NS_GENERIC_FACTORY_CONSTRUCTOR_INIT(nsPerformanceStatsService, Init)
|
||||
#endif // defined (MOZ_HAS_PERFSTATS)
|
||||
|
||||
#if defined(MOZ_HAS_TERMINATOR)
|
||||
NS_GENERIC_FACTORY_CONSTRUCTOR(nsTerminator)
|
||||
#endif
|
||||
@ -138,10 +128,6 @@ NS_GENERIC_FACTORY_CONSTRUCTOR(TelemetryGeckoViewTestingImpl)
|
||||
#endif
|
||||
|
||||
NS_DEFINE_NAMED_CID(NS_TOOLKIT_APPSTARTUP_CID);
|
||||
#if defined(MOZ_HAS_PERFSTATS)
|
||||
NS_DEFINE_NAMED_CID(NS_TOOLKIT_PERFORMANCESTATSSERVICE_CID);
|
||||
#endif // defined (MOZ_HAS_PERFSTATS)
|
||||
|
||||
#if defined(MOZ_HAS_TERMINATOR)
|
||||
NS_DEFINE_NAMED_CID(NS_TOOLKIT_TERMINATOR_CID);
|
||||
#endif
|
||||
@ -180,10 +166,6 @@ static const Module::CIDEntry kToolkitCIDs[] = {
|
||||
#if defined(MOZ_HAS_TERMINATOR)
|
||||
{&kNS_TOOLKIT_TERMINATOR_CID, false, nullptr, nsTerminatorConstructor},
|
||||
#endif
|
||||
#if defined(MOZ_HAS_PERFSTATS)
|
||||
{&kNS_TOOLKIT_PERFORMANCESTATSSERVICE_CID, false, nullptr,
|
||||
nsPerformanceStatsServiceConstructor},
|
||||
#endif // defined (MOZ_HAS_PERFSTATS)
|
||||
{&kNS_USERINFO_CID, false, nullptr, nsUserInfoConstructor},
|
||||
{&kALERT_NOTIFICATION_CID, false, nullptr, AlertNotificationConstructor},
|
||||
{&kNS_ALERTSSERVICE_CID, false, nullptr, nsAlertsServiceConstructor},
|
||||
@ -236,10 +218,6 @@ static const Module::ContractIDEntry kToolkitContracts[] = {
|
||||
#if defined(MOZ_HAS_TERMINATOR)
|
||||
{NS_TOOLKIT_TERMINATOR_CONTRACTID, &kNS_TOOLKIT_TERMINATOR_CID},
|
||||
#endif
|
||||
#if defined(MOZ_HAS_PERFSTATS)
|
||||
{NS_TOOLKIT_PERFORMANCESTATSSERVICE_CONTRACTID,
|
||||
&kNS_TOOLKIT_PERFORMANCESTATSSERVICE_CID},
|
||||
#endif // defined (MOZ_HAS_PERFSTATS)
|
||||
{NS_USERINFO_CONTRACTID, &kNS_USERINFO_CID},
|
||||
{ALERT_NOTIFICATION_CONTRACTID, &kALERT_NOTIFICATION_CID},
|
||||
{NS_ALERTSERVICE_CONTRACTID, &kNS_ALERTSSERVICE_CID},
|
||||
|
@ -1,141 +0,0 @@
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
/**
|
||||
* A proxy implementing communication between the PerformanceStats.jsm modules
|
||||
* of the parent and children processes.
|
||||
*
|
||||
* This script is loaded in all processes but is essentially a NOOP in the
|
||||
* parent process.
|
||||
*/
|
||||
|
||||
"use strict";
|
||||
|
||||
const { Services } = ChromeUtils.import("resource://gre/modules/Services.jsm", {});
|
||||
const { XPCOMUtils } = ChromeUtils.import("resource://gre/modules/XPCOMUtils.jsm", {});
|
||||
|
||||
ChromeUtils.defineModuleGetter(this, "PerformanceStats",
|
||||
"resource://gre/modules/PerformanceStats.jsm");
|
||||
|
||||
/**
|
||||
* A global performance monitor used by this process.
|
||||
*
|
||||
* For the sake of simplicity, rather than attempting to map each PerformanceMonitor
|
||||
* of the parent to a PerformanceMonitor in each child process, we maintain a single
|
||||
* PerformanceMonitor in each child process. Probes activation/deactivation for this
|
||||
* monitor is controlled by the activation/deactivation of probes in the parent.
|
||||
*
|
||||
* In the parent, this is always an empty monitor.
|
||||
*/
|
||||
var gMonitor = PerformanceStats.getMonitor([]);
|
||||
|
||||
/**
|
||||
* `true` if this is a content process, `false` otherwise.
|
||||
*/
|
||||
var isContent = Services.appinfo.processType == Services.appinfo.PROCESS_TYPE_CONTENT;
|
||||
|
||||
/**
|
||||
* Handle message `performance-stats-service-acquire`: ensure that the global
|
||||
* monitor has a given probe. This message must be sent by the parent process
|
||||
* whenever a probe is activated application-wide.
|
||||
*
|
||||
* Note that we may miss acquire messages if they are sent before this process is
|
||||
* launched. For this reason, `performance-stats-service-collect` automatically
|
||||
* re-acquires probes if it realizes that they are missing.
|
||||
*
|
||||
* This operation is a NOOP on the parent process.
|
||||
*
|
||||
* @param {{payload: Array<string>}} msg.data The message received. `payload`
|
||||
* must be an array of probe names.
|
||||
*/
|
||||
Services.cpmm.addMessageListener("performance-stats-service-acquire", function(msg) {
|
||||
if (!isContent) {
|
||||
return;
|
||||
}
|
||||
let name = msg.data.payload;
|
||||
ensureAcquired(name);
|
||||
});
|
||||
|
||||
/**
|
||||
* Handle message `performance-stats-service-release`: release a given probe
|
||||
* from the global monitor. This message must be sent by the parent process
|
||||
* whenever a probe is deactivated application-wide.
|
||||
*
|
||||
* Note that we may miss release messages if they are sent before this process is
|
||||
* launched. This is ok, as probes are inactive by default: if we miss the release
|
||||
* message, we have already missed the acquire message, and the effect of both
|
||||
* messages together is to reset to the default state.
|
||||
*
|
||||
* This operation is a NOOP on the parent process.
|
||||
*
|
||||
* @param {{payload: Array<string>}} msg.data The message received. `payload`
|
||||
* must be an array of probe names.
|
||||
*/
|
||||
Services.cpmm.addMessageListener("performance-stats-service-release", function(msg) {
|
||||
if (!isContent) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Keep only the probes that do not appear in the payload
|
||||
let probes = gMonitor.probeNames
|
||||
.filter(x => !msg.data.payload.includes(x));
|
||||
gMonitor = PerformanceStats.getMonitor(probes);
|
||||
});
|
||||
|
||||
/**
|
||||
* Ensure that this process has all the probes it needs.
|
||||
*
|
||||
* @param {Array<string>} probeNames The name of all probes needed by the
|
||||
* process.
|
||||
*/
|
||||
function ensureAcquired(probeNames) {
|
||||
let alreadyAcquired = gMonitor.probeNames;
|
||||
|
||||
// Algorithm is O(n^2) because we expect that n ≤ 3.
|
||||
let shouldAcquire = [];
|
||||
for (let probeName of probeNames) {
|
||||
if (!alreadyAcquired.includes(probeName)) {
|
||||
shouldAcquire.push(probeName);
|
||||
}
|
||||
}
|
||||
|
||||
if (shouldAcquire.length == 0) {
|
||||
return;
|
||||
}
|
||||
gMonitor = PerformanceStats.getMonitor([...alreadyAcquired, ...shouldAcquire]);
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle message `performance-stats-service-collected`: collect the data
|
||||
* obtained by the monitor. This message must be sent by the parent process
|
||||
* whenever we grab a performance snapshot of the application.
|
||||
*
|
||||
* This operation provides `null` on the parent process.
|
||||
*
|
||||
* @param {{data: {payload: Array<string>}}} msg The message received. `payload`
|
||||
* must be an array of probe names.
|
||||
*/
|
||||
Services.cpmm.addMessageListener("performance-stats-service-collect", async function(msg) {
|
||||
let {id, payload: {probeNames}} = msg.data;
|
||||
if (!isContent) {
|
||||
// This message was sent by the parent process to itself.
|
||||
// As per protocol, respond `null`.
|
||||
Services.cpmm.sendAsyncMessage("performance-stats-service-collect", {
|
||||
id,
|
||||
data: null,
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
// We may have missed acquire messages if the process was loaded too late.
|
||||
// Catch up now.
|
||||
ensureAcquired(probeNames);
|
||||
|
||||
// Collect and return data.
|
||||
let data = await gMonitor.promiseSnapshot({probeNames});
|
||||
Services.cpmm.sendAsyncMessage("performance-stats-service-collect", {
|
||||
id,
|
||||
data,
|
||||
});
|
||||
});
|
@ -1,980 +0,0 @@
|
||||
// -*- indent-tabs-mode: nil; js-indent-level: 2 -*-
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
* You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
"use strict";
|
||||
|
||||
var EXPORTED_SYMBOLS = ["PerformanceStats"];
|
||||
|
||||
/**
|
||||
* API for querying and examining performance data.
|
||||
*
|
||||
* This API exposes data from several probes implemented by the JavaScript VM.
|
||||
* See `PerformanceStats.getMonitor()` for information on how to monitor data
|
||||
* from one or more probes and `PerformanceData` for the information obtained
|
||||
* from the probes.
|
||||
*
|
||||
* Data is collected by "Performance Group". Typically, a Performance Group
|
||||
* is a frame, or the internals of the application.
|
||||
*
|
||||
* Generally, if you have the choice between PerformanceStats and PerformanceWatcher,
|
||||
* you should favor PerformanceWatcher.
|
||||
*/
|
||||
|
||||
ChromeUtils.import("resource://gre/modules/XPCOMUtils.jsm", this);
|
||||
ChromeUtils.import("resource://gre/modules/Services.jsm", this);
|
||||
ChromeUtils.defineModuleGetter(this, "PromiseUtils",
|
||||
"resource://gre/modules/PromiseUtils.jsm");
|
||||
ChromeUtils.defineModuleGetter(this, "setTimeout",
|
||||
"resource://gre/modules/Timer.jsm");
|
||||
ChromeUtils.defineModuleGetter(this, "clearTimeout",
|
||||
"resource://gre/modules/Timer.jsm");
|
||||
|
||||
// The nsIPerformanceStatsService provides lower-level
|
||||
// access to SpiderMonkey and the probes.
|
||||
XPCOMUtils.defineLazyServiceGetter(this, "performanceStatsService",
|
||||
"@mozilla.org/toolkit/performance-stats-service;1",
|
||||
Ci.nsIPerformanceStatsService);
|
||||
|
||||
// The finalizer lets us automatically release (and when possible deactivate)
|
||||
// probes when a monitor is garbage-collected.
|
||||
XPCOMUtils.defineLazyServiceGetter(this, "finalizer",
|
||||
"@mozilla.org/toolkit/finalizationwitness;1",
|
||||
Ci.nsIFinalizationWitnessService
|
||||
);
|
||||
|
||||
// The topic used to notify that a PerformanceMonitor has been garbage-collected
|
||||
// and that we can release/close the probes it holds.
|
||||
const FINALIZATION_TOPIC = "performancemonitor-finalize";
|
||||
|
||||
const PROPERTIES_META_IMMUTABLE = ["isSystem", "isChildProcess", "groupId", "processId"];
|
||||
const PROPERTIES_META = [...PROPERTIES_META_IMMUTABLE, "windowId", "title", "name"];
|
||||
|
||||
// How long we wait for children processes to respond.
|
||||
const MAX_WAIT_FOR_CHILD_PROCESS_MS = 5000;
|
||||
|
||||
var isContent = Services.appinfo.processType == Services.appinfo.PROCESS_TYPE_CONTENT;
|
||||
/**
|
||||
* Access to a low-level performance probe.
|
||||
*
|
||||
* Each probe is dedicated to some form of performance monitoring.
|
||||
* As each probe may have a performance impact, a probe is activated
|
||||
* only when a client has requested a PerformanceMonitor for this probe,
|
||||
* and deactivated once all clients are disposed of.
|
||||
*/
|
||||
function Probe(name, impl) {
|
||||
this._name = name;
|
||||
this._counter = 0;
|
||||
this._impl = impl;
|
||||
}
|
||||
Probe.prototype = {
|
||||
/**
|
||||
* Acquire the probe on behalf of a client.
|
||||
*
|
||||
* If the probe was inactive, activate it. Note that activating a probe
|
||||
* can incur a memory or performance cost.
|
||||
*/
|
||||
acquire() {
|
||||
if (this._counter == 0) {
|
||||
this._impl.isActive = true;
|
||||
Process.broadcast("acquire", [this._name]);
|
||||
}
|
||||
this._counter++;
|
||||
},
|
||||
|
||||
/**
|
||||
* Release the probe on behalf of a client.
|
||||
*
|
||||
* If this was the last client for this probe, deactivate it.
|
||||
*/
|
||||
release() {
|
||||
this._counter--;
|
||||
if (this._counter == 0) {
|
||||
try {
|
||||
this._impl.isActive = false;
|
||||
} catch (ex) {
|
||||
if (ex && typeof ex == "object" && ex.result == Cr.NS_ERROR_NOT_AVAILABLE) {
|
||||
// The service has already been shutdown. Ignore further shutdown requests.
|
||||
return;
|
||||
}
|
||||
throw ex;
|
||||
}
|
||||
Process.broadcast("release", [this._name]);
|
||||
}
|
||||
},
|
||||
|
||||
/**
|
||||
* Obtain data from this probe, once it is available.
|
||||
*
|
||||
* @param {nsIPerformanceStats} xpcom A xpcom object obtained from
|
||||
* SpiderMonkey. Only the fields updated by the low-level probe
|
||||
* are in a specified state.
|
||||
* @return {object} An object containing the data extracted from this
|
||||
* probe. Actual format depends on the probe.
|
||||
*/
|
||||
extract(xpcom) {
|
||||
if (!this._impl.isActive) {
|
||||
throw new Error(`Probe is inactive: ${this._name}`);
|
||||
}
|
||||
return this._impl.extract(xpcom);
|
||||
},
|
||||
|
||||
/**
|
||||
* @param {object} a An object returned by `this.extract()`.
|
||||
* @param {object} b An object returned by `this.extract()`.
|
||||
*
|
||||
* @return {true} If `a` and `b` hold identical values.
|
||||
*/
|
||||
isEqual(a, b) {
|
||||
if (a == null && b == null) {
|
||||
return true;
|
||||
}
|
||||
if (a != null && b != null) {
|
||||
return this._impl.isEqual(a, b);
|
||||
}
|
||||
return false;
|
||||
},
|
||||
|
||||
/**
|
||||
* @param {object} a An object returned by `this.extract()`. May
|
||||
* NOT be `null`.
|
||||
* @param {object} b An object returned by `this.extract()`. May
|
||||
* be `null`.
|
||||
*
|
||||
* @return {object} An object representing `a - b`. If `b` is
|
||||
* `null`, this is `a`.
|
||||
*/
|
||||
subtract(a, b) {
|
||||
if (a == null) {
|
||||
throw new TypeError();
|
||||
}
|
||||
if (b == null) {
|
||||
return a;
|
||||
}
|
||||
return this._impl.subtract(a, b);
|
||||
},
|
||||
|
||||
importChildCompartments(parent, children) {
|
||||
if (!Array.isArray(children)) {
|
||||
throw new TypeError();
|
||||
}
|
||||
if (!parent || !(parent instanceof PerformanceDataLeaf)) {
|
||||
throw new TypeError();
|
||||
}
|
||||
return this._impl.importChildCompartments(parent, children);
|
||||
},
|
||||
|
||||
/**
|
||||
* The name of the probe.
|
||||
*/
|
||||
get name() {
|
||||
return this._name;
|
||||
},
|
||||
|
||||
compose(stats) {
|
||||
if (!Array.isArray(stats)) {
|
||||
throw new TypeError();
|
||||
}
|
||||
return this._impl.compose(stats);
|
||||
},
|
||||
};
|
||||
|
||||
// Utility function. Return the position of the last non-0 item in an
|
||||
// array, or -1 if there isn't any such item.
|
||||
function lastNonZero(array) {
|
||||
for (let i = array.length - 1; i >= 0; --i) {
|
||||
if (array[i] != 0) {
|
||||
return i;
|
||||
}
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
/**
|
||||
* The actual Probes implemented by SpiderMonkey.
|
||||
*/
|
||||
var Probes = {
|
||||
/**
|
||||
* A probe measuring jank.
|
||||
*
|
||||
* Data provided by this probe uses the following format:
|
||||
*
|
||||
* @field {number} totalCPUTime The total amount of time spent using the
|
||||
* CPU for this performance group, in µs.
|
||||
* @field {number} totalSystemTime The total amount of time spent in the
|
||||
* kernel for this performance group, in µs.
|
||||
* @field {Array<number>} durations An array containing at each position `i`
|
||||
* the number of times execution of this component has lasted at least `2^i`
|
||||
* milliseconds.
|
||||
* @field {number} longestDuration The index of the highest non-0 value in
|
||||
* `durations`.
|
||||
*/
|
||||
jank: new Probe("jank", {
|
||||
set isActive(x) {
|
||||
performanceStatsService.isMonitoringJank = x;
|
||||
},
|
||||
get isActive() {
|
||||
return performanceStatsService.isMonitoringJank;
|
||||
},
|
||||
extract(xpcom) {
|
||||
let durations = xpcom.getDurations();
|
||||
return {
|
||||
totalUserTime: xpcom.totalUserTime,
|
||||
totalSystemTime: xpcom.totalSystemTime,
|
||||
totalCPUTime: xpcom.totalUserTime + xpcom.totalSystemTime,
|
||||
durations,
|
||||
longestDuration: lastNonZero(durations),
|
||||
};
|
||||
},
|
||||
isEqual(a, b) {
|
||||
// invariant: `a` and `b` are both non-null
|
||||
if (a.totalUserTime != b.totalUserTime) {
|
||||
return false;
|
||||
}
|
||||
if (a.totalSystemTime != b.totalSystemTime) {
|
||||
return false;
|
||||
}
|
||||
for (let i = 0; i < a.durations.length; ++i) {
|
||||
if (a.durations[i] != b.durations[i]) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
},
|
||||
subtract(a, b) {
|
||||
// invariant: `a` and `b` are both non-null
|
||||
let result = {
|
||||
totalUserTime: a.totalUserTime - b.totalUserTime,
|
||||
totalSystemTime: a.totalSystemTime - b.totalSystemTime,
|
||||
totalCPUTime: a.totalCPUTime - b.totalCPUTime,
|
||||
durations: [],
|
||||
longestDuration: -1,
|
||||
};
|
||||
for (let i = 0; i < a.durations.length; ++i) {
|
||||
result.durations[i] = a.durations[i] - b.durations[i];
|
||||
}
|
||||
result.longestDuration = lastNonZero(result.durations);
|
||||
return result;
|
||||
},
|
||||
importChildCompartments() { /* nothing to do */ },
|
||||
compose(stats) {
|
||||
let result = {
|
||||
totalUserTime: 0,
|
||||
totalSystemTime: 0,
|
||||
totalCPUTime: 0,
|
||||
durations: [],
|
||||
longestDuration: -1,
|
||||
};
|
||||
for (let stat of stats) {
|
||||
result.totalUserTime += stat.totalUserTime;
|
||||
result.totalSystemTime += stat.totalSystemTime;
|
||||
result.totalCPUTime += stat.totalCPUTime;
|
||||
for (let i = 0; i < stat.durations.length; ++i) {
|
||||
result.durations[i] += stat.durations[i];
|
||||
}
|
||||
result.longestDuration = Math.max(result.longestDuration, stat.longestDuration);
|
||||
}
|
||||
return result;
|
||||
},
|
||||
}),
|
||||
|
||||
/**
|
||||
* A probe measuring CPOW activity.
|
||||
*
|
||||
* Data provided by this probe uses the following format:
|
||||
*
|
||||
* @field {number} totalCPOWTime The amount of wallclock time
|
||||
* spent executing blocking cross-process calls, in µs.
|
||||
*/
|
||||
cpow: new Probe("cpow", {
|
||||
set isActive(x) {
|
||||
performanceStatsService.isMonitoringCPOW = x;
|
||||
},
|
||||
get isActive() {
|
||||
return performanceStatsService.isMonitoringCPOW;
|
||||
},
|
||||
extract(xpcom) {
|
||||
return {
|
||||
totalCPOWTime: xpcom.totalCPOWTime,
|
||||
};
|
||||
},
|
||||
isEqual(a, b) {
|
||||
return a.totalCPOWTime == b.totalCPOWTime;
|
||||
},
|
||||
subtract(a, b) {
|
||||
return {
|
||||
totalCPOWTime: a.totalCPOWTime - b.totalCPOWTime,
|
||||
};
|
||||
},
|
||||
importChildCompartments() { /* nothing to do */ },
|
||||
compose(stats) {
|
||||
let totalCPOWTime = 0;
|
||||
for (let stat of stats) {
|
||||
totalCPOWTime += stat.totalCPOWTime;
|
||||
}
|
||||
return { totalCPOWTime };
|
||||
},
|
||||
}),
|
||||
|
||||
/**
|
||||
* A probe measuring activations, i.e. the number
|
||||
* of times code execution has entered a given
|
||||
* PerformanceGroup.
|
||||
*
|
||||
* Note that this probe is always active.
|
||||
*
|
||||
* Data provided by this probe uses the following format:
|
||||
* @type {number} ticks The number of times execution has entered
|
||||
* this performance group.
|
||||
*/
|
||||
ticks: new Probe("ticks", {
|
||||
set isActive(x) { /* this probe cannot be deactivated */ },
|
||||
get isActive() { return true; },
|
||||
extract(xpcom) {
|
||||
return {
|
||||
ticks: xpcom.ticks,
|
||||
};
|
||||
},
|
||||
isEqual(a, b) {
|
||||
return a.ticks == b.ticks;
|
||||
},
|
||||
subtract(a, b) {
|
||||
return {
|
||||
ticks: a.ticks - b.ticks,
|
||||
};
|
||||
},
|
||||
importChildCompartments() { /* nothing to do */ },
|
||||
compose(stats) {
|
||||
let ticks = 0;
|
||||
for (let stat of stats) {
|
||||
ticks += stat.ticks;
|
||||
}
|
||||
return { ticks };
|
||||
},
|
||||
}),
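
/**
 * A probe activating per-compartment monitoring.
 *
 * This probe exposes no numeric data of its own (`extract` and `compose`
 * return trivial values); its `importChildCompartments` hook attaches the
 * child compartments to their parent group, and per-compartment monitoring
 * is toggled through `isMonitoringPerCompartment`.
 */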
|
||||
|
||||
compartments: new Probe("compartments", {
|
||||
set isActive(x) {
|
||||
performanceStatsService.isMonitoringPerCompartment = x;
|
||||
},
|
||||
get isActive() {
|
||||
return performanceStatsService.isMonitoringPerCompartment;
|
||||
},
|
||||
extract(xpcom) {
|
||||
return null;
|
||||
},
|
||||
isEqual(a, b) {
|
||||
return true;
|
||||
},
|
||||
subtract(a, b) {
|
||||
return true;
|
||||
},
|
||||
importChildCompartments(parent, children) {
|
||||
parent.children = children;
|
||||
},
|
||||
compose(stats) {
|
||||
return null;
|
||||
},
|
||||
}),
|
||||
};
|
||||
|
||||
/**
|
||||
* A monitor for a set of probes.
|
||||
*
|
||||
* Keeping probes active when they are unused is often a bad
|
||||
* idea for performance reasons. Upon destruction, or whenever
|
||||
* a client calls `dispose`, this monitor releases the probes,
|
||||
* which may let the system deactivate them.
|
||||
*/
|
||||
function PerformanceMonitor(probes) {
|
||||
this._probes = probes;
|
||||
|
||||
// Activate low-level features as needed
|
||||
for (let probe of probes) {
|
||||
probe.acquire();
|
||||
}
|
||||
|
||||
// A finalization witness. At some point after the garbage-collection of
|
||||
// `this` object, a notification of `FINALIZATION_TOPIC` will be triggered
|
||||
// with `id` as message.
|
||||
this._id = PerformanceMonitor.makeId();
|
||||
this._finalizer = finalizer.make(FINALIZATION_TOPIC, this._id);
|
||||
PerformanceMonitor._monitors.set(this._id, probes);
|
||||
}
|
||||
PerformanceMonitor.prototype = {
|
||||
/**
|
||||
* The names of probes activated in this monitor.
|
||||
*/
|
||||
get probeNames() {
|
||||
return this._probes.map(probe => probe.name);
|
||||
},
|
||||
|
||||
/**
|
||||
* Return asynchronously a snapshot with the data
|
||||
* for each probe monitored by this PerformanceMonitor.
|
||||
*
|
||||
* All numeric values are non-negative and can only increase. Depending on
|
||||
* the probe and the underlying operating system, probes may not be available
|
||||
* immediately and may miss some activity.
|
||||
*
|
||||
* Clients should NOT expect that the first call to `promiseSnapshot()`
|
||||
* will return a `Snapshot` in which all values are 0. For most uses,
|
||||
* the appropriate scenario is to perform a first call to `promiseSnapshot()`
|
||||
* to obtain a baseline, and then watch evolution of the values by calling
|
||||
* `promiseSnapshot()` and `subtract()`.
|
||||
*
|
||||
* On the other hand, numeric values are also monotonic across several instances
|
||||
* of a PerformanceMonitor with the same probes.
|
||||
* let a = PerformanceStats.getMonitor(someProbes);
|
||||
* let snapshot1 = await a.promiseSnapshot();
|
||||
*
|
||||
* // ...
|
||||
* let b = PerformanceStats.getMonitor(someProbes); // Same list of probes
|
||||
* let snapshot2 = await b.promiseSnapshot();
|
||||
*
|
||||
* // all values of `snapshot2` are greater or equal to values of `snapshot1`.
|
||||
*
|
||||
* @param {object} options If provided, an object that may contain the following
|
||||
* fields:
|
||||
* {Array<string>} probeNames The subset of probes to use for this snapshot.
|
||||
* These probes must be a subset of the probes active in the monitor.
|
||||
*
|
||||
* @return {Promise}
|
||||
* @resolve {Snapshot}
|
||||
*/
|
||||
_checkBeforeSnapshot(options) {
|
||||
if (!this._finalizer) {
|
||||
throw new Error("dispose() has already been called, this PerformanceMonitor is not usable anymore");
|
||||
}
|
||||
let probes;
|
||||
if (options && options.probeNames) {
|
||||
if (!Array.isArray(options.probeNames)) {
|
||||
throw new TypeError();
|
||||
}
|
||||
// Make sure that we only request probes that we have
|
||||
for (let probeName of options.probeNames) {
|
||||
let probe = this._probes.find(probe => probe.name == probeName);
|
||||
if (!probe) {
|
||||
throw new TypeError(`Probe ${probeName} is not active in this monitor (active probes: ${this.probeNames})`);
|
||||
}
|
||||
if (!probes) {
|
||||
probes = [];
|
||||
}
|
||||
probes.push(probe);
|
||||
}
|
||||
} else {
|
||||
probes = this._probes;
|
||||
}
|
||||
return probes;
|
||||
},
|
||||
promiseContentSnapshot(options = null) {
|
||||
let probes = this._checkBeforeSnapshot(options);
return (new ProcessSnapshot({xpcom: performanceStatsService.getSnapshot(), probes}));
|
||||
},
|
||||
promiseSnapshot(options = null) {
|
||||
let probes = this._checkBeforeSnapshot(options);
|
||||
return (async function() {
|
||||
let childProcesses = await Process.broadcastAndCollect("collect", {probeNames: probes.map(p => p.name)});
|
||||
let xpcom = performanceStatsService.getSnapshot();
|
||||
return new ApplicationSnapshot({
|
||||
xpcom,
|
||||
childProcesses,
|
||||
probes,
|
||||
date: Cu.now(),
|
||||
});
|
||||
})();
|
||||
},
|
||||
|
||||
/**
|
||||
* Release the probes used by this monitor.
|
||||
*
|
||||
* Releasing probes as soon as they are unused is a good idea, as some probes
|
||||
* cost CPU and/or memory.
|
||||
*/
|
||||
dispose() {
|
||||
if (!this._finalizer) {
|
||||
return;
|
||||
}
|
||||
this._finalizer.forget();
|
||||
PerformanceMonitor.dispose(this._id);
|
||||
|
||||
// As a safeguard against double-release, reset everything to `null`
|
||||
this._probes = null;
|
||||
this._id = null;
|
||||
this._finalizer = null;
|
||||
},
|
||||
};
|
||||
/**
|
||||
* @type {Map<string, Array<string>>} A map from id (as produced by `makeId`)
|
||||
* to list of probes. Used to deallocate a list of probes during finalization.
|
||||
*/
|
||||
PerformanceMonitor._monitors = new Map();
|
||||
|
||||
/**
|
||||
* Create a `PerformanceMonitor` for a list of probes, register it for
|
||||
* finalization.
|
||||
*/
|
||||
PerformanceMonitor.make = function(probeNames) {
|
||||
// Sanity checks
|
||||
if (!Array.isArray(probeNames)) {
|
||||
throw new TypeError("Expected an array, got " + probeNames);
|
||||
}
|
||||
let probes = [];
|
||||
for (let probeName of probeNames) {
|
||||
if (!(probeName in Probes)) {
|
||||
throw new TypeError("Probe not implemented: " + probeName);
|
||||
}
|
||||
probes.push(Probes[probeName]);
|
||||
}
|
||||
|
||||
return (new PerformanceMonitor(probes));
|
||||
};
|
||||
|
||||
/**
|
||||
* Implementation of `dispose`.
|
||||
*
|
||||
* The actual implementation of `dispose` is as a method of `PerformanceMonitor`,
|
||||
* rather than `PerformanceMonitor.prototype`, to avoid needing a strong reference
|
||||
* to instances of `PerformanceMonitor`, which would defeat the purpose of
|
||||
* finalization.
|
||||
*/
|
||||
PerformanceMonitor.dispose = function(id) {
|
||||
let probes = PerformanceMonitor._monitors.get(id);
|
||||
if (!probes) {
|
||||
throw new TypeError("`dispose()` has already been called on this monitor");
|
||||
}
|
||||
|
||||
PerformanceMonitor._monitors.delete(id);
|
||||
for (let probe of probes) {
|
||||
probe.release();
|
||||
}
|
||||
};
|
||||
|
||||
// Generate a unique id for each PerformanceMonitor. Used during
|
||||
// finalization.
|
||||
PerformanceMonitor._counter = 0;
|
||||
PerformanceMonitor.makeId = function() {
|
||||
return "PerformanceMonitor-" + (this._counter++);
|
||||
};
|
||||
|
||||
// Once a `PerformanceMonitor` has been garbage-collected,
|
||||
// release the probes unless `dispose()` has already been called.
|
||||
Services.obs.addObserver(function(subject, topic, value) {
|
||||
PerformanceMonitor.dispose(value);
|
||||
}, FINALIZATION_TOPIC);
|
||||
|
||||
// Public API
|
||||
var PerformanceStats = {
|
||||
/**
|
||||
* Create a monitor for observing a set of performance probes.
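*
* Example: `let monitor = PerformanceStats.getMonitor(["jank", "cpow"]);`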
|
||||
*/
|
||||
getMonitor(probes) {
|
||||
return PerformanceMonitor.make(probes);
|
||||
},
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
* Information on a single performance group.
|
||||
*
|
||||
* This offers the following fields:
|
||||
*
|
||||
* @field {string} name The name of the performance group:
|
||||
* - for the process itself, "<process>";
|
||||
* - for platform code, "<platform>";
|
||||
* - for a webpage, the url of the page.
|
||||
*
|
||||
* @field {string|null} title The title of the webpage to which this code
|
||||
* belongs. Note that this is the title of the entire webpage (i.e. the tab),
|
||||
* even if the code is executed in an iframe. Also note that this title may
|
||||
* change over time.
|
||||
*
|
||||
* @field {number} windowId The outer window ID of the top-level nsIDOMWindow
|
||||
* to which this code belongs. May be 0 if the code doesn't belong to any
|
||||
* nsIDOMWindow.
|
||||
*
|
||||
* @field {boolean} isSystem `true` if the component is a system component (i.e.
|
||||
* an add-on or platform-code), `false` otherwise (i.e. a webpage).
|
||||
*
|
||||
* @field {object|undefined} activations See the documentation of probe "ticks".
|
||||
* `undefined` if this probe is not active.
|
||||
*
|
||||
* @field {object|undefined} jank See the documentation of probe "jank".
|
||||
* `undefined` if this probe is not active.
|
||||
*
|
||||
* @field {object|undefined} cpow See the documentation of probe "cpow".
|
||||
* `undefined` if this probe is not active.
|
||||
*/
|
||||
function PerformanceDataLeaf({xpcom, json, probes}) {
|
||||
if (xpcom && json) {
|
||||
throw new TypeError("Cannot import both xpcom and json data");
|
||||
}
|
||||
let source = xpcom || json;
|
||||
for (let k of PROPERTIES_META) {
|
||||
this[k] = source[k];
|
||||
}
|
||||
if (xpcom) {
|
||||
for (let probe of probes) {
|
||||
this[probe.name] = probe.extract(xpcom);
|
||||
}
|
||||
this.isChildProcess = false;
|
||||
} else {
|
||||
for (let probe of probes) {
|
||||
this[probe.name] = json[probe.name];
|
||||
}
|
||||
this.isChildProcess = true;
|
||||
}
|
||||
this.owner = null;
|
||||
}
|
||||
PerformanceDataLeaf.prototype = {
|
||||
/**
|
||||
* Compare two instances of `PerformanceData`
|
||||
*
|
||||
* @return `true` if `this` and `to` have equal values in all fields.
|
||||
*/
|
||||
equals(to) {
|
||||
if (!(to instanceof PerformanceDataLeaf)) {
|
||||
throw new TypeError();
|
||||
}
|
||||
for (let probeName of Object.keys(Probes)) {
|
||||
let probe = Probes[probeName];
|
||||
if (!probe.isEqual(this[probeName], to[probeName])) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
},
|
||||
|
||||
/**
|
||||
* Compute the delta between two instances of `PerformanceData`.
|
||||
*
|
||||
* @param {PerformanceData|null} to. If `null`, assumed an instance of
|
||||
* `PerformanceData` in which all numeric values are 0.
|
||||
*
|
||||
* @return {PerformanceDiff} The performance usage between `to` and `this`.
|
||||
*/
|
||||
subtract(to = null) {
|
||||
return (new PerformanceDiffLeaf(this, to));
|
||||
},
|
||||
};
|
||||
|
||||
function PerformanceData(timestamp) {
|
||||
this._parent = null;
|
||||
this._content = new Map();
|
||||
this._all = [];
|
||||
this._timestamp = timestamp;
|
||||
}
|
||||
PerformanceData.prototype = {
|
||||
addChild(stat) {
|
||||
if (!(stat instanceof PerformanceDataLeaf)) {
|
||||
throw new TypeError(); // FIXME
|
||||
}
|
||||
if (!stat.isChildProcess) {
|
||||
throw new TypeError(); // FIXME
|
||||
}
|
||||
this._content.set(stat.groupId, stat);
|
||||
this._all.push(stat);
|
||||
stat.owner = this;
|
||||
},
|
||||
setParent(stat) {
|
||||
if (!(stat instanceof PerformanceDataLeaf)) {
|
||||
throw new TypeError(); // FIXME
|
||||
}
|
||||
if (stat.isChildProcess) {
|
||||
throw new TypeError(); // FIXME
|
||||
}
|
||||
this._parent = stat;
|
||||
this._all.push(stat);
|
||||
stat.owner = this;
|
||||
},
|
||||
equals(to) {
|
||||
if (this._parent && !to._parent) {
|
||||
return false;
|
||||
}
|
||||
if (!this._parent && to._parent) {
|
||||
return false;
|
||||
}
|
||||
if (this._content.size != to._content.size) {
|
||||
return false;
|
||||
}
|
||||
if (this._parent && !this._parent.equals(to._parent)) {
|
||||
return false;
|
||||
}
|
||||
for (let [k, v] of this._content) {
|
||||
let v2 = to._content.get(k);
|
||||
if (!v2) {
|
||||
return false;
|
||||
}
|
||||
if (!v.equals(v2)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
},
|
||||
subtract(to = null) {
|
||||
return (new PerformanceDiff(this, to));
|
||||
},
|
||||
get title() {
|
||||
return this._all[0].title;
|
||||
},
|
||||
};
|
||||
|
||||
function PerformanceDiff(current, old = null) {
|
||||
this.title = current.title;
|
||||
this.windowId = current.windowId;
|
||||
this.deltaT = old ? current._timestamp - old._timestamp : Infinity;
|
||||
this._all = [];
|
||||
|
||||
// Handle the parent, if any.
|
||||
if (current._parent) {
|
||||
this._parent = old ? current._parent.subtract(old._parent) : current._parent;
|
||||
this._all.push(this._parent);
|
||||
this._parent.owner = this;
|
||||
} else {
|
||||
this._parent = null;
|
||||
}
|
||||
|
||||
// Handle the children, if any.
|
||||
this._content = new Map();
|
||||
for (let [k, stat] of current._content) {
|
||||
let diff = stat.subtract(old ? old._content.get(k) : null);
|
||||
this._content.set(k, diff);
|
||||
this._all.push(diff);
|
||||
diff.owner = this;
|
||||
}
|
||||
|
||||
// Now consolidate data
|
||||
for (let k of Object.keys(Probes)) {
|
||||
if (!(k in this._all[0])) {
|
||||
// The stats don't contain data from this probe.
|
||||
continue;
|
||||
}
|
||||
let data = this._all.map(item => item[k]);
|
||||
let probe = Probes[k];
|
||||
this[k] = probe.compose(data);
|
||||
}
|
||||
}
|
||||
PerformanceDiff.prototype = {
|
||||
toString() {
|
||||
return `[PerformanceDiff] ${this.key}`;
|
||||
},
|
||||
get windowIds() {
|
||||
return this._all.map(item => item.windowId).filter(x => !!x);
|
||||
},
|
||||
get groupIds() {
|
||||
return this._all.map(item => item.groupId);
|
||||
},
|
||||
get key() {
|
||||
if (this._parent) {
|
||||
return this._parent.windowId;
|
||||
}
|
||||
return this._all[0].groupId;
|
||||
},
|
||||
get names() {
|
||||
return this._all.map(item => item.name);
|
||||
},
|
||||
get processes() {
|
||||
return this._all.map(item => ({ isChildProcess: item.isChildProcess, processId: item.processId}));
|
||||
},
|
||||
};
|
||||
|
||||
/**
|
||||
* The delta between two instances of `PerformanceDataLeaf`.
|
||||
*
|
||||
* Used to monitor resource usage between two timestamps.
|
||||
*/
|
||||
function PerformanceDiffLeaf(current, old = null) {
|
||||
for (let k of PROPERTIES_META) {
|
||||
this[k] = current[k];
|
||||
}
|
||||
|
||||
for (let probeName of Object.keys(Probes)) {
|
||||
let other = null;
|
||||
if (old && probeName in old) {
|
||||
other = old[probeName];
|
||||
}
|
||||
|
||||
if (probeName in current) {
|
||||
this[probeName] = Probes[probeName].subtract(current[probeName], other);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* A snapshot of a single process.
|
||||
*/
|
||||
function ProcessSnapshot({xpcom, probes}) {
|
||||
this.componentsData = [];
|
||||
|
||||
let subgroups = new Map();
|
||||
for (let data of xpcom.getComponentsData().enumerate(Ci.nsIPerformanceStats)) {
|
||||
let stat = (new PerformanceDataLeaf({xpcom: data, probes}));
|
||||
|
||||
if (!data.parentId) {
|
||||
this.componentsData.push(stat);
|
||||
} else {
|
||||
let siblings = subgroups.get(data.parentId);
|
||||
if (!siblings) {
|
||||
subgroups.set(data.parentId, (siblings = []));
|
||||
}
|
||||
siblings.push(stat);
|
||||
}
|
||||
}
|
||||
|
||||
for (let group of this.componentsData) {
|
||||
for (let probe of probes) {
|
||||
probe.importChildCompartments(group, subgroups.get(group.groupId) || []);
|
||||
}
|
||||
}
|
||||
|
||||
this.processData = (new PerformanceDataLeaf({xpcom: xpcom.getProcessData(), probes}));
|
||||
}
|
||||
|
||||
/**
|
||||
* A snapshot of the performance usage of the application.
|
||||
*
|
||||
* @param {nsIPerformanceSnapshot} xpcom The data acquired from this process.
|
||||
* @param {Array<Object>} childProcesses The data acquired from child processes.
|
||||
* @param {Array<Probe>} probes The active probes.
|
||||
*/
|
||||
function ApplicationSnapshot({xpcom, childProcesses, probes, date}) {
|
||||
ProcessSnapshot.call(this, {xpcom, probes});
|
||||
|
||||
this.webpages = new Map();
|
||||
this.date = date;
|
||||
|
||||
// Child processes
|
||||
for (let {componentsData} of (childProcesses || [])) {
|
||||
// We are only interested in `componentsData` for the time being.
|
||||
for (let json of componentsData) {
|
||||
let leaf = (new PerformanceDataLeaf({json, probes}));
|
||||
this.componentsData.push(leaf);
|
||||
}
|
||||
}
|
||||
|
||||
for (let leaf of this.componentsData) {
|
||||
let key, map;
|
||||
if (leaf.windowId) {
|
||||
key = leaf.windowId;
|
||||
map = this.webpages;
|
||||
} else {
|
||||
continue;
|
||||
}
|
||||
|
||||
let combined = map.get(key);
|
||||
if (!combined) {
|
||||
combined = new PerformanceData(date);
|
||||
map.set(key, combined);
|
||||
}
|
||||
if (leaf.isChildProcess) {
|
||||
combined.addChild(leaf);
|
||||
} else {
|
||||
combined.setParent(leaf);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Communication with other processes
|
||||
*/
|
||||
var Process = {
|
||||
// a counter used to match responses to requests
|
||||
_idcounter: 0,
|
||||
_loader: null,
|
||||
/**
|
||||
* If we are in a child process, return `null`.
|
||||
* Otherwise, return the global parent process message manager
|
||||
* and load the script to connect to child processes.
|
||||
*/
|
||||
get loader() {
|
||||
if (isContent) {
|
||||
return null;
|
||||
}
|
||||
if (this._loader) {
|
||||
return this._loader;
|
||||
}
|
||||
Services.ppmm.loadProcessScript("resource://gre/modules/PerformanceStats-content.js",
|
||||
true/* including future processes*/);
|
||||
return this._loader = Services.ppmm;
|
||||
},
|
||||
|
||||
/**
|
||||
* Broadcast a message to all child processes.
|
||||
*
|
||||
* NOOP if we are in a child process.
|
||||
*/
|
||||
broadcast(topic, payload) {
|
||||
if (!this.loader) {
|
||||
return;
|
||||
}
|
||||
this.loader.broadcastAsyncMessage("performance-stats-service-" + topic, {payload});
|
||||
},
|
||||
|
||||
/**
|
||||
* Broadcast a message to all child processes and wait for their answers.
|
||||
*
|
||||
* NOOP if we are in a child process, or if we have no child processes,
|
||||
* in which case we return `undefined`.
|
||||
*
|
||||
* @return {undefined} If we have no child processes, in particular
|
||||
* if we are in a child process.
|
||||
* @return {Promise<Array<Object>>} If we have child processes, an
|
||||
* array of objects with a structure similar to PerformanceData. Note
|
||||
* that the array may be empty if no child process responded.
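*
* Example (this mirrors how `promiseSnapshot` uses it):
* let collected = await Process.broadcastAndCollect("collect", {probeNames: ["jank"]});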
|
||||
*/
|
||||
async broadcastAndCollect(topic, payload) {
|
||||
if (!this.loader || this.loader.childCount == 1) {
|
||||
return undefined;
|
||||
}
|
||||
const TOPIC = "performance-stats-service-" + topic;
|
||||
let id = this._idcounter++;
|
||||
|
||||
// The number of responses we are expecting. Note that we may
|
||||
// not receive all responses if a process takes too long to respond.
|
||||
let expecting = this.loader.childCount;
|
||||
|
||||
// The responses we have collected, in arbitrary order.
|
||||
let collected = [];
|
||||
let deferred = PromiseUtils.defer();
|
||||
|
||||
let observer = function({data, target}) {
|
||||
if (data.id != id) {
|
||||
// Collision between two collections,
|
||||
// ignore the other one.
|
||||
return;
|
||||
}
|
||||
if (data.data) {
|
||||
collected.push(data.data);
|
||||
}
|
||||
if (--expecting > 0) {
|
||||
// We are still waiting for at least one response.
|
||||
return;
|
||||
}
|
||||
deferred.resolve();
|
||||
};
|
||||
this.loader.addMessageListener(TOPIC, observer);
|
||||
this.loader.broadcastAsyncMessage(
|
||||
TOPIC,
|
||||
{id, payload}
|
||||
);
|
||||
|
||||
// Processes can die/freeze/be busy loading a page..., so don't expect
|
||||
// that they will always respond.
|
||||
let timeout = setTimeout(() => {
|
||||
if (expecting == 0) {
|
||||
return;
|
||||
}
|
||||
deferred.resolve();
|
||||
}, MAX_WAIT_FOR_CHILD_PROCESS_MS);
|
||||
|
||||
deferred.promise.then(() => {
|
||||
clearTimeout(timeout);
|
||||
});
|
||||
|
||||
await deferred.promise;
|
||||
this.loader.removeMessageListener(TOPIC, observer);
|
||||
|
||||
return collected;
|
||||
},
|
||||
};
|
@ -1,46 +0,0 @@
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
"use strict";
|
||||
|
||||
/**
|
||||
* An API for being informed of slow tabs (content process scripts).
|
||||
*/
|
||||
|
||||
const { Services } = ChromeUtils.import("resource://gre/modules/Services.jsm", {});
|
||||
|
||||
/**
|
||||
* `true` if this is a content process, `false` otherwise.
|
||||
*/
|
||||
let isContent = Services.appinfo.processType == Services.appinfo.PROCESS_TYPE_CONTENT;
|
||||
|
||||
if (isContent) {
|
||||
|
||||
const { PerformanceWatcher } = ChromeUtils.import("resource://gre/modules/PerformanceWatcher.jsm", {});
|
||||
|
||||
let toMsg = function(alerts) {
|
||||
let result = [];
|
||||
for (let {source, details} of alerts) {
|
||||
// Convert xpcom values to serializable data.
|
||||
let serializableSource = {};
|
||||
for (let k of ["groupId", "name", "windowId", "isSystem", "processId", "isContentProcess"]) {
|
||||
serializableSource[k] = source[k];
|
||||
}
|
||||
|
||||
let serializableDetails = {};
|
||||
for (let k of ["reason", "highestJank", "highestCPOW"]) {
|
||||
serializableDetails[k] = details[k];
|
||||
}
|
||||
result.push({source: serializableSource, details: serializableDetails});
|
||||
}
|
||||
return result;
|
||||
};
|
||||
|
||||
PerformanceWatcher.addPerformanceListener({windowId: 0}, alerts => {
|
||||
Services.cpmm.sendAsyncMessage("performancewatcher-propagate-notifications",
|
||||
{windows: toMsg(alerts)}
|
||||
);
|
||||
});
|
||||
|
||||
}
|
@ -1,325 +0,0 @@
|
||||
// -*- indent-tabs-mode: nil; js-indent-level: 2 -*-
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
* You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
"use strict";
|
||||
|
||||
/**
|
||||
* An API for being informed of slow tabs.
|
||||
*
|
||||
* Generally, this API is both more CPU-efficient and more battery-efficient
|
||||
* than PerformanceStats. Like PerformanceStats, this API does not provide any
|
||||
* information during the startup or shutdown of Firefox.
|
||||
*
|
||||
* = Example =
|
||||
*
|
||||
* Example use: reporting whenever any webpage slows down Firefox.
|
||||
* let listener = function(alerts) {
|
||||
* // This listener is triggered whenever any window causes Firefox to miss
|
||||
* // frames. Argument `source` contains information about the source of the
|
||||
* // slowdown (including the process in which it happens), while `details`
|
||||
* // contains performance statistics.
|
||||
* for (let {source, details} of alerts) {
|
||||
* console.log(`Oops, window ${source.windowId} seems to be slowing down Firefox.`, details);
|
||||
* }
* };
* // Special windowId 0 lets us listen to all webpages.
|
||||
* PerformanceWatcher.addPerformanceListener({windowId: 0}, listener);
|
||||
*
|
||||
*
|
||||
* = How this works =
|
||||
*
|
||||
* This high-level API is based on the lower-level nsIPerformanceStatsService.
|
||||
* At the end of each event (including micro-tasks), the nsIPerformanceStatsService
|
||||
* updates its internal performance statistics and determines whether any
|
||||
* window in the current process has exceeded the jank threshold.
|
||||
*
|
||||
* The PerformanceWatcher maintains low-level performance observers in each
|
||||
* process and forwards alerts to the main process. Internal observers collate
|
||||
* low-level main-process alerts and child-process alerts and notify clients
|
||||
* of this API.
|
||||
*/
|
||||
|
||||
var EXPORTED_SYMBOLS = ["PerformanceWatcher"];
|
||||
|
||||
let { PerformanceStats, performanceStatsService } = ChromeUtils.import("resource://gre/modules/PerformanceStats.jsm", {});
|
||||
let { Services } = ChromeUtils.import("resource://gre/modules/Services.jsm", {});
|
||||
|
||||
// `true` if the code is executed in content, `false` otherwise
|
||||
let isContent = Services.appinfo.processType == Services.appinfo.PROCESS_TYPE_CONTENT;
|
||||
|
||||
if (!isContent) {
|
||||
// Initialize communication with children.
|
||||
//
|
||||
// To keep the protocol simple, the children inform the parent whenever a slow
|
||||
// tab is detected. We do not attempt to implement thresholds.
|
||||
Services.ppmm.loadProcessScript("resource://gre/modules/PerformanceWatcher-content.js",
|
||||
true/* including future processes*/);
|
||||
|
||||
Services.ppmm.addMessageListener("performancewatcher-propagate-notifications",
|
||||
(...args) => ChildManager.notifyObservers(...args)
|
||||
);
|
||||
}
|
||||
|
||||
// Configure the performance stats service to inform us in case of jank.
|
||||
performanceStatsService.jankAlertThreshold = 64000 /* us */;
|
||||
|
||||
|
||||
/**
|
||||
* Handle communications with child processes. Handle listening to
|
||||
* a single window id (including the special window id 0, which is
|
||||
* notified for all windows).
|
||||
*
|
||||
* Acquire through `ChildManager.getWindow`.
|
||||
*/
|
||||
function ChildManager(map, key) {
|
||||
this.key = key;
|
||||
this._map = map;
|
||||
this._listeners = new Set();
|
||||
}
|
||||
ChildManager.prototype = {
|
||||
/**
|
||||
* Add a listener, which will be notified whenever a child process
|
||||
* reports a slow performance alert for this window.
|
||||
*/
|
||||
addListener(listener) {
|
||||
this._listeners.add(listener);
|
||||
},
|
||||
/**
|
||||
* Remove a listener.
|
||||
*/
|
||||
removeListener(listener) {
|
||||
let deleted = this._listeners.delete(listener);
|
||||
if (!deleted) {
|
||||
throw new Error("Unknown listener");
|
||||
}
|
||||
},
|
||||
|
||||
listeners() {
|
||||
return this._listeners.values();
|
||||
},
|
||||
};
|
||||
|
||||
/**
|
||||
* Dispatch child alerts to observers.
|
||||
*
|
||||
* Triggered by messages from content processes.
|
||||
*/
|
||||
ChildManager.notifyObservers = function({data: {windows}}) {
|
||||
if (windows && windows.length > 0) {
|
||||
// Dispatch the entire list to universal listeners
|
||||
this._notify(ChildManager.getWindow(0).listeners(), windows);
|
||||
|
||||
// Dispatch individual alerts to individual listeners
|
||||
for (let {source, details} of windows) {
|
||||
this._notify(ChildManager.getWindow(source.windowId).listeners(), source, details);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
ChildManager._notify = function(targets, ...args) {
|
||||
for (let target of targets) {
|
||||
target(...args);
|
||||
}
|
||||
};
|
||||
|
||||
ChildManager.getWindow = function(key) {
|
||||
return this._get(this._windows, key);
|
||||
};
|
||||
ChildManager._windows = new Map();
|
||||
|
||||
ChildManager._get = function(map, key) {
|
||||
let result = map.get(key);
|
||||
if (!result) {
|
||||
result = new ChildManager(map, key);
|
||||
map.set(key, result);
|
||||
}
|
||||
return result;
|
||||
};
|
||||
|
||||
/**
|
||||
* An object in charge of managing all the observables for a single
|
||||
* target (window/all windows).
|
||||
*
|
||||
* In a content process, a target is represented by a single observable.
|
||||
* The situation is more sophisticated in a parent process, as a target
|
||||
* has both an in-process observable and several observables across children
|
||||
* processes.
|
||||
*
|
||||
* This class abstracts away the difference to simplify the work of
|
||||
* (un)registering observers for targets.
|
||||
*
|
||||
* @param {object} target The target being observed, as an object
|
||||
* with one of the following fields:
|
||||
* - {xul:tab} tab A single tab. It must already be initialized.
|
||||
* - {number} windowId Either 0 for the universal window observer
|
||||
* or the outer window id of the window.
|
||||
*/
|
||||
function Observable(target) {
|
||||
// A mapping from `listener` (function) to `Observer`.
|
||||
this._observers = new Map();
|
||||
if ("tab" in target || "windowId" in target) {
|
||||
let windowId;
|
||||
if ("tab" in target) {
|
||||
windowId = target.tab.linkedBrowser.outerWindowID;
|
||||
// By convention, outerWindowID may not be 0.
|
||||
} else if ("windowId" in target) {
|
||||
windowId = target.windowId;
|
||||
}
|
||||
if (windowId == undefined || windowId == null) {
|
||||
throw new TypeError(`No outerWindowID. Perhaps the target is a tab that is not initialized yet.`);
|
||||
}
|
||||
this._key = `tab-windowId: ${windowId}`;
|
||||
this._process = performanceStatsService.getObservableWindow(windowId);
|
||||
this._children = isContent ? null : ChildManager.getWindow(windowId);
|
||||
this._isBuffered = windowId == 0;
|
||||
} else {
|
||||
throw new TypeError("Unexpected target");
|
||||
}
|
||||
}
|
||||
Observable.prototype = {
|
||||
addJankObserver(listener) {
|
||||
if (this._observers.has(listener)) {
|
||||
throw new TypeError(`Listener already registered for target ${this._key}`);
|
||||
}
|
||||
if (this._children) {
|
||||
this._children.addListener(listener);
|
||||
}
|
||||
let observer = this._isBuffered ? new BufferedObserver(listener)
|
||||
: new Observer(listener);
|
||||
// Store the observer to be able to call `this._process.removeJankObserver`.
|
||||
this._observers.set(listener, observer);
|
||||
|
||||
this._process.addJankObserver(observer);
|
||||
},
|
||||
removeJankObserver(listener) {
|
||||
let observer = this._observers.get(listener);
|
||||
if (!observer) {
|
||||
throw new TypeError(`No listener for target ${this._key}`);
|
||||
}
|
||||
this._observers.delete(listener);
|
||||
|
||||
if (this._children) {
|
||||
this._children.removeListener(listener);
|
||||
}
|
||||
|
||||
this._process.removeJankObserver(observer);
|
||||
observer.dispose();
|
||||
},
|
||||
};
|
||||
|
||||
/**
|
||||
* Get a cached observable for a given target.
|
||||
*/
|
||||
Observable.get = function(target) {
|
||||
let key;
|
||||
if ("tab" in target) {
|
||||
// We do not want to use a tab as a key, as this would prevent it from
|
||||
// being garbage-collected.
|
||||
key = target.tab.linkedBrowser.outerWindowID;
|
||||
} else if ("windowId" in target) {
|
||||
key = target.windowId;
|
||||
}
|
||||
if (key == null) {
|
||||
throw new TypeError(`Could not extract a key from ${JSON.stringify(target)}. Could the target be an uninitialized tab?`);
|
||||
}
|
||||
let observable = this._cache.get(key);
|
||||
if (!observable) {
|
||||
observable = new Observable(target);
|
||||
this._cache.set(key, observable);
|
||||
}
|
||||
return observable;
|
||||
};
|
||||
Observable._cache = new Map();
|
||||
|
||||
/**
|
||||
* Wrap a listener callback as an unbuffered nsIPerformanceObserver.
|
||||
*
|
||||
* Each observation is propagated immediately to the listener.
|
||||
*/
|
||||
function Observer(listener) {
|
||||
// Make sure that monitoring stays alive (in all processes) at least as
|
||||
// long as the observer.
|
||||
this._monitor = PerformanceStats.getMonitor(["jank", "cpow"]);
|
||||
this._listener = listener;
|
||||
}
|
||||
Observer.prototype = {
|
||||
observe(...args) {
|
||||
this._listener(...args);
|
||||
},
|
||||
dispose() {
|
||||
this._monitor.dispose();
|
||||
this.observe = function poison() {
|
||||
throw new Error("Internal error: I should have stopped receiving notifications");
|
||||
};
|
||||
},
|
||||
};
|
||||
|
||||
/**
|
||||
* Wrap a listener callback as a buffered nsIPerformanceObserver.
|
||||
*
|
||||
* Observations are buffered and dispatched to the listener in the next tick.
|
||||
*/
|
||||
function BufferedObserver(listener) {
|
||||
Observer.call(this, listener);
|
||||
this._buffer = [];
|
||||
this._isDispatching = false;
|
||||
this._pending = null;
|
||||
}
|
||||
BufferedObserver.prototype = Object.create(Observer.prototype);
|
||||
BufferedObserver.prototype.observe = function(source, details) {
|
||||
this._buffer.push({source, details});
|
||||
if (!this._isDispatching) {
|
||||
this._isDispatching = true;
|
||||
Services.tm.dispatchToMainThread(() => {
|
||||
// Grab buffer, in case something in the listener could modify it.
|
||||
let buffer = this._buffer;
|
||||
this._buffer = [];
|
||||
|
||||
// As of this point, any further observations need to use the new buffer
|
||||
// and a new dispatcher.
|
||||
this._isDispatching = false;
|
||||
|
||||
this._listener(buffer);
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
var PerformanceWatcher = {
|
||||
/**
|
||||
* Add a listener informed whenever we receive a slow performance alert
|
||||
* in the application.
|
||||
*
|
||||
* @param {object} target An object with one of the following fields:
|
||||
* - {number} windowId Either 0 to observe all windows or an outer window ID
|
||||
* to observe a single tab.
|
||||
* - {xul:browser} tab To observe a single tab.
|
||||
* @param {function} listener A function that will be triggered whenever
|
||||
* the target causes a slow performance notification. The notification may
|
||||
* have originated in any process of the application.
|
||||
*
|
||||
* If the listener listens to a single webpage, it is triggered with
|
||||
* the following arguments:
|
||||
* source: {groupId, name, windowId, isSystem, processId}
|
||||
* Information on the source of the notification.
|
||||
* details: {reason, highestJank, highestCPOW} Information on the
|
||||
* notification.
|
||||
*
|
||||
* If the listener listens to all webpages, it is triggered with
|
||||
* an array of {source, details}, as described above.
|
||||
*/
|
||||
addPerformanceListener(target, listener) {
|
||||
if (typeof listener != "function") {
|
||||
throw new TypeError();
|
||||
}
|
||||
let observable = Observable.get(target);
|
||||
observable.addJankObserver(listener);
|
||||
},
|
||||
removePerformanceListener(target, listener) {
|
||||
if (typeof listener != "function") {
|
||||
throw new TypeError();
|
||||
}
|
||||
let observable = Observable.get(target);
|
||||
observable.removeJankObserver(listener);
|
||||
},
|
||||
};
|
@ -1,120 +0,0 @@
|
||||
This directory is part of the implementation of the Performance Monitoring API

# What is the Performance Monitoring API?

The Performance Monitoring API is a set of interfaces designed to let front-end code find out if the application or a specific process is currently janky, quantify this jank and its evolution, and investigate what is causing jank (system code? a webpage? an add-on? CPOW?). In other words, this is a form of minimal profiler, designed to be lightweight enough to be enabled at all times in production code.

In Firefox Nightly, the Performance Monitoring API is used to:
- inform users if their machine janks because of an add-on;
- upload add-on performance to Telemetry for the benefit of AMO maintainers and add-on developers;
- let users inspect the performance of their browser through about:performance.

# How can I use the API?

The API is designed mainly to be used from JavaScript client code, using PerformanceStats.jsm. If you really need to use it from C++ code, you should use the performance stats service defined in nsIPerformanceStats.idl. Note that PerformanceStats.jsm contains support for entire e10s-enabled applications, while nsIPerformanceStats.idl only supports one process at a time.
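
A minimal sketch of typical client code (privileged JavaScript, inside an async function; the probe names and the baseline-then-subtract pattern follow the documentation in PerformanceStats.jsm):

    let { PerformanceStats } = ChromeUtils.import("resource://gre/modules/PerformanceStats.jsm", {});

    // Acquiring a monitor may activate the underlying low-level probes.
    let monitor = PerformanceStats.getMonitor(["jank", "cpow", "ticks"]);

    // Values are monotonic, not zero-based, so take a baseline first...
    let before = await monitor.promiseSnapshot();

    // ...let some code run, then measure again and subtract.
    let after = await monitor.promiseSnapshot();
    let diff = after.processData.subtract(before.processData);
    console.log("CPU time (µs):", diff.jank.totalCPUTime,
                "CPOW time (µs):", diff.cpow.totalCPOWTime);

    // Release the probes once they are no longer needed.
    monitor.dispose();

Per-webpage data is available on each snapshot through `webpages`, a `Map` keyed by outer window id.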
|
||||
|
||||
|
||||
# How does the Performance Monitoring API work?
|
||||
|
||||
At the time of this writing, the implementation of this API monitors only performance information related to the execution of JavaScript code, and only in the main thread. This is performed by an instrumentation of js/, orchestrated by toolkit/.
|
||||
|
||||
At low-level, the unit of code used for monitoring is the JS Compartment: one .jsm module, one XPCOM component, one sandbox, one script in an iframe, ... When executing code in a compartment, it is possible to inspect either the compartment or the script itself to find out who this compartment belongs to: a `<xul:browser>`, an add-on, etc.
|
||||
|
||||
At higher-level, the unit of code used for monitoring is the Performance Group. One Performance Group represents one or more JS Compartments, grouped together because we are interested in their performance. The current implementation uses Performance Groups to represent individual JS Compartments, entire add-ons, entire webpages including iframes and entire threads. Other applications have been discussed to represent entire eTLD+1 domains (e.g. to monitor the cost of ads), etc.
|
||||
|
||||
A choice was made to represent the CPU cost in *clock cycles* at low-level, as extracting a number of clock cycles has a very low latency (typically a few dozen cycles on recent architectures) and is much more precise than `getrusage`-style CPU clocks (which are often limited to a precision of 16ms). The drawback of this choice is that distinct CPUs/cores may, depending on the architecture, have entirely unrelated clock cycles count. We assume that the risk of false positives is reasonably low, and bound the uncertainty by renormalizing the result with the actual CPU clocks once per event.
|
||||
|
||||
## SpiderMonkey-level
|
||||
|
||||
The instrumentation of SpiderMonkey lives in `js/src/vm/Stopwatch.*`. SpiderMonkey does not know about the Gecko event loop, DOM events or windows, so any such information must be provided by the embedding. To communicate with higher levels, SpiderMonkey exposes a virtual class `js::PerformanceGroup` designed to be subclassed and instantiated by the embedding based on its interests.
|
||||
|
||||
An instance of `js::PerformanceGroup` may be acquired (to mark that it is currently being monitored) and released (once monitoring is complete or cancelled) by SpiderMonkey. Furthermore, a `js::PerformanceGroup` can be marked as active (to mark that the embedding is currently interested in its performance) or inactive (otherwise) by the embedding.
|
||||
|
||||
Each `js::PerformanceGroup` holds a total CPU cost measured in *clock cycles* and a total CPOW cost measured in *microseconds*. Both costs can only increase while measuring data, and can be reset to 0 by the embedding, once we have finished execution of the event loop.
|
||||
|
||||
### Upon starting to execute code in a JS Compartment `cx`
|
||||
1. If global monitoring is deactivated, bail out;
|
||||
2. If XPConnect has informed us that we are entering a nested event loop, cancel any ongoing measure on the outer event loop and proceed with the current measure;
|
||||
3. If we do not know to which performance groups `cx` is associated, request the information from the embedding;
|
||||
4. For each performance group `group` to which `cx` belongs *and* that is not acquired *and* for which monitoring is active, acquire the group;
|
||||
5. If no group was acquired, bail out;
|
||||
6. Capture a timestamp for the CPU cost of `cx`, in *clock cycles*. This value is provided directly by the CPU;
|
||||
7. Capture a timestamp for the CPOW cost of `cx`, in *CPOW microseconds*. This value is provided by the CPOW-level embedding.
|
||||
|
||||
### Upon stopping execution of the code in the JS compartment `cx`
|
||||
1. If global monitoring is deactivated, bail out;
2. If the measure has been canceled, bail out;
3. If no group was acquired, bail out;
|
||||
4. Capture a timestamp for the CPU cost of `cx`, use it to update the total CPU cost of each of the groups acquired;
|
||||
5. Capture a timestamp for the CPOW cost of `cx`, use it to update the total CPOW cost of each of the groups acquired;
|
||||
6. Mark acquired groups as executed recently;
|
||||
7. Release groups.
|
||||
|
||||
### When informed by the embedding that the iteration of the event loop is complete
|
||||
1. Commit all the groups executed recently to the embedding;
|
||||
2. Release all groups;
|
||||
3. Reset all CPU/CPOW costs to 0.
|
||||
|
||||
## Cross-Process Object Wrapper-level
|
||||
|
||||
The instrumentation of CPOW lives in `js/ipc/src`. It maintains a CPOW clock that increases whenever the process is blocked by a CPOW call.
|
||||
|
||||
## XPConnect-level
|
||||
|
||||
The instrumentation of XPConnect lives in `js/xpconnect/src/XPCJSContext.cpp`.
|
||||
|
||||
### When we enter a nested event loop
|
||||
|
||||
1. Inform the SpiderMonkey-level instrumentation, to let it cancel any ongoing measure.
|
||||
|
||||
### When we finish executing an iteration of the event loop, including microtasks:
|
||||
|
||||
1. Inform the SpiderMonkey-level instrumentation, to let it commit its recent data.
|
||||
|
||||
## nsIPerformanceStatsService-level
|
||||
|
||||
This code lives in `toolkit/components/perfmonitoring/ns*`. Its role is to orchestrate the information provided by SpiderMonkey at the scale of a single thread of a single process. At the time of this writing, this instrumentation is only activated on the main thread, for all Gecko processes.
|
||||
|
||||
The service defines a class `nsPerformanceGroup`, designed to be the sole concrete implementation of `js::PerformanceGroup`. `nsPerformanceGroup` extends `js::PerformanceGroup` with the global performance information gathered for the group since the start of the service. The information is:
|
||||
- total CPU time measured;
|
||||
- total CPOW time measured;
|
||||
- number of times CPU time exceeded 1ms;
|
||||
- number of times CPU time exceeded 2ms;
|
||||
- number of times CPU time exceeded 4ms;
|
||||
- ...
|
||||
- number of times CPU time exceeded 2^9ms.
|
||||
|
||||
Also, `nsPerformanceGroup` extends `js::PerformanceGroup` with high-level identification:
|
||||
- id of the window that executed the code, if any;
|
||||
- id of the add-on that provided the code, if any.
|
||||
|
||||
### When the SpiderMonkey-level instrumentation requests a list of PerformanceGroup for a compartment
|
||||
|
||||
Return a list with the following groups:
|
||||
* all compartments are associated with the "top group", which represents the entire thread;
|
||||
* find out if the compartment is code from a window, if so add a group shared by all compartments for this specific window;
|
||||
* find out if the compartment is code from an add-on, if so add a group shared by all compartments for this add-on;
|
||||
* add a group representing this specific compartment.
|
||||
|
||||
For performance reasons, groups representing a single compartment are inactive by default, while all other groups are active by default.
|
||||
|
||||
Performance groups are refcounted and destroyed with the implementation of `delete` used by toolkit/.
|
||||
|
||||
### When the SpiderMonkey-level instrumentation commits a list of PerformanceGroups
|
||||
|
||||
For each group in the list:
|
||||
1. transfer recent CPU time and recent CPOW time to total CPU time, total CPOW time, number of times CPU time exceeded *n* ms;
|
||||
2. reset group.
|
||||
|
||||
Future versions are expected to trigger low-performance alerts at this stage.
|
||||
|
||||
### Snapshotting
|
||||
|
||||
(to be documented)
|
||||
|
||||
## PerformanceStats.jsm-level
|
||||
|
||||
PerformanceStats provides a JS-friendly API on top of nsIPerformanceStatsService. The main differences are:
|
||||
- utilities for subtracting snapshots;
|
||||
- tracking clients that need specific measures;
|
||||
- synchronization between e10s processes.
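
For comparison, here is a minimal sketch of querying the low-level service directly from privileged JavaScript where `Cc`/`Ci` are available (single process only; the contract ID is the one declared in nsIPerformanceStats.idl):

    let service = Cc["@mozilla.org/toolkit/performance-stats-service;1"]
                    .getService(Ci.nsIPerformanceStatsService);
    service.isMonitoringJank = true;

    // Each entry describes one performance group (a webpage, an add-on, ...).
    let snapshot = service.getSnapshot();
    for (let stats of snapshot.getComponentsData().enumerate(Ci.nsIPerformanceStats)) {
      console.log(stats.name, stats.totalUserTime, stats.totalCPOWTime);
    }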
|
@ -7,24 +7,6 @@
|
||||
with Files('**'):
|
||||
BUG_COMPONENT = ('Toolkit', 'Performance Monitoring')
|
||||
|
||||
BROWSER_CHROME_MANIFESTS += ['tests/browser/browser.ini']
|
||||
|
||||
XPIDL_MODULE = 'toolkit_perfmonitoring'
|
||||
|
||||
EXTRA_JS_MODULES += [
|
||||
'PerformanceStats-content.js',
|
||||
'PerformanceStats.jsm',
|
||||
'PerformanceWatcher-content.js',
|
||||
'PerformanceWatcher.jsm',
|
||||
]
|
||||
|
||||
XPIDL_SOURCES += [
|
||||
'nsIPerformanceStats.idl',
|
||||
]
|
||||
|
||||
UNIFIED_SOURCES += [
|
||||
'nsPerformanceStats.cpp'
|
||||
]
|
||||
|
||||
UNIFIED_SOURCES += [
|
||||
'PerformanceMetricsCollector.cpp',
|
||||
@ -37,13 +19,8 @@ EXPORTS.mozilla += [
|
||||
'PerformanceUtils.h'
|
||||
]
|
||||
|
||||
EXPORTS += [
|
||||
'nsPerformanceStats.h'
|
||||
]
|
||||
|
||||
LOCAL_INCLUDES += [
|
||||
'/dom/base',
|
||||
'/dom/media',
|
||||
'/dom/base'
|
||||
]
|
||||
|
||||
FINAL_LIBRARY = 'xul'
|
||||
|
@ -1,312 +0,0 @@
|
||||
/* -*- indent-tabs-mode: nil; js-indent-level: 2 -*-*/
|
||||
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#include "nsISupports.idl"
|
||||
#include "nsIArray.idl"
|
||||
#include "nsIDOMWindow.idl"
|
||||
|
||||
/**
|
||||
* Mechanisms for querying the current process about performance
|
||||
* information.
|
||||
*
|
||||
* JavaScript clients should rather use PerformanceStats.jsm.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Identification details for a performance group.
|
||||
*
|
||||
* A performance group is a set of JavaScript compartments whose
|
||||
* performance is observed as a single entity. Typical examples of
|
||||
* performance groups: a webpage without its frames, a
|
||||
* webpage with all its frames, the entire JS runtime, ...
|
||||
*/
|
||||
[scriptable, builtinclass, uuid(994c56be-939a-4f20-8364-124f6422d86a)]
|
||||
interface nsIPerformanceGroupDetails: nsISupports {
|
||||
/**
|
||||
* An identifier unique to the component.
|
||||
*
|
||||
* This identifier is somewhat human-readable to aid with debugging,
|
||||
* but clients should not rely upon the format.
|
||||
*/
|
||||
readonly attribute AString groupId;
|
||||
|
||||
/**
|
||||
* A somewhat human-readable name for the component.
|
||||
*/
|
||||
readonly attribute AString name;
|
||||
|
||||
/**
|
||||
* If the component is code executed in a window, the ID of the topmost
|
||||
* outer window (i.e. the tab), otherwise 0.
|
||||
*/
|
||||
readonly attribute uint64_t windowId;
|
||||
|
||||
/**
|
||||
* `true` if this component is executed with system privileges
|
||||
* (e.g. the platform itself), `false` otherwise
|
||||
* (e.g. webpages).
|
||||
*/
|
||||
readonly attribute bool isSystem;
|
||||
|
||||
/**
|
||||
* The process running this group.
|
||||
*/
|
||||
readonly attribute unsigned long long processId;
|
||||
|
||||
/**
|
||||
* `true` if the code is executed in a content process, `false` otherwise.
|
||||
*/
|
||||
readonly attribute bool isContentProcess;
|
||||
};
|
||||
|
||||
/**
|
||||
* Snapshot of the performance of a component, e.g. a web
|
||||
* page, system built-ins, a module or the entire process itself.
|
||||
*
|
||||
* All values are monotonic and are updated only when
|
||||
* `nsIPerformanceStatsService.isStopwatchActive` is `true`.
|
||||
*/
|
||||
[scriptable, builtinclass, uuid(8a635d4b-aa56-466b-9a7d-9f91ca9405ef)]
|
||||
interface nsIPerformanceStats: nsIPerformanceGroupDetails {
|
||||
/**
|
||||
* Total amount of time spent executing code in this group, in
|
||||
* microseconds.
|
||||
*/
|
||||
readonly attribute unsigned long long totalUserTime;
|
||||
readonly attribute unsigned long long totalSystemTime;
|
||||
readonly attribute unsigned long long totalCPOWTime;
|
||||
|
||||
/**
|
||||
* Total number of times code execution entered this group,
|
||||
* since process launch. This may be greater than the number
|
||||
* of times we have entered the event loop.
|
||||
*/
|
||||
readonly attribute unsigned long long ticks;
|
||||
|
||||
/**
|
||||
* Jank indicator.
|
||||
*
|
||||
* durations[i] == number of times execution of this group
|
||||
* lasted at least 2^i ms.
|
||||
*/
|
||||
void getDurations([optional] out unsigned long aCount,
|
||||
[retval, array, size_is(aCount)] out unsigned long long aNumberOfOccurrences);
|
||||
};
|
||||
|
||||
/**
|
||||
* A snapshot of the performance data of the process.
|
||||
*/
|
||||
[scriptable, builtinclass, uuid(13cc235b-739e-4690-b0e3-d89cbe036a93)]
|
||||
interface nsIPerformanceSnapshot: nsISupports {
|
||||
/**
|
||||
* Data on all individual components.
|
||||
*/
|
||||
nsIArray getComponentsData();
|
||||
|
||||
/**
|
||||
* Information on the process itself.
|
||||
*
|
||||
* This contains the total amount of time spent executing JS code,
|
||||
* the total amount of time spent waiting for system calls while
|
||||
* executing JS code, the total amount of time performing blocking
|
||||
* inter-process calls, etc.
|
||||
*/
|
||||
nsIPerformanceStats getProcessData();
|
||||
};
|
||||
|
||||
/**
|
||||
* A performance alert.
|
||||
*/
|
||||
[scriptable, builtinclass, uuid(a85706ab-d703-4687-8865-78cd771eab93)]
|
||||
interface nsIPerformanceAlert: nsISupports {
|
||||
/**
|
||||
* A slowdown was detected.
|
||||
*
|
||||
* See REASON_JANK_* for details on whether this slowdown was user-noticeable.
|
||||
*/
|
||||
const unsigned long REASON_SLOWDOWN = 1;
|
||||
|
||||
/**
|
||||
* This alert was triggered during a jank in animation.
|
||||
*
|
||||
* In the current implementation, we consider that there is a jank
|
||||
* in animation if delivery of the vsync message to the main thread
|
||||
* has been delayed too much (see
|
||||
* nsIPerformanceStatsService.animationJankLevelThreshold).
|
||||
*
|
||||
* Note that this is a heuristic which may provide false positives,
|
||||
* so clients of this API are expected to perform post-processing to
|
||||
* filter out such false positives.
|
||||
*/
|
||||
const unsigned long REASON_JANK_IN_ANIMATION = 2;
|
||||
|
||||
/**
|
||||
* This alert was triggered during a jank in user input.
|
||||
*
|
||||
* In the current implementation, we consider that there is a jank
|
||||
* in user input if a user input was received either immediately
|
||||
* before executing the offending code (see
|
||||
* nsIPerformanceStatsService.userInputDelayThreshold) or while
|
||||
* executing the offending code.
|
||||
*
|
||||
* Note that this is a heuristic which may provide false positives,
|
||||
* so clients of this API are expected to perform post-processing to
|
||||
* filter out such false positives.
|
||||
*/
|
||||
const unsigned long REASON_JANK_IN_INPUT = 4;
|
||||
|
||||
/**
|
||||
* The reason for the alert, as a bitwise or of the various REASON_*
|
||||
* constants.
|
||||
*/
|
||||
readonly attribute unsigned long reason;
|
||||
|
||||
/**
|
||||
* Longest interval spent executing code in this group
|
||||
* since the latest alert, in microseconds.
|
||||
*
|
||||
* Note that the underlying algorithm is probabilistic and may
|
||||
* provide false positives, so clients of this API are expected to
|
||||
* perform post-processing to filter out such false positives. In
|
||||
* particular, a high system load will increase the noise level on
|
||||
* this measure.
|
||||
*/
|
||||
readonly attribute unsigned long long highestJank;
|
||||
|
||||
/**
|
||||
* Longest interval spent executing CPOW in this group
|
||||
* since the latest alert, in microseconds.
|
||||
*
|
||||
* This measure is reliable and involves no heuristics. However,
|
||||
* note that the duration of CPOWs is increased by high system
|
||||
* loads.
|
||||
*/
|
||||
readonly attribute unsigned long long highestCPOW;
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
* An observer for slow performance alerts.
|
||||
*/
|
||||
[scriptable, function, uuid(b746a929-3fec-420b-8ed8-c35d71995e05)]
|
||||
interface nsIPerformanceObserver: nsISupports {
|
||||
/**
|
||||
* @param target The performance group that caused the jank.
|
||||
* @param alert The performance cost that triggered the alert.
|
||||
*/
|
||||
void observe(in nsIPerformanceGroupDetails target, in nsIPerformanceAlert alert);
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
* A part of the system that may be observed for slow performance.
|
||||
*/
|
||||
[scriptable, builtinclass, uuid(b85720d0-e328-4342-9e46-8ca1acf8c70e)]
|
||||
interface nsIPerformanceObservable: nsISupports {
|
||||
/**
|
||||
* If a single group is being observed, information on this group.
|
||||
*/
|
||||
readonly attribute nsIPerformanceGroupDetails target;
|
||||
|
||||
/**
|
||||
* Add an observer that will be informed in case of jank.
|
||||
*
|
||||
* Set `jankAlertThreshold` to determine how much jank is needed
|
||||
* to trigger alerts.
|
||||
*
|
||||
* If the same observer is added more than once, it will be
|
||||
* triggered as many times as it has been added.
|
||||
*/
|
||||
void addJankObserver(in nsIPerformanceObserver observer);
|
||||
|
||||
/**
|
||||
* Remove an observer previously added with `addJankObserver`.
|
||||
*
|
||||
* Noop if the observer hasn't been added.
|
||||
*/
|
||||
void removeJankObserver(in nsIPerformanceObserver observer);
|
||||
};
|
||||
|
||||
|
||||
[scriptable, uuid(505bc42e-be38-4a53-baba-92cb33690cde)]
|
||||
interface nsIPerformanceStatsService : nsISupports {
|
||||
/**
|
||||
* `true` if we should monitor CPOW, `false` otherwise.
|
||||
*/
|
||||
[implicit_jscontext] attribute bool isMonitoringCPOW;
|
||||
|
||||
/**
|
||||
* `true` if we should monitor jank, `false` otherwise.
|
||||
*/
|
||||
[implicit_jscontext] attribute bool isMonitoringJank;
|
||||
|
||||
/**
|
||||
* `true` if all compartments need to be monitored individually,
|
||||
* `false` if only performance groups (i.e. entire webpages, etc.)
|
||||
* need to be monitored.
|
||||
*/
|
||||
[implicit_jscontext] attribute bool isMonitoringPerCompartment;
|
||||
|
||||
/**
|
||||
* Capture a snapshot of the performance data.
|
||||
*/
|
||||
[implicit_jscontext] nsIPerformanceSnapshot getSnapshot();
|
||||
|
||||
/**
|
||||
* The threshold, in microseconds, above which a performance group is
|
||||
* considered "slow" and should raise performance alerts.
|
||||
*/
|
||||
attribute unsigned long long jankAlertThreshold;
|
||||
|
||||
/**
|
||||
* If a user is seeing an animation and we spend too long executing
|
||||
* JS code while blocking refresh, this will be visible to the user.
|
||||
*
|
||||
* We assume that any jank during an animation and lasting more than
|
||||
* 2^animationJankLevelThreshold ms will be visible.
|
||||
*/
|
||||
attribute short animationJankLevelThreshold;
|
||||
|
||||
/**
|
||||
* If a user performs an input (e.g. clicking, pressing a key, but
|
||||
* *NOT* moving the mouse), and we spend too long executing JS code
|
||||
* before displaying feedback, this will be visible to the user even
|
||||
* if there is no ongoing animation.
|
||||
*
|
||||
* We assume that any jank during `userInputDelayThreshold` us after
|
||||
* the user input will be visible.
|
||||
*/
|
||||
attribute unsigned long long userInputDelayThreshold;
|
||||
|
||||
/**
|
||||
* A buffering delay, in milliseconds, used by the service to
|
||||
* regroup performance alerts, before observers are actually
|
||||
* noticed. Higher delays let the system avoid redundant
|
||||
* notifications for the same group, and are generally better for
|
||||
* performance.
|
||||
*/
|
||||
attribute unsigned long jankAlertBufferingDelay;
|
||||
|
||||
/**
|
||||
* Get a nsIPerformanceObservable representing a DOM window. This
|
||||
* observable may then be used to (un)register for watching
|
||||
* performance alerts for this window.
|
||||
*
|
||||
* Note that this covers only the current process.
|
||||
*
|
||||
* Use special window id 0 to get an observable that may be used to
|
||||
* (un)register for watching performance alerts of all windows at
|
||||
* once.
|
||||
*/
|
||||
nsIPerformanceObservable getObservableWindow(in unsigned long long windowId);
|
||||
};
|
||||
|
||||
|
||||
%{C++
|
||||
#define NS_TOOLKIT_PERFORMANCESTATSSERVICE_CID {0xfd7435d4, 0x9ec4, 0x4699, \
|
||||
{0xad, 0xd4, 0x1b, 0xe8, 0x3d, 0xd6, 0x8e, 0xf3} }
|
||||
#define NS_TOOLKIT_PERFORMANCESTATSSERVICE_CONTRACTID "@mozilla.org/toolkit/performance-stats-service;1"
|
||||
%}
|
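For reference, a minimal sketch (not part of this patch) of how privileged code could have driven the interface above, using the getService pattern that the tests further down also use. The callback shape of nsIPerformanceObserver is not visible in this diff, so the `observe` method and `someOuterWindowId` below are illustrative assumptions only.

// Hypothetical sketch: exercising the now-removed nsIPerformanceStatsService.
// Contract id, attributes and methods come from the IDL above; the observer
// callback name is an assumption and may not match the real interface.
let service = Cc["@mozilla.org/toolkit/performance-stats-service;1"]
                .getService(Ci.nsIPerformanceStatsService);

service.isMonitoringJank = true;          // start collecting jank data
service.jankAlertThreshold = 64 * 1000;   // microseconds of jank before alerting

// Watch a single window, or pass the special id 0 to watch all windows at once.
let observable = service.getObservableWindow(someOuterWindowId);
let observer = {
  observe(...args) {                      // assumed callback shape
    console.log("jank alert", args);
  },
};
observable.addJankObserver(observer);

// Later: stop watching and take a one-off snapshot of the counters.
observable.removeJankObserver(observer);
let snapshot = service.getSnapshot();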
File diff suppressed because it is too large
@ -1,754 +0,0 @@
|
||||
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#ifndef nsPerformanceStats_h
|
||||
#define nsPerformanceStats_h
|
||||
|
||||
#include "jsapi.h"
|
||||
|
||||
#include "nsHashKeys.h"
|
||||
#include "nsTHashtable.h"
|
||||
|
||||
#include "nsIObserver.h"
|
||||
#include "nsPIDOMWindow.h"
|
||||
|
||||
#include "nsIPerformanceStats.h"
|
||||
|
||||
class nsPerformanceGroup;
|
||||
class nsPerformanceGroupDetails;
|
||||
|
||||
typedef mozilla::Vector<RefPtr<nsPerformanceGroup>, 8> GroupVector;
|
||||
|
||||
/**
|
||||
* A data structure for registering observers interested in
|
||||
* performance alerts.
|
||||
*
|
||||
* Each performance group owns a single instance of this class.
|
||||
* Additionally, the service owns instances designed to observe the
|
||||
* performance alerts in all webpages.
|
||||
*/
|
||||
class nsPerformanceObservationTarget final : public nsIPerformanceObservable {
|
||||
public:
|
||||
NS_DECL_ISUPPORTS
|
||||
NS_DECL_NSIPERFORMANCEOBSERVABLE
|
||||
|
||||
/**
|
||||
* `true` if this target has at least one performance observer
|
||||
* registered, `false` otherwise.
|
||||
*/
|
||||
bool HasObservers() const;
|
||||
|
||||
/**
|
||||
* Notify all the observers that jank has happened.
|
||||
*/
|
||||
void NotifyJankObservers(nsIPerformanceGroupDetails* source,
|
||||
nsIPerformanceAlert* gravity);
|
||||
|
||||
/**
|
||||
* Set the details on the group being observed.
|
||||
*/
|
||||
void SetTarget(nsPerformanceGroupDetails* details);
|
||||
|
||||
private:
|
||||
~nsPerformanceObservationTarget() {}
|
||||
|
||||
// The observers for this target. We hold them as a vector, despite
|
||||
// the linear removal cost, as we expect that the typical number of
|
||||
// observers will be lower than 3, and that (un)registrations will
|
||||
// be fairly infrequent.
|
||||
mozilla::Vector<nsCOMPtr<nsIPerformanceObserver>> mObservers;
|
||||
|
||||
// Details on the group being observed. May be `nullptr`.
|
||||
RefPtr<nsPerformanceGroupDetails> mDetails;
|
||||
};
|
||||
|
||||
/**
|
||||
* The base class for entries of maps from window id to
|
||||
* performance group.
|
||||
*
|
||||
* Performance observers may be registered before their group is
|
||||
* created (e.g., one may register an observer for a webpage before all
|
||||
* its iframes are loaded). This class serves to hold the observation
|
||||
* target until the performance group may be created, and then to
|
||||
* associate the observation target and the performance group.
|
||||
*/
|
||||
class nsGroupHolder {
|
||||
public:
|
||||
nsGroupHolder() : mGroup(nullptr), mPendingObservationTarget(nullptr) {}
|
||||
|
||||
/**
|
||||
* Get the observation target, creating it if necessary.
|
||||
*/
|
||||
nsPerformanceObservationTarget* ObservationTarget();
|
||||
|
||||
/**
|
||||
* Get the group, if it has been created.
|
||||
*
|
||||
* May return `nullptr` if the group hasn't been created yet.
|
||||
*/
|
||||
class nsPerformanceGroup* GetGroup();
|
||||
|
||||
/**
|
||||
* Set the group.
|
||||
*
|
||||
* Once this method has been called, calling
|
||||
* `this->ObservationTarget()` and `group->ObservationTarget()` is equivalent.
|
||||
*
|
||||
* Must only be called once.
|
||||
*/
|
||||
void SetGroup(class nsPerformanceGroup*);
|
||||
|
||||
private:
|
||||
// The group. Initially `nullptr`, until we have called `SetGroup`.
|
||||
class nsPerformanceGroup* mGroup;
|
||||
|
||||
// The observation target. Instantiated by the first call to
|
||||
// `ObservationTarget()`.
|
||||
RefPtr<nsPerformanceObservationTarget> mPendingObservationTarget;
|
||||
};
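A rough JavaScript rendering of the lazy association documented above (hypothetical, not the C++ implementation): observers accumulate on a pending target until the group exists, after which the holder and the group share that same target.

// Hypothetical sketch of the nsGroupHolder pattern described above.
class GroupHolder {
  constructor() {
    this.group = null;            // created lazily; may never exist
    this.pendingTarget = null;    // created by the first observer registration
  }
  observationTarget() {
    if (!this.pendingTarget) {
      this.pendingTarget = { observers: [] };
    }
    return this.pendingTarget;
  }
  setGroup(group) {
    // Must only be called once; from here on, the group and the holder
    // hand out the same observation target.
    group.observationTarget = this.observationTarget();
    this.group = group;
  }
}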
|
||||
|
||||
/**
|
||||
* An implementation of the nsIPerformanceStatsService.
|
||||
*
|
||||
* Note that this implementation is not thread-safe.
|
||||
*/
|
||||
class nsPerformanceStatsService final : public nsIPerformanceStatsService,
|
||||
public nsIObserver {
|
||||
public:
|
||||
NS_DECL_ISUPPORTS
|
||||
NS_DECL_NSIPERFORMANCESTATSSERVICE
|
||||
NS_DECL_NSIOBSERVER
|
||||
|
||||
nsPerformanceStatsService();
|
||||
nsresult Init();
|
||||
|
||||
private:
|
||||
nsresult InitInternal();
|
||||
void Dispose();
|
||||
~nsPerformanceStatsService();
|
||||
|
||||
protected:
|
||||
friend nsPerformanceGroup;
|
||||
|
||||
/**
|
||||
* `false` until `Init()` and after `Dispose()`, `true` in between.
|
||||
*/
|
||||
bool mIsAvailable;
|
||||
|
||||
/**
|
||||
* `true` once we have called `Dispose()`.
|
||||
*/
|
||||
bool mDisposed;
|
||||
|
||||
/**
|
||||
* A unique identifier for the process.
|
||||
*
|
||||
* Process HANDLE under Windows, pid under Unix.
|
||||
*/
|
||||
const uint64_t mProcessId;
|
||||
|
||||
/**
|
||||
* Generate unique identifiers.
|
||||
*/
|
||||
uint64_t GetNextId();
|
||||
uint64_t mUIdCounter;
|
||||
|
||||
/**
|
||||
* Extract a snapshot of performance statistics from a performance group.
|
||||
*/
|
||||
static nsIPerformanceStats* GetStatsForGroup(
|
||||
const js::PerformanceGroup* group);
|
||||
static nsIPerformanceStats* GetStatsForGroup(const nsPerformanceGroup* group);
|
||||
|
||||
/**
|
||||
* Get the performance groups associated to a given JS compartment.
|
||||
*
|
||||
* A compartment is typically associated to the following groups:
|
||||
* - the top group, shared by the entire process;
|
||||
* - the window group, if the code is executed in a window, shared
|
||||
* by all compartments for that window (typically, all frames);
|
||||
* - the compartment's own group.
|
||||
*
|
||||
* Pre-condition: the VM must have entered the JS compartment.
|
||||
*
|
||||
* The caller is expected to cache the results of this method, as
|
||||
* calling it more than once may not return the same instances of
|
||||
* performance groups.
|
||||
*/
|
||||
bool GetPerformanceGroups(JSContext* cx, js::PerformanceGroupVector&);
|
||||
static bool GetPerformanceGroupsCallback(JSContext* cx,
|
||||
js::PerformanceGroupVector&,
|
||||
void* closure);
|
||||
|
||||
/**********************************************************
|
||||
*
|
||||
* Sets of all performance groups, indexed by several keys.
|
||||
*
|
||||
* These sets do not keep the performance groups alive. Rather, a
|
||||
* performance group is inserted in the relevant sets upon
|
||||
* construction and removed from the sets upon destruction or when
|
||||
* we Dispose() of the service.
|
||||
*
|
||||
* A `nsPerformanceGroup` is typically kept alive (as a
|
||||
* `js::PerformanceGroup`) by the JS::Compartment to which it is
|
||||
* associated. It may also temporarily be kept alive by the JS
|
||||
* stack, in particular in case of nested event loops.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Set of performance groups associated to windows, indexed by outer
|
||||
* window id. Each item is shared by all the compartments that
|
||||
* belong to the window.
|
||||
*/
|
||||
struct WindowIdToGroup : public nsUint64HashKey, public nsGroupHolder {
|
||||
explicit WindowIdToGroup(const uint64_t* key) : nsUint64HashKey(key) {}
|
||||
};
|
||||
nsTHashtable<WindowIdToGroup> mWindowIdToGroup;
|
||||
|
||||
/**
|
||||
* Set of all performance groups.
|
||||
*/
|
||||
struct Groups : public nsPtrHashKey<nsPerformanceGroup> {
|
||||
explicit Groups(const nsPerformanceGroup* key)
|
||||
: nsPtrHashKey<nsPerformanceGroup>(key) {}
|
||||
};
|
||||
nsTHashtable<Groups> mGroups;
|
||||
|
||||
/**
|
||||
* The performance group representing the runtime itself. All
|
||||
* compartments are associated to this group.
|
||||
*/
|
||||
RefPtr<nsPerformanceGroup> mTopGroup;
|
||||
|
||||
/**********************************************************
|
||||
*
|
||||
* Measuring and recording the CPU use of the system.
|
||||
*
|
||||
*/
|
||||
|
||||
/**
|
||||
* Get the OS-reported time spent in userland/systemland, in
|
||||
* microseconds. On most platforms, this data is per-thread,
|
||||
* but on some platforms we need to fall back to per-process.
|
||||
*
|
||||
* Data is not guaranteed to be monotonic.
|
||||
*/
|
||||
nsresult GetResources(uint64_t* userTime, uint64_t* systemTime) const;
|
||||
|
||||
/**
|
||||
* Amount of user/system CPU time used by the thread (or process,
|
||||
* for platforms that don't support per-thread measure) since start.
|
||||
* Updated by `StopwatchStart` at most once per event.
|
||||
*
|
||||
* Unit: microseconds.
|
||||
*/
|
||||
uint64_t mUserTimeStart;
|
||||
uint64_t mSystemTimeStart;
|
||||
|
||||
bool mIsHandlingUserInput;
|
||||
|
||||
/**
|
||||
* The number of user inputs since the start of the process. Used to
|
||||
* determine whether the current iteration has triggered a
|
||||
* (JS-implemented) user input.
|
||||
*/
|
||||
uint64_t mUserInputCount;
|
||||
|
||||
/**********************************************************
|
||||
*
|
||||
* Callbacks triggered by the JS VM when execution of JavaScript
|
||||
* code starts/completes.
|
||||
*
|
||||
* As measures of user CPU time/system CPU time have low resolution
|
||||
* (and are somewhat slow), we measure both only during the calls to
|
||||
* `StopwatchStart`/`StopwatchCommit` and we make the assumption
|
||||
* that each group's user/system CPU time is proportional to the
|
||||
* number of clock cycles spent executing code in the group between
|
||||
* `StopwatchStart`/`StopwatchCommit`.
|
||||
*
|
||||
* The results may be skewed by the thread being rescheduled to a
|
||||
* different CPU during the measure, but we expect that on average,
|
||||
* the skew will have limited effects, and will generally tend to
|
||||
* make already-slow executions appear slower.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Execution of JavaScript code has started. This may happen several
|
||||
* times in succession if the JavaScript code contains nested event
|
||||
* loops, in which case only the innermost call will receive
|
||||
* `StopwatchCommitCallback`.
|
||||
*
|
||||
* @param iteration The number of times we have started executing
|
||||
* JavaScript code.
|
||||
*/
|
||||
static bool StopwatchStartCallback(uint64_t iteration, void* closure);
|
||||
bool StopwatchStart(uint64_t iteration);
|
||||
|
||||
/**
|
||||
* Execution of JavaScript code has reached completion (including
|
||||
* enqueued microtasks). In case of nested event loops, any ongoing
|
||||
* measurement on outer loops is silently cancelled without any call
|
||||
* to this method.
|
||||
*
|
||||
* @param iteration The number of times we have started executing
|
||||
* JavaScript code.
|
||||
* @param recentGroups The groups that have seen activity during this
|
||||
* event.
|
||||
*/
|
||||
static bool StopwatchCommitCallback(uint64_t iteration,
|
||||
js::PerformanceGroupVector& recentGroups,
|
||||
void* closure);
|
||||
bool StopwatchCommit(uint64_t iteration,
|
||||
js::PerformanceGroupVector& recentGroups);
|
||||
|
||||
/**
|
||||
* The number of times we have started executing JavaScript code.
|
||||
*/
|
||||
uint64_t mIteration;
|
||||
|
||||
/**
|
||||
* Commit performance measures of a single group.
|
||||
*
|
||||
* Data is transferred from `group->recent*` to `group->data`.
|
||||
*
|
||||
*
|
||||
* @param iteration The current iteration.
|
||||
* @param userTime The total user CPU time for this thread (or
|
||||
* process, if per-thread data is not available) between the
|
||||
* calls to `StopwatchStart` and `StopwatchCommit`.
|
||||
* @param systemTime The total system CPU time for this thread (or
|
||||
* process, if per-thread data is not available) between the
|
||||
* calls to `StopwatchStart` and `StopwatchCommit`.
|
||||
* @param cycles The total number of cycles for this thread
|
||||
* between the calls to `StopwatchStart` and `StopwatchCommit`.
|
||||
* @param isJankVisible If `true`, expect that the user will notice
|
||||
* any slowdown.
|
||||
* @param group The group containing the data to commit.
|
||||
*/
|
||||
void CommitGroup(uint64_t iteration, uint64_t userTime, uint64_t systemTime,
|
||||
uint64_t cycles, bool isJankVisible,
|
||||
nsPerformanceGroup* group);
|
||||
|
||||
/**********************************************************
|
||||
*
|
||||
* To check whether our algorithm makes sense, we keep count of the
|
||||
* number of times the process has been rescheduled to another CPU
|
||||
* while we were monitoring the performance of a group and we upload
|
||||
* this data through Telemetry.
|
||||
*/
|
||||
nsresult UpdateTelemetry();
|
||||
|
||||
uint64_t mProcessStayed;
|
||||
uint64_t mProcessMoved;
|
||||
uint32_t mProcessUpdateCounter;
|
||||
|
||||
/**********************************************************
|
||||
*
|
||||
* Options controlling measurements.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Determine if we are measuring the performance of every individual
|
||||
* compartment (in particular, every individual module, frame,
|
||||
* sandbox). Note that this makes measurements noticeably slower.
|
||||
*/
|
||||
bool mIsMonitoringPerCompartment;
|
||||
|
||||
/**********************************************************
|
||||
*
|
||||
* Determining whether jank is user-visible.
|
||||
*/
|
||||
|
||||
/**
|
||||
* `true` if we believe that any slowdown can cause a noticeable
|
||||
* delay in handling user-input.
|
||||
*
|
||||
* In the current implementation, we return `true` if the latest
|
||||
* user input was less than MAX_DURATION_OF_INTERACTION_MS ago. This
|
||||
* includes all inputs (mouse, keyboard, other devices), with the
|
||||
* exception of mousemove.
|
||||
*/
|
||||
bool IsHandlingUserInput();
|
||||
|
||||
public:
|
||||
/**********************************************************
|
||||
*
|
||||
* Letting observers register themselves to watch for performance
|
||||
* alerts.
|
||||
*
|
||||
* To avoid saturating clients with alerts (or even creating loops
|
||||
* of alerts), each alert is buffered. At the end of each iteration
|
||||
* of the event loop, groups that have caused performance alerts
|
||||
* are registered in a set of pending alerts and, if the collection
|
||||
* timer hasn't been started yet, it is started. Once the timer
|
||||
* fires, we gather all the pending alerts, empty the set, and
|
||||
* dispatch to observers.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Clear the set of pending alerts and dispatch the pending alerts
|
||||
* to observers.
|
||||
*/
|
||||
void NotifyJankObservers(const mozilla::Vector<uint64_t>& previousJankLevels);
|
||||
|
||||
private:
|
||||
/**
|
||||
* The set of groups for which we know that an alert should be
|
||||
* raised. This set is cleared once `mPendingAlertsCollector`
|
||||
* fires.
|
||||
*
|
||||
* Invariant: no group may appear twice in this vector.
|
||||
*/
|
||||
GroupVector mPendingAlerts;
|
||||
|
||||
/**
|
||||
* A timer callback in charge of collecting the groups in
|
||||
* `mPendingAlerts` and triggering `NotifyJankObservers` to dispatch
|
||||
* performance alerts.
|
||||
*/
|
||||
RefPtr<class PendingAlertsCollector> mPendingAlertsCollector;
|
||||
|
||||
/**
|
||||
* Observation targets that are not attached to a specific group.
|
||||
*/
|
||||
struct UniversalTargets {
|
||||
UniversalTargets();
|
||||
/**
|
||||
* A target for observers interested in watching all windows.
|
||||
*/
|
||||
RefPtr<nsPerformanceObservationTarget> mWindows;
|
||||
};
|
||||
UniversalTargets mUniversalTargets;
|
||||
|
||||
/**
|
||||
* The threshold, in microseconds, above which a performance group is
|
||||
* considered "slow" and should raise performance alerts.
|
||||
*/
|
||||
uint64_t mJankAlertThreshold;
|
||||
|
||||
/**
|
||||
* A buffering delay, in milliseconds, used by the service to
|
||||
* regroup performance alerts, before observers are actually
|
||||
* notified. Higher delays let the system avoid redundant
|
||||
* notifications for the same group, and are generally better for
|
||||
* performance.
|
||||
*/
|
||||
uint32_t mJankAlertBufferingDelay;
|
||||
|
||||
/**
|
||||
* The threshold above which jank, as reported by the refresh drivers,
|
||||
* is considered user-visible.
|
||||
*
|
||||
* A value of n means that any jank above 2^n ms will be considered
|
||||
* user visible.
|
||||
*/
|
||||
short mJankLevelVisibilityThreshold;
|
||||
|
||||
/**
|
||||
* The number of microseconds during which we assume that a
|
||||
* user-interaction can keep the code jank-critical. Any user
|
||||
* interaction that lasts longer than this duration is expected to
|
||||
* either have already caused jank or have caused a nested event
|
||||
* loop.
|
||||
*
|
||||
* In either case, we consider that monitoring
|
||||
* jank-during-interaction after this duration is useless.
|
||||
*/
|
||||
uint64_t mMaxExpectedDurationOfInteractionUS;
|
||||
};
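The alert-buffering scheme documented in the class above amounts to a debounce over a set of groups: each slow group is queued at most once, and a single timer flushes the whole batch to observers. A hypothetical JavaScript sketch, with an illustrative delay value:

// Hypothetical sketch, not the C++ implementation.
let pendingAlerts = new Set();
let collectorTimer = null;
const JANK_ALERT_BUFFERING_DELAY_MS = 1000;   // illustrative value

function reportSlowGroup(group, notifyObservers) {
  if (pendingAlerts.has(group)) {
    return;                                   // already queued for this batch
  }
  pendingAlerts.add(group);
  if (!collectorTimer) {
    collectorTimer = setTimeout(() => {
      let batch = [...pendingAlerts];
      pendingAlerts.clear();
      collectorTimer = null;
      notifyObservers(batch);                 // one dispatch for the whole batch
    }, JANK_ALERT_BUFFERING_DELAY_MS);
  }
}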
|
||||
|
||||
/**
|
||||
* Container for performance data.
|
||||
*
|
||||
* All values are monotonic.
|
||||
*
|
||||
* All values are updated after running to completion.
|
||||
*/
|
||||
struct PerformanceData {
|
||||
/**
|
||||
* Number of times we have spent at least 2^n consecutive
|
||||
* milliseconds executing code in this group.
|
||||
* durations[0] is increased whenever we spend at least 1 ms
|
||||
* executing code in this group
|
||||
* durations[1] whenever we spend 2ms+
|
||||
* ...
|
||||
* durations[i] whenever we spend 2^i ms+
|
||||
*/
|
||||
uint64_t mDurations[10];
|
||||
|
||||
/**
|
||||
* Total amount of time spent executing code in this group, in
|
||||
* microseconds.
|
||||
*/
|
||||
uint64_t mTotalUserTime;
|
||||
uint64_t mTotalSystemTime;
|
||||
uint64_t mTotalCPOWTime;
|
||||
|
||||
/**
|
||||
* Total number of times code execution entered this group, since
|
||||
* process launch. This may be greater than the number of times we
|
||||
* have entered the event loop.
|
||||
*/
|
||||
uint64_t mTicks;
|
||||
|
||||
PerformanceData();
|
||||
PerformanceData(const PerformanceData& from) = default;
|
||||
PerformanceData& operator=(const PerformanceData& from) = default;
|
||||
};
|
||||
|
||||
/**
|
||||
* Identification information for an item that can hold performance
|
||||
* data.
|
||||
*/
|
||||
class nsPerformanceGroupDetails final : public nsIPerformanceGroupDetails {
|
||||
public:
|
||||
NS_DECL_ISUPPORTS
|
||||
NS_DECL_NSIPERFORMANCEGROUPDETAILS
|
||||
|
||||
nsPerformanceGroupDetails(const nsAString& aName, const nsAString& aGroupId,
|
||||
const uint64_t aWindowId, const uint64_t aProcessId,
|
||||
const bool aIsSystem)
|
||||
: mName(aName),
|
||||
mGroupId(aGroupId),
|
||||
mWindowId(aWindowId),
|
||||
mProcessId(aProcessId),
|
||||
mIsSystem(aIsSystem) {}
|
||||
|
||||
public:
|
||||
const nsAString& Name() const;
|
||||
const nsAString& GroupId() const;
|
||||
uint64_t WindowId() const;
|
||||
uint64_t ProcessId() const;
|
||||
bool IsWindow() const;
|
||||
bool IsSystem() const;
|
||||
bool IsContentProcess() const;
|
||||
|
||||
private:
|
||||
~nsPerformanceGroupDetails() {}
|
||||
|
||||
const nsString mName;
|
||||
const nsString mGroupId;
|
||||
const uint64_t mWindowId;
|
||||
const uint64_t mProcessId;
|
||||
const bool mIsSystem;
|
||||
};
|
||||
|
||||
/**
|
||||
* The kind of compartments represented by this group.
|
||||
*/
|
||||
enum class PerformanceGroupScope {
|
||||
/**
|
||||
* This group represents the entire runtime (i.e. the thread).
|
||||
*/
|
||||
RUNTIME,
|
||||
|
||||
/**
|
||||
* This group represents all the compartments executed in a window.
|
||||
*/
|
||||
WINDOW,
|
||||
|
||||
/**
|
||||
* This group represents a single compartment.
|
||||
*/
|
||||
COMPARTMENT,
|
||||
};
|
||||
|
||||
/**
|
||||
* A concrete implementation of `js::PerformanceGroup`, also holding
|
||||
* performance data. Instances may represent individual compartments,
|
||||
* windows or the entire runtime.
|
||||
*
|
||||
* This class is intended to be the sole implementation of
|
||||
* `js::PerformanceGroup`.
|
||||
*/
|
||||
class nsPerformanceGroup final : public js::PerformanceGroup {
|
||||
public:
|
||||
// Ideally, we would define the enum class in nsPerformanceGroup,
|
||||
// but this seems to choke some versions of gcc.
|
||||
typedef PerformanceGroupScope GroupScope;
|
||||
|
||||
/**
|
||||
* Construct a performance group.
|
||||
*
|
||||
* @param cx The container context. Used to generate a unique identifier.
|
||||
* @param service The performance service. Used during destruction to
|
||||
* cleanup the hash tables.
|
||||
* @param name A name for the group, designed mostly for debugging purposes,
|
||||
* so it should be at least somewhat human-readable.
|
||||
* @param windowId The identifier of the window. Should be 0 when the
|
||||
* group is not part of a window.
|
||||
* @param processId A unique identifier for the process.
|
||||
* @param isSystem `true` if the code of the group is executed with
|
||||
* system credentials, `false` otherwise.
|
||||
* @param scope the scope of this group.
|
||||
*/
|
||||
static nsPerformanceGroup* Make(nsPerformanceStatsService* service,
|
||||
const nsAString& name, uint64_t windowId,
|
||||
uint64_t processId, bool isSystem,
|
||||
GroupScope scope);
|
||||
|
||||
/**
|
||||
* Utility: type-safer conversion from js::PerformanceGroup to
|
||||
* nsPerformanceGroup.
|
||||
*/
|
||||
static inline nsPerformanceGroup* Get(js::PerformanceGroup* self) {
|
||||
return static_cast<nsPerformanceGroup*>(self);
|
||||
}
|
||||
static inline const nsPerformanceGroup* Get(
|
||||
const js::PerformanceGroup* self) {
|
||||
return static_cast<const nsPerformanceGroup*>(self);
|
||||
}
|
||||
|
||||
/**
|
||||
* The performance data committed to this group.
|
||||
*/
|
||||
PerformanceData data;
|
||||
|
||||
/**
|
||||
* The scope of this group. Used to determine whether the group
|
||||
* should be (de)activated.
|
||||
*/
|
||||
GroupScope Scope() const;
|
||||
|
||||
/**
|
||||
* Identification details for this group.
|
||||
*/
|
||||
nsPerformanceGroupDetails* Details() const;
|
||||
|
||||
/**
|
||||
* Cleanup any references.
|
||||
*/
|
||||
void Dispose();
|
||||
|
||||
/**
|
||||
* Set the observation target for this group.
|
||||
*
|
||||
* This method must be called exactly once, when the performance
|
||||
* group is attached to its `nsGroupHolder`.
|
||||
*/
|
||||
void SetObservationTarget(nsPerformanceObservationTarget*);
|
||||
|
||||
/**
|
||||
* `true` if we have already noticed that a performance alert should
|
||||
* be raised for this group but we have not dispatched it yet,
|
||||
* `false` otherwise.
|
||||
*/
|
||||
bool HasPendingAlert() const;
|
||||
void SetHasPendingAlert(bool value);
|
||||
|
||||
protected:
|
||||
nsPerformanceGroup(nsPerformanceStatsService* service, const nsAString& name,
|
||||
const nsAString& groupId, uint64_t windowId,
|
||||
uint64_t processId, bool isSystem, GroupScope scope);
|
||||
|
||||
/**
|
||||
* Virtual implementation of `delete`, to make sure that objects are
|
||||
* destroyed with an implementation of `delete` compatible with the
|
||||
* implementation of `new` used to allocate them.
|
||||
*
|
||||
* Called by SpiderMonkey.
|
||||
*/
|
||||
virtual void Delete() override { delete this; }
|
||||
~nsPerformanceGroup();
|
||||
|
||||
private:
|
||||
/**
|
||||
* Identification details for this group.
|
||||
*/
|
||||
RefPtr<nsPerformanceGroupDetails> mDetails;
|
||||
|
||||
/**
|
||||
* The stats service. Used to perform cleanup during destruction.
|
||||
*/
|
||||
RefPtr<nsPerformanceStatsService> mService;
|
||||
|
||||
/**
|
||||
* The scope of this group. Used to determine whether the group
|
||||
* should be (de)activated.
|
||||
*/
|
||||
const GroupScope mScope;
|
||||
|
||||
// Observing performance alerts.
|
||||
|
||||
public:
|
||||
/**
|
||||
* The observation target, used to register observers.
|
||||
*/
|
||||
nsPerformanceObservationTarget* ObservationTarget() const;
|
||||
|
||||
/**
|
||||
* Record a jank duration.
|
||||
*
|
||||
* Update the highest recent jank if necessary.
|
||||
*/
|
||||
void RecordJank(uint64_t jank);
|
||||
uint64_t HighestRecentJank();
|
||||
|
||||
/**
|
||||
* Record a CPOW duration.
|
||||
*
|
||||
* Update the highest recent CPOW if necessary.
|
||||
*/
|
||||
void RecordCPOW(uint64_t cpow);
|
||||
uint64_t HighestRecentCPOW();
|
||||
|
||||
/**
|
||||
* Record that this group has recently been involved in handling
|
||||
* user input. Note that heuristics are involved here, so the
|
||||
* result is not 100% accurate.
|
||||
*/
|
||||
void RecordUserInput();
|
||||
bool HasRecentUserInput();
|
||||
|
||||
/**
|
||||
* Reset recent values (recent highest CPOW and jank, involvement in
|
||||
* user input).
|
||||
*/
|
||||
void ResetRecent();
|
||||
|
||||
private:
|
||||
/**
|
||||
* The target used by observers to register for watching slow
|
||||
* performance alerts caused by this group.
|
||||
*
|
||||
* May be nullptr for groups that cannot be watched (the top group).
|
||||
*/
|
||||
RefPtr<class nsPerformanceObservationTarget> mObservationTarget;
|
||||
|
||||
/**
|
||||
* The highest jank encountered since jank observers for this group
|
||||
* were last called, in microseconds.
|
||||
*/
|
||||
uint64_t mHighestJank;
|
||||
|
||||
/**
|
||||
* The highest CPOW encountered since jank observers for this group
|
||||
* were last called, in microseconds.
|
||||
*/
|
||||
uint64_t mHighestCPOW;
|
||||
|
||||
/**
|
||||
* `true` if this group has been involved in handling user input,
|
||||
* `false` otherwise.
|
||||
*
|
||||
* Note that we use heuristics to determine whether a group is
|
||||
* involved in handling user input, so this value is not 100%
|
||||
* accurate.
|
||||
*/
|
||||
bool mHasRecentUserInput;
|
||||
|
||||
/**
|
||||
* `true` if this group has caused a performance alert and this alert
|
||||
* hasn't been dispatched yet.
|
||||
*
|
||||
* We use this as part of the buffering of performance alerts. If
|
||||
* the group generates several alerts several times during the
|
||||
* buffering delay, we only wish to add the group once to the list
|
||||
* of alerts.
|
||||
*/
|
||||
bool mHasPendingAlert;
|
||||
};
|
||||
|
||||
#endif
|
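The power-of-two bucketing documented for PerformanceData::mDurations, together with the 2^n ms visibility threshold used by mJankLevelVisibilityThreshold, reduces to simple arithmetic. A small illustrative sketch, not taken from the removed sources:

// Hypothetical sketch of the 2^i ms duration buckets described above.
function recordDuration(durations, ms) {
  // durations[i] counts executions that lasted at least 2^i ms.
  for (let i = 0; i < durations.length && (1 << i) <= ms; ++i) {
    durations[i]++;
  }
}

function jankLevel(ms) {
  // Highest i such that 2^i <= ms, or -1 for sub-millisecond jank.
  return ms >= 1 ? Math.floor(Math.log2(ms)) : -1;
}

let durations = new Array(10).fill(0);
recordDuration(durations, 150);              // bumps buckets 0..7 (2^7 = 128 <= 150)
const VISIBILITY_THRESHOLD = 6;              // illustrative: jank above 2^6 = 64 ms is visible
console.log(jankLevel(150) >= VISIBILITY_THRESHOLD);  // true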
@ -1,7 +0,0 @@
|
||||
"use strict";
|
||||
|
||||
module.exports = {
|
||||
"extends": [
|
||||
"plugin:mozilla/browser-test"
|
||||
]
|
||||
};
|
@ -1,11 +0,0 @@
|
||||
[DEFAULT]
|
||||
head = head.js
|
||||
tags = addons
|
||||
support-files =
|
||||
browser_compartments.html
|
||||
browser_compartments_frame.html
|
||||
browser_compartments_script.js
|
||||
|
||||
[browser_compartments.js]
|
||||
skip-if = (os == "linux" && !debug && e10s) || (os == "win" && os_version == "10.0") # Bug 1230018, Bug 1409631
|
||||
[browser_webpagePerformanceAlerts.js]
|
@ -1,20 +0,0 @@
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<title>
|
||||
Main frame for test browser_compartments.js
|
||||
</title>
|
||||
</head>
|
||||
<body>
|
||||
Main frame.
|
||||
|
||||
<iframe src="browser_compartments_frame.html?frame=1">
|
||||
Subframe 1
|
||||
</iframe>
|
||||
|
||||
<iframe src="browser_compartments_frame.html?frame=2">
|
||||
Subframe 2.
|
||||
</iframe>
|
||||
|
||||
</body>
|
||||
</html>
|
@ -1,301 +0,0 @@
|
||||
/* Any copyright is dedicated to the Public Domain.
|
||||
* http://creativecommons.org/publicdomain/zero/1.0/ */
|
||||
|
||||
/* eslint-env mozilla/frame-script */
|
||||
/* eslint-disable mozilla/no-arbitrary-setTimeout */
|
||||
|
||||
"use strict";
|
||||
|
||||
/**
|
||||
* Test that we see jank that takes place in a webpage,
|
||||
* and that jank from several iframes is actually charged
|
||||
* to the top window.
|
||||
*/
|
||||
ChromeUtils.import("resource://gre/modules/PerformanceStats.jsm", this);
|
||||
ChromeUtils.import("resource://gre/modules/Services.jsm", this);
|
||||
ChromeUtils.import("resource://testing-common/ContentTask.jsm", this);
|
||||
|
||||
|
||||
const URL = "http://example.com/browser/toolkit/components/perfmonitoring/tests/browser/browser_compartments.html?test=" + Math.random();
|
||||
const PARENT_TITLE = `Main frame for test browser_compartments.js ${Math.random()}`;
|
||||
const FRAME_TITLE = `Subframe for test browser_compartments.js ${Math.random()}`;
|
||||
|
||||
const PARENT_PID = Services.appinfo.processID;
|
||||
|
||||
// The source of this function is injected as a frame script
|
||||
function frameScript() {
|
||||
try {
|
||||
"use strict";
|
||||
|
||||
ChromeUtils.import("resource://gre/modules/PerformanceStats.jsm");
|
||||
ChromeUtils.import("resource://gre/modules/Services.jsm");
|
||||
|
||||
// Make sure that the stopwatch is now active.
|
||||
let monitor = PerformanceStats.getMonitor(["jank", "cpow", "ticks", "compartments"]);
|
||||
|
||||
addMessageListener("compartments-test:getStatistics", () => {
|
||||
try {
|
||||
monitor.promiseSnapshot().then(snapshot => {
|
||||
sendAsyncMessage("compartments-test:getStatistics", {snapshot, pid: Services.appinfo.processID});
|
||||
});
|
||||
} catch (ex) {
|
||||
Cu.reportError("Error in content (getStatistics): " + ex);
|
||||
Cu.reportError(ex.stack);
|
||||
}
|
||||
});
|
||||
|
||||
addMessageListener("compartments-test:setTitles", titles => {
|
||||
try {
|
||||
content.document.title = titles.data.parent;
|
||||
for (let i = 0; i < content.frames.length; ++i) {
|
||||
content.frames[i].postMessage({title: titles.data.frames}, "*");
|
||||
}
|
||||
console.log("content", "Done setting titles", content.document.title);
|
||||
sendAsyncMessage("compartments-test:setTitles");
|
||||
} catch (ex) {
|
||||
Cu.reportError("Error in content (setTitles): " + ex);
|
||||
Cu.reportError(ex.stack);
|
||||
}
|
||||
});
|
||||
} catch (ex) {
|
||||
Cu.reportError("Error in content (setup): " + ex);
|
||||
Cu.reportError(ex.stack);
|
||||
}
|
||||
}
|
||||
|
||||
// A variant of `Assert` that doesn't spam the logs
|
||||
// in case of success.
|
||||
var SilentAssert = {
|
||||
equal(a, b, msg) {
|
||||
if (a == b) {
|
||||
return;
|
||||
}
|
||||
Assert.equal(a, b, msg);
|
||||
},
|
||||
notEqual(a, b, msg) {
|
||||
if (a != b) {
|
||||
return;
|
||||
}
|
||||
Assert.notEqual(a, b, msg);
|
||||
},
|
||||
ok(a, msg) {
|
||||
if (a) {
|
||||
return;
|
||||
}
|
||||
Assert.ok(a, msg);
|
||||
},
|
||||
leq(a, b, msg) {
|
||||
this.ok(a <= b, `${msg}: ${a} <= ${b}`);
|
||||
},
|
||||
};
|
||||
|
||||
var isShuttingDown = false;
|
||||
function monotinicity_tester(source, testName) {
|
||||
// In the background, check invariants:
|
||||
// - numeric data can only ever increase;
|
||||
// - the name, isSystem of a component never changes;
|
||||
// - the name, isSystem of the process data;
|
||||
// - there is at most one component with `name`;
|
||||
// - types, etc.
|
||||
let previous = {
|
||||
processData: null,
|
||||
componentsMap: new Map(),
|
||||
};
|
||||
|
||||
let sanityCheck = function(prev, next) {
|
||||
if (prev == null) {
|
||||
return;
|
||||
}
|
||||
for (let k of ["groupId", "isSystem"]) {
|
||||
SilentAssert.equal(prev[k], next[k], `Sanity check (${testName}): ${k} hasn't changed (${prev.name}).`);
|
||||
}
|
||||
for (let [probe, k] of [
|
||||
["jank", "totalUserTime"],
|
||||
["jank", "totalSystemTime"],
|
||||
["cpow", "totalCPOWTime"],
|
||||
["ticks", "ticks"],
|
||||
]) {
|
||||
SilentAssert.equal(typeof next[probe][k], "number", `Sanity check (${testName}): ${k} is a number.`);
|
||||
SilentAssert.leq(prev[probe][k], next[probe][k], `Sanity check (${testName}): ${k} is monotonic.`);
|
||||
SilentAssert.leq(0, next[probe][k], `Sanity check (${testName}): ${k} is >= 0.`);
|
||||
}
|
||||
SilentAssert.equal(prev.jank.durations.length, next.jank.durations.length,
|
||||
`Sanity check (${testName}): Jank durations should be equal`);
|
||||
for (let i = 0; i < next.jank.durations.length; ++i) {
|
||||
SilentAssert.ok(typeof next.jank.durations[i] == "number" && next.jank.durations[i] >= 0,
|
||||
`Sanity check (${testName}): durations[${i}] is a non-negative number.`);
|
||||
SilentAssert.leq(prev.jank.durations[i], next.jank.durations[i],
|
||||
`Sanity check (${testName}): durations[${i}] is monotonic.`);
|
||||
}
|
||||
for (let i = 0; i < next.jank.durations.length - 1; ++i) {
|
||||
SilentAssert.leq(next.jank.durations[i + 1], next.jank.durations[i],
|
||||
`Sanity check (${testName}): durations[${i}] >= durations[${i + 1}].`);
|
||||
}
|
||||
};
|
||||
let iteration = 0;
|
||||
let frameCheck = async function() {
|
||||
if (isShuttingDown) {
|
||||
window.clearInterval(interval);
|
||||
return;
|
||||
}
|
||||
let name = `${testName}: ${iteration++}`;
|
||||
let result = await source();
|
||||
if (!result) {
|
||||
// This can happen at the end of the test when we attempt
|
||||
// to communicate too late with the content process.
|
||||
window.clearInterval(interval);
|
||||
return;
|
||||
}
|
||||
let {pid, snapshot} = result;
|
||||
|
||||
// Sanity check on the process data.
|
||||
sanityCheck(previous.processData, snapshot.processData);
|
||||
SilentAssert.equal(snapshot.processData.isSystem, true, "Should be system");
|
||||
SilentAssert.equal(snapshot.processData.name, "<process>", "Should have '<process>' name");
|
||||
SilentAssert.equal(snapshot.processData.processId, pid, "Process id should match");
|
||||
previous.processData = snapshot.processData;
|
||||
|
||||
// Sanity check on components data.
|
||||
let map = new Map();
|
||||
for (let item of snapshot.componentsData) {
|
||||
let isCorrectPid = (item.processId == pid && !item.isChildProcess)
|
||||
|| (item.processId != pid && item.isChildProcess);
|
||||
SilentAssert.ok(isCorrectPid, `Pid check (${name}): the item comes from the right process`);
|
||||
|
||||
let key = item.groupId;
|
||||
if (map.has(key)) {
|
||||
let old = map.get(key);
|
||||
Assert.ok(false, `Component ${key} has already been seen. Latest: ${item.name}, previous: ${old.name}`);
|
||||
}
|
||||
map.set(key, item);
|
||||
}
|
||||
for (let item of snapshot.componentsData) {
|
||||
if (!item.parentId) {
|
||||
continue;
|
||||
}
|
||||
let parent = map.get(item.parentId);
|
||||
SilentAssert.ok(parent, `The parent exists ${item.parentId}`);
|
||||
|
||||
for (let [probe, k] of [
|
||||
["jank", "totalUserTime"],
|
||||
["jank", "totalSystemTime"],
|
||||
["cpow", "totalCPOWTime"],
|
||||
]) {
|
||||
// Note that we cannot expect components data to be always smaller
|
||||
// than parent data, as `getrusage` & co are not monotonic.
|
||||
SilentAssert.leq(item[probe][k], 2 * parent[probe][k],
|
||||
`Sanity check (${testName}): ${k} of component is not impossibly larger than that of parent`);
|
||||
}
|
||||
}
|
||||
for (let [key, item] of map) {
|
||||
sanityCheck(previous.componentsMap.get(key), item);
|
||||
previous.componentsMap.set(key, item);
|
||||
}
|
||||
};
|
||||
let interval = window.setInterval(frameCheck, 300);
|
||||
registerCleanupFunction(() => {
|
||||
window.clearInterval(interval);
|
||||
});
|
||||
}
|
||||
|
||||
add_task(async function test() {
|
||||
let monitor = PerformanceStats.getMonitor(["jank", "cpow", "ticks"]);
|
||||
|
||||
info("Extracting initial state");
|
||||
let stats0 = await monitor.promiseSnapshot();
|
||||
Assert.notEqual(stats0.componentsData.length, 0, "There is more than one component");
|
||||
Assert.ok(!stats0.componentsData.find(stat => stat.name.includes(URL)),
|
||||
"The url doesn't appear yet");
|
||||
|
||||
let newTab = BrowserTestUtils.addTab(gBrowser);
|
||||
let browser = newTab.linkedBrowser;
|
||||
// Setup monitoring in the tab
|
||||
info("Setting up monitoring in the tab");
|
||||
await ContentTask.spawn(newTab.linkedBrowser, null, frameScript);
|
||||
|
||||
info("Opening URL");
|
||||
BrowserTestUtils.loadURI(newTab.linkedBrowser, URL);
|
||||
|
||||
if (Services.sysinfo.getPropertyAsAString("name") == "Windows_NT") {
|
||||
info("Deactivating sanity checks under Windows (bug 1151240)");
|
||||
} else {
|
||||
info("Setting up sanity checks");
|
||||
monotinicity_tester(() => monitor.promiseSnapshot().then(snapshot => ({snapshot, pid: PARENT_PID})), "parent process");
|
||||
monotinicity_tester(() => promiseContentResponseOrNull(browser, "compartments-test:getStatistics", null), "content process" );
|
||||
}
|
||||
|
||||
let skipTotalUserTime = hasLowPrecision();
|
||||
|
||||
|
||||
while (true) {
|
||||
await new Promise(resolve => setTimeout(resolve, 100));
|
||||
|
||||
// We may have race conditions with DOM loading.
|
||||
// Don't waste too much brainpower here, let's just ask
|
||||
// repeatedly for the title to be changed, until this works.
|
||||
info("Setting titles");
|
||||
await promiseContentResponse(browser, "compartments-test:setTitles", {
|
||||
parent: PARENT_TITLE,
|
||||
frames: FRAME_TITLE,
|
||||
});
|
||||
info("Titles set");
|
||||
|
||||
let {snapshot: stats} = (await promiseContentResponse(browser, "compartments-test:getStatistics", null));
|
||||
|
||||
// Attach titles to components.
|
||||
let titles = [];
|
||||
let map = new Map();
|
||||
for (let window of Services.wm.getEnumerator("navigator:browser")) {
|
||||
let tabbrowser = window.gBrowser;
|
||||
for (let browser of tabbrowser.browsers) {
|
||||
let id = browser.outerWindowID; // May be `null` if the browser isn't loaded yet
|
||||
if (id != null) {
|
||||
map.set(id, browser);
|
||||
}
|
||||
}
|
||||
}
|
||||
for (let stat of stats.componentsData) {
|
||||
if (!stat.windowId) {
|
||||
continue;
|
||||
}
|
||||
let browser = map.get(stat.windowId);
|
||||
if (!browser) {
|
||||
continue;
|
||||
}
|
||||
let title = browser.contentTitle;
|
||||
if (title) {
|
||||
stat.title = title;
|
||||
titles.push(title);
|
||||
}
|
||||
}
|
||||
|
||||
// While the webpage consists of three compartments, we should see only
|
||||
// one `PerformanceData` in `componentsData`. Its `name` is undefined
|
||||
// (could be either the main frame or one of its subframes), but its
|
||||
// `title` should be the title of the main frame.
|
||||
info(`Searching for frame title '${FRAME_TITLE}' in ${JSON.stringify(titles)} (I hope not to find it)`);
|
||||
Assert.ok(!titles.includes(FRAME_TITLE), "Searching by title, the frames don't show up in the list of components");
|
||||
|
||||
info(`Searching for window title '${PARENT_TITLE}' in ${JSON.stringify(titles)} (I hope to find it)`);
|
||||
let parent = stats.componentsData.find(x => x.title == PARENT_TITLE);
|
||||
if (!parent) {
|
||||
info("Searching by title, we didn't find the main frame");
|
||||
continue;
|
||||
}
|
||||
info("Found the main frame");
|
||||
|
||||
if (skipTotalUserTime) {
|
||||
info("Not looking for total user time on this platform, we're done");
|
||||
break;
|
||||
} else if (parent.jank.totalUserTime > 1000) {
|
||||
info("Enough CPU time detected, we're done");
|
||||
break;
|
||||
} else {
|
||||
info(`Not enough CPU time detected: ${parent.jank.totalUserTime}`);
|
||||
}
|
||||
}
|
||||
isShuttingDown = true;
|
||||
|
||||
// Cleanup
|
||||
gBrowser.removeTab(newTab, {skipPermitUnload: true});
|
||||
});
|
@ -1,12 +0,0 @@
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<title>
|
||||
Subframe for test browser_compartments.html (do not change this title)
|
||||
</title>
|
||||
<script src="browser_compartments_script.js"></script>
|
||||
</head>
|
||||
<body>
|
||||
Subframe loaded.
|
||||
</body>
|
||||
</html>
|
@ -1,29 +0,0 @@
|
||||
|
||||
var carryOn = true;
|
||||
|
||||
window.addEventListener("message", e => {
|
||||
console.log("frame content", "message", e);
|
||||
if ("title" in e.data) {
|
||||
document.title = e.data.title;
|
||||
}
|
||||
if ("stop" in e.data) {
|
||||
carryOn = false;
|
||||
}
|
||||
});
|
||||
|
||||
// Use some CPU.
|
||||
var interval = window.setInterval(() => {
|
||||
if (!carryOn) {
|
||||
window.clearInterval(interval);
|
||||
return;
|
||||
}
|
||||
|
||||
// Compute an arbitrary value, print it out to make sure that the JS
|
||||
// engine doesn't discard all our computation.
|
||||
var date = Date.now();
|
||||
var array = [];
|
||||
var i = 0;
|
||||
while (Date.now() - date <= 100) {
|
||||
array[i % 2] = i++;
|
||||
}
|
||||
}, 300);
|
@ -1,114 +0,0 @@
|
||||
/* eslint-disable mozilla/no-arbitrary-setTimeout */
|
||||
"use strict";
|
||||
|
||||
/**
|
||||
* Tests for PerformanceWatcher watching slow web pages.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Simulate a slow webpage.
|
||||
*/
|
||||
function WebpageBurner() {
|
||||
CPUBurner.call(this, "http://example.com/browser/toolkit/components/perfmonitoring/tests/browser/browser_compartments.html?test=" + Math.random(), 300000);
|
||||
}
|
||||
WebpageBurner.prototype = Object.create(CPUBurner.prototype);
|
||||
WebpageBurner.prototype.promiseBurnContentCPU = function() {
|
||||
return promiseContentResponse(this._browser, "test-performance-watcher:burn-content-cpu", {});
|
||||
};
|
||||
|
||||
function WebpageListener(windowId, accept) {
|
||||
info(`Creating WebpageListener for ${windowId}`);
|
||||
AlertListener.call(this, accept, {
|
||||
register: () => PerformanceWatcher.addPerformanceListener({windowId}, this.listener),
|
||||
unregister: () => PerformanceWatcher.removePerformanceListener({windowId}, this.listener),
|
||||
});
|
||||
}
|
||||
WebpageListener.prototype = Object.create(AlertListener.prototype);
|
||||
|
||||
add_task(async function init() {
|
||||
// Get rid of buffering.
|
||||
let service = Cc["@mozilla.org/toolkit/performance-stats-service;1"].getService(
|
||||
Ci.nsIPerformanceStatsService);
|
||||
let oldDelay = service.jankAlertBufferingDelay;
|
||||
|
||||
service.jankAlertBufferingDelay = 0 /* ms */;
|
||||
registerCleanupFunction(() => {
|
||||
info("Cleanup");
|
||||
service.jankAlertBufferingDelay = oldDelay;
|
||||
});
|
||||
});
|
||||
|
||||
add_task(async function test_open_window_then_watch_it() {
|
||||
let burner = new WebpageBurner();
|
||||
await burner.promiseInitialized;
|
||||
await burner.promiseBurnContentCPU();
|
||||
|
||||
info(`Check that burning CPU triggers the real listener, but not the fake listener`);
|
||||
let realListener = new WebpageListener(burner.windowId, (group, details) => {
|
||||
info(`test: realListener for ${burner.tab.linkedBrowser.outerWindowID}: ${group}, ${details}\n`);
|
||||
Assert.equal(group.windowId, burner.windowId, "We should not receive data meant for another group");
|
||||
return details;
|
||||
}); // This listener should be triggered.
|
||||
|
||||
info(`Creating fake burner`);
|
||||
let otherTab = BrowserTestUtils.addTab(gBrowser);
|
||||
await BrowserTestUtils.browserLoaded(otherTab.linkedBrowser);
|
||||
info(`Check that burning CPU triggers the real listener, but not the fake listener`);
|
||||
let fakeListener = new WebpageListener(otherTab.linkedBrowser.outerWindowID, group => group.windowId == burner.windowId); // This listener should never be triggered.
|
||||
let universalListener = new WebpageListener(0, alerts =>
|
||||
alerts.find(alert => alert.source.windowId == burner.windowId)
|
||||
);
|
||||
|
||||
// Waiting a little – listeners are buffered.
|
||||
await new Promise(resolve => setTimeout(resolve, 100));
|
||||
|
||||
await burner.run("promiseBurnContentCPU", 20, realListener);
|
||||
Assert.ok(realListener.triggered, `1. The real listener was triggered`);
|
||||
Assert.ok(universalListener.triggered, `1. The universal listener was triggered`);
|
||||
Assert.ok(!fakeListener.triggered, `1. The fake listener was not triggered`);
|
||||
|
||||
if (realListener.result) {
|
||||
Assert.ok(realListener.result.highestJank >= 300, `1. jank is at least 300ms (${realListener.result.highestJank}ms)`);
|
||||
}
|
||||
|
||||
info(`Attempting to remove a performance listener incorrectly, check that this does not hurt our real listener`);
|
||||
Assert.throws(() => PerformanceWatcher.removePerformanceListener({windowId: burner.windowId}, () => {}),
|
||||
/No listener for target/, "should throw an error for a different listener");
|
||||
Assert.throws(() => PerformanceWatcher.removePerformanceListener({windowId: burner.windowId + "-unbound-id-" + Math.random()}, realListener.listener),
|
||||
/No listener for target/, "should throw an error for a different window id");
|
||||
|
||||
// Waiting a little – listeners are buffered.
|
||||
await new Promise(resolve => setTimeout(resolve, 100));
|
||||
await burner.run("promiseBurnContentCPU", 20, realListener);
|
||||
// Waiting a little – listeners are buffered.
|
||||
await new Promise(resolve => setTimeout(resolve, 100));
|
||||
|
||||
Assert.ok(realListener.triggered, `2. The real listener was triggered`);
|
||||
Assert.ok(universalListener.triggered, `2. The universal listener was triggered`);
|
||||
Assert.ok(!fakeListener.triggered, `2. The fake listener was not triggered`);
|
||||
if (realListener.result) {
|
||||
Assert.ok(realListener.result.highestJank >= 300, `2. jank is at least 300ms (${realListener.result.highestJank}ms)`);
|
||||
}
|
||||
|
||||
info(`Attempting to remove correctly, check if the listener is still triggered`);
|
||||
// Waiting a little – listeners are buffered.
|
||||
await new Promise(resolve => setTimeout(resolve, 100));
|
||||
realListener.unregister();
|
||||
|
||||
// Waiting a little – listeners are buffered.
|
||||
await new Promise(resolve => setTimeout(resolve, 100));
|
||||
await burner.run("promiseBurnContentCPU", 3, realListener);
|
||||
Assert.ok(!realListener.triggered, `3. After being unregistered, the real listener was not triggered`);
|
||||
Assert.ok(universalListener.triggered, `3. The universal listener is still triggered`);
|
||||
|
||||
universalListener.unregister();
|
||||
|
||||
// Waiting a little – listeners are buffered.
|
||||
await new Promise(resolve => setTimeout(resolve, 100));
|
||||
await burner.run("promiseBurnContentCPU", 3, realListener);
|
||||
Assert.ok(!universalListener.triggered, `4. After being unregistered, the universal listener is not triggered`);
|
||||
|
||||
fakeListener.unregister();
|
||||
burner.dispose();
|
||||
gBrowser.removeTab(otherTab);
|
||||
});
|
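Outside the test harness, the same PerformanceWatcher calls exercised above could be used roughly as follows. This is a hedged sketch: `gBrowser` is assumed to be in scope (browser chrome), and the callback shapes simply mirror what the listeners in this test receive.

// Hypothetical sketch based on the calls used in the test above.
ChromeUtils.import("resource://gre/modules/PerformanceWatcher.jsm", this);

function onSlowWindow(group, details) {
  // The assertions above treat `highestJank` as milliseconds.
  console.log(`Window ${group.windowId} janked for ${details.highestJank} ms`);
}

let windowId = gBrowser.selectedBrowser.outerWindowID;   // assumed chrome scope
PerformanceWatcher.addPerformanceListener({windowId}, onSlowWindow);

// Special id 0: a single listener for all windows, called with a batch of alerts.
PerformanceWatcher.addPerformanceListener({windowId: 0}, alerts => {
  for (let alert of alerts) {
    console.log(`Slow window ${alert.source.windowId}`);
  }
});

// Unregister with the same target/listener pair, otherwise the call throws
// "No listener for target" (as checked in the test above).
PerformanceWatcher.removePerformanceListener({windowId}, onSlowWindow);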
@ -1,283 +0,0 @@
|
||||
/* Any copyright is dedicated to the Public Domain.
|
||||
* http://creativecommons.org/publicdomain/zero/1.0/ */
|
||||
|
||||
/* eslint-env mozilla/frame-script */
|
||||
|
||||
ChromeUtils.import("resource://gre/modules/AddonManager.jsm", this);
|
||||
ChromeUtils.import("resource://gre/modules/PerformanceWatcher.jsm", this);
|
||||
ChromeUtils.import("resource://gre/modules/Services.jsm", this);
|
||||
ChromeUtils.import("resource://testing-common/ContentTaskUtils.jsm", this);
|
||||
|
||||
/**
|
||||
* Base class for simulating slow addons/webpages.
|
||||
*/
|
||||
function CPUBurner(url, jankThreshold) {
|
||||
info(`CPUBurner: Opening tab for ${url}\n`);
|
||||
this.url = url;
|
||||
this.tab = BrowserTestUtils.addTab(gBrowser, url);
|
||||
this.jankThreshold = jankThreshold;
|
||||
let browser = this.tab.linkedBrowser;
|
||||
this._browser = browser;
|
||||
ContentTask.spawn(this._browser, null, CPUBurner.frameScript);
|
||||
this.promiseInitialized = BrowserTestUtils.browserLoaded(browser);
|
||||
}
|
||||
CPUBurner.prototype = {
|
||||
get windowId() {
|
||||
return this._browser.outerWindowID;
|
||||
},
|
||||
/**
|
||||
* Burn CPU until it triggers a listener with the specified jank threshold.
|
||||
*/
|
||||
async run(burner, max, listener) {
|
||||
listener.reset();
|
||||
for (let i = 0; i < max; ++i) {
|
||||
await new Promise(resolve => setTimeout(resolve, 50));
|
||||
try {
|
||||
await this[burner]();
|
||||
} catch (ex) {
|
||||
return false;
|
||||
}
|
||||
if (listener.triggered && listener.result >= this.jankThreshold) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
},
|
||||
dispose() {
|
||||
info(`CPUBurner: Closing tab for ${this.url}\n`);
|
||||
gBrowser.removeTab(this.tab);
|
||||
},
|
||||
};
|
||||
// This function is injected in all frames
|
||||
CPUBurner.frameScript = function() {
|
||||
try {
|
||||
"use strict";
|
||||
|
||||
let sandboxes = new Map();
|
||||
let getSandbox = function(addonId) {
|
||||
let sandbox = sandboxes.get(addonId);
|
||||
if (!sandbox) {
|
||||
sandbox = Cu.Sandbox(Services.scriptSecurityManager.getSystemPrincipal(), { addonId });
|
||||
sandboxes.set(addonId, sandbox);
|
||||
}
|
||||
return sandbox;
|
||||
};
|
||||
|
||||
let burnCPU = function() {
|
||||
var start = Date.now();
|
||||
var ignored = [];
|
||||
while (Date.now() - start < 500) {
|
||||
ignored[ignored.length % 2] = ignored.length;
|
||||
}
|
||||
};
|
||||
let burnCPUInSandbox = function(addonId) {
|
||||
let sandbox = getSandbox(addonId);
|
||||
Cu.evalInSandbox(burnCPU.toSource() + "()", sandbox);
|
||||
};
|
||||
|
||||
{
|
||||
let topic = "test-performance-watcher:burn-content-cpu";
|
||||
addMessageListener(topic, function(msg) {
|
||||
try {
|
||||
if (msg.data && msg.data.addonId) {
|
||||
burnCPUInSandbox(msg.data.addonId);
|
||||
} else {
|
||||
burnCPU();
|
||||
}
|
||||
sendAsyncMessage(topic, {});
|
||||
} catch (ex) {
|
||||
dump(`This is the content attempting to burn CPU: error ${ex}\n`);
|
||||
dump(`${ex.stack}\n`);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// Bind the function to the global context or it might be GC'd during test
|
||||
// causing failures (bug 1230027)
|
||||
this.burnCPOWInSandbox = function(addonId) {
|
||||
try {
|
||||
burnCPUInSandbox(addonId);
|
||||
} catch (ex) {
|
||||
dump(`This is the addon attempting to burn CPOW: error ${ex}\n`);
|
||||
dump(`${ex.stack}\n`);
|
||||
}
|
||||
};
|
||||
|
||||
sendAsyncMessage("test-performance-watcher:cpow-init", {}, {
|
||||
burnCPOWInSandbox: this.burnCPOWInSandbox,
|
||||
});
|
||||
|
||||
} catch (ex) {
|
||||
Cu.reportError("This is the addon: error " + ex);
|
||||
Cu.reportError(ex.stack);
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Base class for listening to slow group alerts
|
||||
*/
|
||||
function AlertListener(accept, {register, unregister}) {
|
||||
this.listener = (...args) => {
|
||||
if (this._unregistered) {
|
||||
throw new Error("Listener was unregistered");
|
||||
}
|
||||
let result = accept(...args);
|
||||
if (!result) {
|
||||
return;
|
||||
}
|
||||
this.result = result;
|
||||
this.triggered = true;
|
||||
};
|
||||
this.triggered = false;
|
||||
this.result = null;
|
||||
this._unregistered = false;
|
||||
this._unregister = unregister;
|
||||
registerCleanupFunction(() => {
|
||||
this.unregister();
|
||||
});
|
||||
register();
|
||||
}
|
||||
AlertListener.prototype = {
|
||||
unregister() {
|
||||
this.reset();
|
||||
if (this._unregistered) {
|
||||
info(`head.js: No need to unregister, we're already unregistered.\n`);
|
||||
return;
|
||||
}
|
||||
info(`head.js: Unregistering listener.\n`);
|
||||
this._unregistered = true;
|
||||
this._unregister();
|
||||
info(`head.js: Unregistration complete.\n`);
|
||||
},
|
||||
reset() {
|
||||
this.triggered = false;
|
||||
this.result = null;
|
||||
},
|
||||
};
|
||||
|
||||
/**
|
||||
* Simulate a slow add-on.
|
||||
*/
|
||||
function AddonBurner(addonId = "fake add-on id: " + Math.random()) {
|
||||
this.jankThreshold = 200000;
|
||||
CPUBurner.call(this, `http://example.com/?uri=${addonId}`, this.jankThreshold);
|
||||
this._addonId = addonId;
|
||||
this._sandbox = Cu.Sandbox(Services.scriptSecurityManager.getSystemPrincipal(), { addonId: this._addonId });
|
||||
this._CPOWBurner = null;
|
||||
|
||||
this._promiseCPOWBurner = new Promise(resolve => {
|
||||
this._browser.messageManager.addMessageListener("test-performance-watcher:cpow-init", msg => {
|
||||
// Note that we cannot resolve Promises with CPOWs now that they
|
||||
// have been outlawed in bug 1233497, so we stash it in the
|
||||
// AddonBurner instance instead.
|
||||
this._CPOWBurner = msg.objects.burnCPOWInSandbox;
|
||||
resolve();
|
||||
});
|
||||
});
|
||||
}
|
||||
AddonBurner.prototype = Object.create(CPUBurner.prototype);
|
||||
Object.defineProperty(AddonBurner.prototype, "addonId", {
|
||||
get() {
|
||||
return this._addonId;
|
||||
},
|
||||
});
|
||||
|
||||
/**
|
||||
* Simulate slow code being executed by the add-on in the chrome.
|
||||
*/
|
||||
AddonBurner.prototype.burnCPU = function() {
|
||||
Cu.evalInSandbox(AddonBurner.burnCPU.toSource() + "()", this._sandbox);
|
||||
};
|
||||
|
||||
/**
|
||||
* Simulate slow code being executed by the add-on in a CPOW.
|
||||
*/
|
||||
AddonBurner.prototype.promiseBurnCPOW = async function() {
|
||||
await this._promiseCPOWBurner;
|
||||
ok(this._CPOWBurner, "Got the CPOW burner");
|
||||
let burner = this._CPOWBurner;
|
||||
info("Parent: Preparing to burn CPOW");
|
||||
try {
|
||||
await burner(this._addonId);
|
||||
info("Parent: Done burning CPOW");
|
||||
} catch (ex) {
|
||||
info(`Parent: Error burning CPOW: ${ex}\n`);
|
||||
info(ex.stack + "\n");
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Simulate slow code being executed by the add-on in the content.
|
||||
*/
|
||||
AddonBurner.prototype.promiseBurnContentCPU = function() {
|
||||
return promiseContentResponse(this._browser, "test-performance-watcher:burn-content-cpu", {addonId: this._addonId});
|
||||
};
|
||||
AddonBurner.burnCPU = function() {
|
||||
var start = Date.now();
|
||||
var ignored = [];
|
||||
while (Date.now() - start < 500) {
|
||||
ignored[ignored.length % 2] = ignored.length;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
function AddonListener(addonId, accept) {
|
||||
let target = {addonId};
|
||||
AlertListener.call(this, accept, {
|
||||
register: () => {
|
||||
info(`AddonListener: registering ${JSON.stringify(target, null, "\t")}`);
|
||||
PerformanceWatcher.addPerformanceListener({addonId}, this.listener);
|
||||
},
|
||||
unregister: () => {
|
||||
info(`AddonListener: unregistering ${JSON.stringify(target, null, "\t")}`);
|
||||
PerformanceWatcher.removePerformanceListener({addonId}, this.listener);
|
||||
},
|
||||
});
|
||||
}
|
||||
AddonListener.prototype = Object.create(AlertListener.prototype);
|
||||
|
||||
function promiseContentResponse(browser, name, message) {
|
||||
let mm = browser.messageManager;
|
||||
let promise = new Promise(resolve => {
|
||||
function removeListener() {
|
||||
mm.removeMessageListener(name, listener);
|
||||
}
|
||||
|
||||
function listener(msg) {
|
||||
removeListener();
|
||||
resolve(msg.data);
|
||||
}
|
||||
|
||||
mm.addMessageListener(name, listener);
|
||||
registerCleanupFunction(removeListener);
|
||||
});
|
||||
mm.sendAsyncMessage(name, message);
|
||||
return promise;
|
||||
}
|
||||
function promiseContentResponseOrNull(browser, name, message) {
|
||||
if (!browser.messageManager) {
|
||||
return null;
|
||||
}
|
||||
return promiseContentResponse(browser, name, message);
|
||||
}
|
||||
|
||||
/**
|
||||
* `true` if we are running an OS in which the OS performance
|
||||
* clock has a low precision and might unpredictably
|
||||
* never be updated during the execution of the test.
|
||||
*/
|
||||
function hasLowPrecision() {
|
||||
let [sysName, sysVersion] = [Services.sysinfo.getPropertyAsAString("name"), Services.sysinfo.getPropertyAsDouble("version")];
|
||||
info(`Running ${sysName} version ${sysVersion}`);
|
||||
|
||||
if (sysName == "Windows_NT" && sysVersion < 6) {
|
||||
info("Running old Windows, need to deactivate tests due to bad precision.");
|
||||
return true;
|
||||
}
|
||||
if (sysName == "Linux" && sysVersion <= 2.6) {
|
||||
info("Running old Linux, need to deactivate tests due to bad precision.");
|
||||
return true;
|
||||
}
|
||||
info("This platform has good precision.");
|
||||
return false;
|
||||
}
|