Bug 1123237 - Part 8. Tracking the memory events. r=BenWa,terrence

Based on patch from Ting-Yuan Huang <laszio.bugzilla@gmail.com>
This commit is contained in:
Kan-Ru Chen 2015-05-08 11:23:20 +08:00
parent 10b9f95c15
commit e1fa14415d
12 changed files with 1151 additions and 25 deletions

View File

@ -0,0 +1,134 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef memory_profiler_CompactTraceTable_h
#define memory_profiler_CompactTraceTable_h
#include "UncensoredAllocator.h"
#include <functional>
#include <utility>
#include "mozilla/HashFunctions.h"
namespace mozilla {
// One frame of a backtrace stored in the trie: the parent node's slot in
// the trie plus an index into the shared function-name table.
struct TrieNode final
{
  uint32_t parentIdx;
  uint32_t nameIdx;

  // Two nodes are equal when both the parent link and the name index match.
  bool operator==(const TrieNode t) const
  {
    return t.parentIdx == parentIdx && t.nameIdx == nameIdx;
  }
};
} // namespace mozilla
namespace std {
// Hash a TrieNode by packing its two 32-bit indices into a single 64-bit
// key and deferring to the standard 64-bit hasher.
template<>
struct hash<mozilla::TrieNode>
{
  size_t operator()(const mozilla::TrieNode& v) const
  {
    const uint64_t key = (static_cast<uint64_t>(v.parentIdx) << 32) | v.nameIdx;
    return std::hash<uint64_t>()(key);
  }
};
// Hash an uncensored-allocator string by its character data.
// NOTE(review): hashes the NUL-terminated contents via c_str(), so any
// embedded NULs would be ignored — presumably frame names never contain
// them; confirm.
template<>
struct hash<mozilla::u_string>
{
  size_t operator()(const mozilla::u_string& v) const
  {
    return mozilla::HashString(v.c_str());
  }
};
} // namespace std
namespace mozilla {
// This class maps a Node of type T to its parent's index in the
// map. When serializing, the map is traversed and put into an ordered
// vector of Nodes.
template<typename T>
class NodeIndexMap final
{
public:
  // Returns the index assigned to e; an element not seen before is given
  // the next free index (the current map size).
  uint32_t Insert(const T& e)
  {
    const uint32_t nextIdx = static_cast<uint32_t>(mMap.size());
    auto inserted = mMap.insert(std::make_pair(e, nextIdx));
    return inserted.first->second;
  }

  // Flattens the map into a vector ordered by assigned index.
  u_vector<T> Serialize() const
  {
    u_vector<T> ordered(mMap.size());
    for (const auto& entry : mMap) {
      ordered[entry.second] = entry.first;
    }
    return ordered;
  }

  // Number of distinct elements inserted so far.
  uint32_t Size() const
  {
    return mMap.size();
  }

  // Drops all elements; previously returned indices become stale.
  void Clear()
  {
    mMap.clear();
  }

private:
  u_unordered_map<T, uint32_t> mMap;
};
// Backtraces are stored in a trie to save spaces.
// Function names are stored in an unique table and TrieNodes contain indexes
// into that table.
// The trie is implemented with a hash table; children are stored in
// traces[TrieNode{parent node index, branch/function name index}].
class CompactTraceTable final
{
public:
  CompactTraceTable()
  {
    InitSentinels();
  }

  // All function names seen so far, ordered by index.
  u_vector<u_string> GetNames() const
  {
    return mNames.Serialize();
  }

  // All trie nodes seen so far, ordered by index.
  u_vector<TrieNode> GetTraces() const
  {
    return mTraces.Serialize();
  }

  // Returns an ID to a stacktrace.
  uint32_t Insert(const u_vector<u_string>& aRawStacktrace)
  {
    uint32_t parent = 0;
    for (auto& frame: aRawStacktrace) {
      parent = mTraces.Insert(TrieNode{parent, mNames.Insert(frame)});
    }
    return parent;
  }

  void Reset()
  {
    mNames.Clear();
    mTraces.Clear();
    // Re-establish the constructor's invariant: Insert() assumes name
    // index 0 is "(unknown)" and trace index 0 is the root node. Without
    // this, the first trace inserted after a Reset() would alias the root
    // and the serialized tables would be inconsistent.
    InitSentinels();
  }

private:
  // Slot 0 of both tables is reserved for the "(unknown)" root.
  void InitSentinels()
  {
    mNames.Insert("(unknown)");
    mTraces.Insert(TrieNode{0, 0});
  }

  NodeIndexMap<u_string> mNames;
  NodeIndexMap<TrieNode> mTraces;
};
} // namespace mozilla
#endif // memory_profiler_CompactTraceTable_h

View File

@ -0,0 +1,163 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "GCHeapProfilerImpl.h"
#include "mozilla/TimeStamp.h"
#include "prlock.h"
namespace mozilla {
// Create the GC-heap profiler with its own NSPR lock; marking is off.
GCHeapProfilerImpl::GCHeapProfilerImpl()
  : mLock(PR_NewLock())
  , mMarking(false)
{
}
// Release the NSPR lock if it was successfully created.
GCHeapProfilerImpl::~GCHeapProfilerImpl()
{
  if (mLock) {
    PR_DestroyLock(mLock);
  }
}
// Serialized function-name table backing the recorded traces.
u_vector<u_string>
GCHeapProfilerImpl::GetNames() const
{
  return mTraceTable.GetNames();
}
// Serialized backtrace trie, ordered by node index.
u_vector<TrieNode>
GCHeapProfilerImpl::GetTraces() const
{
  return mTraceTable.GetTraces();
}
// The append-only log of allocation/free events recorded so far.
const u_vector<AllocEvent>&
GCHeapProfilerImpl::GetEvents() const
{
  return mAllocEvents;
}
// Drop all recorded traces, events and live-object tables.
// NOTE(review): unlike the other mutators this takes no AutoMPLock —
// presumably only called while sampling is stopped; confirm with callers.
void
GCHeapProfilerImpl::reset()
{
  mTraceTable.Reset();
  mAllocEvents.clear();
  mNurseryEntries.clear();
  mTenuredEntriesFG.clear();
  mTenuredEntriesBG.clear();
}
// Record a sampled allocation in the tenured heap (foreground table).
void
GCHeapProfilerImpl::sampleTenured(void* addr, uint32_t size)
{
  SampleInternal(addr, size, mTenuredEntriesFG);
}
// Record a sampled allocation in the nursery.
void
GCHeapProfilerImpl::sampleNursery(void* addr, uint32_t size)
{
  SampleInternal(addr, size, mNurseryEntries);
}
// Begin a mark phase: swap the tenured entries into the background table
// so markTenured()/sweepTenured() can process them while new allocations
// keep landing in the (now empty) foreground table.
void
GCHeapProfilerImpl::markTenuredStart()
{
  AutoMPLock lock(mLock);
  if (!mMarking) {
    mMarking = true;
    Swap(mTenuredEntriesFG, mTenuredEntriesBG);
    // After the swap FG holds the previous BG contents, which the last
    // sweep must have drained.
    MOZ_ASSERT(mTenuredEntriesFG.empty());
  }
}
// Mark a tracked tenured object as alive during the current mark phase.
// Outside a mark phase, or for untracked addresses, this is a no-op.
void
GCHeapProfilerImpl::markTenured(void* addr)
{
  AutoMPLock lock(mLock);
  if (!mMarking) {
    return;
  }
  auto entry = mTenuredEntriesBG.find(addr);
  if (entry != mTenuredEntriesBG.end()) {
    entry->second.mMarked = true;
  }
}
// Finish a mark phase: surviving (marked) objects move back to the
// foreground table with their mark cleared; unmarked objects are dead, so
// a compensating free event with the negated allocation size is logged.
void
GCHeapProfilerImpl::sweepTenured()
{
  AutoMPLock lock(mLock);
  if (mMarking) {
    mMarking = false;
    for (auto& entry: mTenuredEntriesBG) {
      if (entry.second.mMarked) {
        entry.second.mMarked = false;
        mTenuredEntriesFG.insert(entry);
      } else {
        // Negative size records the free, timestamped now.
        AllocEvent& oldEvent = mAllocEvents[entry.second.mEventIdx];
        AllocEvent newEvent(oldEvent.mTraceIdx, -oldEvent.mSize, TimeStamp::Now());
        mAllocEvents.push_back(newEvent);
      }
    }
    mTenuredEntriesBG.clear();
  }
}
void
GCHeapProfilerImpl::sweepNursery()
{
AutoMPLock lock(mLock);
for (auto& entry: mNurseryEntries) {
AllocEvent& oldEvent = mAllocEvents[entry.second.mEventIdx];
AllocEvent newEvent(oldEvent.mTraceIdx, -oldEvent.mSize, TimeStamp::Now());
mAllocEvents.push_back(newEvent);
}
mNurseryEntries.clear();
}
// A nursery object survived a minor GC and was relocated to the tenured
// heap: transfer its bookkeeping from the nursery table to the tenured
// foreground table under the new address.
void
GCHeapProfilerImpl::moveNurseryToTenured(void* addrOld, void* addrNew)
{
  AutoMPLock lock(mLock);
  auto iterOld = mNurseryEntries.find(addrOld);
  if (iterOld == mNurseryEntries.end()) {
    return;
  }
  // Because the tenured heap is sampled, the address might already be there.
  // If not, the address is inserted with the old event.
  auto res = mTenuredEntriesFG.insert(
    std::make_pair(addrNew, AllocEntry(iterOld->second.mEventIdx)));
  auto iterNew = res.first;
  // If it is already inserted, the insertion above will fail and the
  // iterator of the already-inserted element is returned.
  // We choose to ignore the new event by setting its size zero and point
  // the newly allocated address to the old event.
  // An event of size zero will be skipped when reporting.
  if (!res.second) {
    mAllocEvents[iterNew->second.mEventIdx].mSize = 0;
    iterNew->second.mEventIdx = iterOld->second.mEventIdx;
  }
  mNurseryEntries.erase(iterOld);
}
// Shared sampling path: decide how many samples aSize contributes and, if
// any, record one allocation event attributed to the current backtrace and
// remember the address in aTable so a later sweep/free can negate it.
void
GCHeapProfilerImpl::SampleInternal(void* aAddr, uint32_t aSize, AllocMap& aTable)
{
  AutoMPLock lock(mLock);
  size_t nSamples = AddBytesSampled(aSize);
  if (nSamples > 0) {
    u_vector<u_string> trace = GetStacktrace();
    // Each sample stands for mSampleSize bytes on average.
    AllocEvent ai(mTraceTable.Insert(trace), nSamples * mSampleSize, TimeStamp::Now());
    // The entry's event index must be taken before the push_back below.
    aTable.insert(std::make_pair(aAddr, AllocEntry(mAllocEvents.size())));
    mAllocEvents.push_back(ai);
  }
}
} // namespace mozilla

View File

@ -0,0 +1,53 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef memory_profiler_GCHeapProfilerImpl_h
#define memory_profiler_GCHeapProfilerImpl_h
#include "CompactTraceTable.h"
#include "MemoryProfiler.h"
#include "jsfriendapi.h"
namespace mozilla {
// GC-heap side of the memory profiler: implements the GCHeapProfiler
// callbacks invoked by the JS engine on top of the shared sampling
// machinery in ProfilerImpl.
class GCHeapProfilerImpl final : public GCHeapProfiler
                               , public ProfilerImpl
{
public:
  GCHeapProfilerImpl();
  ~GCHeapProfilerImpl() override;

  u_vector<u_string> GetNames() const override;
  u_vector<TrieNode> GetTraces() const override;
  const u_vector<AllocEvent>& GetEvents() const override;

  void reset() override;
  void sampleTenured(void* addr, uint32_t size) override;
  void sampleNursery(void* addr, uint32_t size) override;
  void markTenuredStart() override;
  void markTenured(void* addr) override;
  void sweepTenured() override;
  void sweepNursery() override;
  void moveNurseryToTenured(void* addrOld, void* addrNew) override;

private:
  // Common path for sampleTenured/sampleNursery; records into aTable.
  void SampleInternal(void* addr, uint32_t size, AllocMap& table);

  // Guards the tables and the event log below.
  PRLock* mLock;
  // True between markTenuredStart() and sweepTenured().
  bool mMarking;

  // Live sampled objects keyed by address.
  AllocMap mNurseryEntries;    // nursery allocations
  AllocMap mTenuredEntriesFG;  // tenured, currently receiving samples
  AllocMap mTenuredEntriesBG;  // tenured, being marked/swept
  // Append-only log of allocations (positive size) and frees (negative).
  u_vector<AllocEvent> mAllocEvents;
  CompactTraceTable mTraceTable;
};
} // namespace mozilla
#endif // memory_profiler_GCHeapProfilerImpl_h

View File

@ -1,46 +1,319 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file, * License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */ * You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "MemoryProfiler.h" #include "MemoryProfiler.h"
#include <cmath>
#include <cstdlib>
#include "mozilla/Compiler.h"
#include "mozilla/Move.h"
#include "mozilla/TimeStamp.h"
#include "mozilla/unused.h"
#include "GCHeapProfilerImpl.h"
#include "GeckoProfiler.h"
#include "NativeProfilerImpl.h"
#include "UncensoredAllocator.h"
#include "js/TypeDecls.h"
#include "jsfriendapi.h"
#include "nsIDOMClassInfo.h" #include "nsIDOMClassInfo.h"
#include "nsIGlobalObject.h" #include "nsIGlobalObject.h"
#include "js/TypeDecls.h"
#include "xpcprivate.h" #include "xpcprivate.h"
struct JSRuntime;
#if MOZ_USING_STLPORT
namespace std {
// STLport (see the surrounding MOZ_USING_STLPORT guard) lacks a generic
// hash for arbitrary pointer types; forward any T* to the void* hasher so
// pointer-keyed containers work there too.
template<class T>
struct hash<T*>
{
  size_t operator()(T* v) const
  {
    return hash<void*>()(static_cast<void*>(v));
  }
};
} // namespace std
#endif
namespace mozilla {
#define MEMORY_PROFILER_SAMPLE_SIZE 65536
#define BACKTRACE_BUFFER_SIZE 16384
// Seed the geometric sampler (see AddBytesSampled's doc in the header):
// cache log(1 - p) and draw the distance to the first sampled byte.
ProfilerImpl::ProfilerImpl()
  : mSampleSize(MEMORY_PROFILER_SAMPLE_SIZE)
{
  mLog1minusP = std::log(1.0 - 1.0 / mSampleSize);
  mRemainingBytes = std::floor(std::log(1.0 - DRandom()) / mLog1minusP);
}
// Capture the current backtrace via the Gecko profiler into a scratch
// buffer allocated with the uncensored (hook-bypassing) allocator, then
// split the buffer's NUL-separated frame names into a vector.
u_vector<u_string>
ProfilerImpl::GetStacktrace()
{
  u_vector<u_string> trace;
  // static_cast instead of a C-style cast; u_malloc returns void*.
  char* output = static_cast<char*>(u_malloc(BACKTRACE_BUFFER_SIZE));
  if (!output) {
    // Allocation failure: report an empty backtrace rather than crashing
    // inside the sampler.
    return trace;
  }
  profiler_get_backtrace_noalloc(output, BACKTRACE_BUFFER_SIZE);
  // Frames are packed back-to-back as NUL-terminated strings; an empty
  // string terminates the list.
  for (const char* p = output; *p; p += strlen(p) + 1) {
    trace.push_back(p);
  }
  u_free(output);
  return trace;
}
// Generate a uniformly distributed random number in [0, 1) with 53 bits of
// mantissa, stitched together from two std::rand() draws (26 high bits +
// 27 low bits).
// NOTE(review): assumes RAND_MAX >= 2^27 - 1; with a smaller RAND_MAX the
// high bits would always be zero — confirm on all target platforms.
double
ProfilerImpl::DRandom()
{
  return double(((uint64_t(std::rand()) & ((1 << 26) - 1)) << 27) +
                (uint64_t(std::rand()) & ((1 << 27) - 1)))
    / (uint64_t(1) << 53);
}
// Consume aBytes from the sampling budget and return how many sample
// points fall inside this allocation. A large allocation can contain
// several sample points, hence the loop; each iteration draws the next
// geometrically distributed gap.
size_t
ProfilerImpl::AddBytesSampled(uint32_t aBytes)
{
  size_t nSamples = 0;
  while (mRemainingBytes <= aBytes) {
    mRemainingBytes += std::floor(std::log(1.0 - DRandom()) / mLog1minusP);
    nSamples++;
  }
  mRemainingBytes -= aBytes;
  return nSamples;
}
NS_IMPL_ISUPPORTS(MemoryProfiler, nsIMemoryProfiler) NS_IMPL_ISUPPORTS(MemoryProfiler, nsIMemoryProfiler)
MemoryProfiler::MemoryProfiler() PRLock* MemoryProfiler::sLock;
{ uint32_t MemoryProfiler::sProfileRuntimeCount;
/* member initializers and constructor code */ NativeProfilerImpl* MemoryProfiler::sNativeProfiler;
} JSRuntimeProfilerMap* MemoryProfiler::sJSRuntimeProfilerMap;
TimeStamp MemoryProfiler::sStartTime;
MemoryProfiler::~MemoryProfiler() void
MemoryProfiler::InitOnce()
{ {
/* destructor code */ MOZ_ASSERT(NS_IsMainThread());
static bool initialized = false;
if (!initialized) {
InitializeMallocHook();
sLock = PR_NewLock();
sProfileRuntimeCount = 0;
sJSRuntimeProfilerMap = new JSRuntimeProfilerMap();
std::srand(PR_Now());
bool ignored;
sStartTime = TimeStamp::ProcessCreation(ignored);
initialized = true;
}
} }
NS_IMETHODIMP NS_IMETHODIMP
MemoryProfiler::StartProfiler() MemoryProfiler::StartProfiler()
{ {
InitOnce();
JSRuntime* runtime = XPCJSRuntime::Get()->Runtime();
AutoMPLock lock(sLock);
if (!(*sJSRuntimeProfilerMap)[runtime].mEnabled) {
(*sJSRuntimeProfilerMap)[runtime].mEnabled = true;
if (sProfileRuntimeCount == 0) {
js::EnableRuntimeProfilingStack(runtime, true);
if (!sNativeProfiler) {
sNativeProfiler = new NativeProfilerImpl();
}
MemProfiler::SetNativeProfiler(sNativeProfiler);
}
GCHeapProfilerImpl* gp = new GCHeapProfilerImpl();
(*sJSRuntimeProfilerMap)[runtime].mProfiler = gp;
MemProfiler::GetMemProfiler(runtime)->start(gp);
if (sProfileRuntimeCount == 0) {
EnableMallocHook(sNativeProfiler);
}
sProfileRuntimeCount++;
}
return NS_OK; return NS_OK;
} }
NS_IMETHODIMP NS_IMETHODIMP
MemoryProfiler::StopProfiler() MemoryProfiler::StopProfiler()
{ {
InitOnce();
JSRuntime* runtime = XPCJSRuntime::Get()->Runtime();
AutoMPLock lock(sLock);
if ((*sJSRuntimeProfilerMap)[runtime].mEnabled) {
MemProfiler::GetMemProfiler(runtime)->stop();
if (--sProfileRuntimeCount == 0) {
DisableMallocHook();
MemProfiler::SetNativeProfiler(nullptr);
js::EnableRuntimeProfilingStack(runtime, false);
}
(*sJSRuntimeProfilerMap)[runtime].mEnabled = false;
}
return NS_OK; return NS_OK;
} }
NS_IMETHODIMP NS_IMETHODIMP
MemoryProfiler::ResetProfiler() MemoryProfiler::ResetProfiler()
{ {
InitOnce();
JSRuntime* runtime = XPCJSRuntime::Get()->Runtime();
AutoMPLock lock(sLock);
if (!(*sJSRuntimeProfilerMap)[runtime].mEnabled) {
delete (*sJSRuntimeProfilerMap)[runtime].mProfiler;
(*sJSRuntimeProfilerMap)[runtime].mProfiler = nullptr;
}
if (sProfileRuntimeCount == 0) {
delete sNativeProfiler;
sNativeProfiler = nullptr;
}
return NS_OK; return NS_OK;
} }
NS_IMETHODIMP struct MergedTraces
MemoryProfiler::GetResults(JSContext *cx, JS::MutableHandle<JS::Value> aResult)
{ {
u_vector<u_string> mNames;
u_vector<TrieNode> mTraces;
u_vector<AllocEvent> mEvents;
};
// Merge two profile result sets (names/traces/events) into one, remapping
// the second set's name and trace indices into the merged tables.
static MergedTraces
MergeResults(u_vector<u_string> names0, u_vector<TrieNode> traces0, u_vector<AllocEvent> events0,
             u_vector<u_string> names1, u_vector<TrieNode> traces1, u_vector<AllocEvent> events1)
{
  NodeIndexMap<u_string> names;
  NodeIndexMap<TrieNode> traces;
  u_vector<AllocEvent> events;

  // Maps an index in names1/traces1 to its index in the merged table.
  // The trace map starts with 0 -> 0: both tables share the root node.
  u_vector<size_t> names1Tonames0;
  u_vector<size_t> traces1Totraces0(1, 0);

  // Merge names. Set 0 keeps its indices; set 1 is remapped.
  for (auto& i: names0) {
    names.Insert(i);
  }
  for (auto& i: names1) {
    names1Tonames0.push_back(names.Insert(i));
  }

  // Merge traces. Note that traces1[i].parentIdx < i for all i > 0, so a
  // node's parent has always been remapped before the node itself.
  for (auto& i: traces0) {
    traces.Insert(i);
  }
  for (size_t i = 1; i < traces1.size(); i++) {
    TrieNode node = traces1[i];
    node.parentIdx = traces1Totraces0[node.parentIdx];
    node.nameIdx = names1Tonames0[node.nameIdx];
    traces1Totraces0.push_back(traces.Insert(node));
  }

  // Update events1 to refer to the merged trace table.
  for (auto& i: events1) {
    i.mTraceIdx = traces1Totraces0[i.mTraceIdx];
  }

  // Merge the events according to timestamps (each stream is generated in
  // timestamp order), then drain whichever stream remains.
  auto p0 = events0.begin();
  auto p1 = events1.begin();
  while (p0 != events0.end() && p1 != events1.end()) {
    if (p0->mTimestamp < p1->mTimestamp) {
      events.push_back(*p0++);
    } else {
      events.push_back(*p1++);
    }
  }
  while (p0 != events0.end()) {
    events.push_back(*p0++);
  }
  while (p1 != events1.end()) {
    events.push_back(*p1++);
  }
  return MergedTraces{names.Serialize(), traces.Serialize(), Move(events)};
}
NS_IMETHODIMP
MemoryProfiler::GetResults(JSContext* cx, JS::MutableHandle<JS::Value> aResult)
{
InitOnce();
JSRuntime* runtime = XPCJSRuntime::Get()->Runtime();
AutoMPLock lock(sLock);
// Getting results when the profiler is running is not allowed.
if (sProfileRuntimeCount > 0) {
return NS_OK;
}
// Return immediately when native profiler does not exist.
if (!sNativeProfiler) {
return NS_OK;
}
// Return immediately when there's no result in current runtime.
if (!(*sJSRuntimeProfilerMap)[runtime].mProfiler) {
return NS_OK;
}
GCHeapProfilerImpl* gp = (*sJSRuntimeProfilerMap)[runtime].mProfiler;
auto results = MergeResults(gp->GetNames(), gp->GetTraces(), gp->GetEvents(),
sNativeProfiler->GetNames(),
sNativeProfiler->GetTraces(),
sNativeProfiler->GetEvents());
u_vector<u_string> names = Move(results.mNames);
u_vector<TrieNode> traces = Move(results.mTraces);
u_vector<AllocEvent> events = Move(results.mEvents);
JS::RootedObject jsnames(cx, JS_NewArrayObject(cx, names.size()));
JS::RootedObject jstraces(cx, JS_NewArrayObject(cx, traces.size()));
JS::RootedObject jsevents(cx, JS_NewArrayObject(cx, events.size()));
for (size_t i = 0; i < names.size(); i++) {
JS::RootedString name(cx, JS_NewStringCopyZ(cx, names[i].c_str()));
JS_SetElement(cx, jsnames, i, name);
}
for (size_t i = 0; i < traces.size(); i++) {
JS::RootedObject tn(cx, JS_NewPlainObject(cx));
JS::RootedValue nameIdx(cx, JS_NumberValue(traces[i].nameIdx));
JS::RootedValue parentIdx(cx, JS_NumberValue(traces[i].parentIdx));
JS_SetProperty(cx, tn, "nameIdx", nameIdx);
JS_SetProperty(cx, tn, "parentIdx", parentIdx);
JS_SetElement(cx, jstraces, i, tn);
}
int i = 0;
for (auto ent: events) {
if (ent.mSize == 0) {
continue;
}
MOZ_ASSERT(!sStartTime.IsNull());
double time = (sStartTime - ent.mTimestamp).ToMilliseconds();
JS::RootedObject tn(cx, JS_NewPlainObject(cx));
JS::RootedValue size(cx, JS_NumberValue(ent.mSize));
JS::RootedValue traceIdx(cx, JS_NumberValue(ent.mTraceIdx));
JS::RootedValue timestamp(cx, JS_NumberValue(time));
JS_SetProperty(cx, tn, "size", size);
JS_SetProperty(cx, tn, "traceIdx", traceIdx);
JS_SetProperty(cx, tn, "timestamp", timestamp);
JS_SetElement(cx, jsevents, i++, tn);
}
JS_SetArrayLength(cx, jsevents, i);
JS::RootedObject result(cx, JS_NewPlainObject(cx));
JS::RootedValue objnames(cx, ObjectOrNullValue(jsnames));
JS_SetProperty(cx, result, "names", objnames);
JS::RootedValue objtraces(cx, ObjectOrNullValue(jstraces));
JS_SetProperty(cx, result, "traces", objtraces);
JS::RootedValue objevents(cx, ObjectOrNullValue(jsevents));
JS_SetProperty(cx, result, "events", objevents);
aResult.setObject(*result);
return NS_OK; return NS_OK;
} }
} // namespace mozilla

View File

@ -9,7 +9,12 @@
#include "nsIMemoryProfiler.h" #include "nsIMemoryProfiler.h"
#include "nsString.h" #include "mozilla/TimeStamp.h"
#include "CompactTraceTable.h"
#include "UncensoredAllocator.h"
#include "prlock.h"
#define MEMORY_PROFILER_CID \ #define MEMORY_PROFILER_CID \
{ 0xf976eaa2, 0xcc1f, 0x47ee, \ { 0xf976eaa2, 0xcc1f, 0x47ee, \
@ -17,19 +22,131 @@
#define MEMORY_PROFILER_CONTRACT_ID "@mozilla.org/tools/memory-profiler;1" #define MEMORY_PROFILER_CONTRACT_ID "@mozilla.org/tools/memory-profiler;1"
class MemoryProfiler : public nsIMemoryProfiler struct JSRuntime;
namespace mozilla {
class NativeProfilerImpl;
class GCHeapProfilerImpl;
// Per-JSRuntime registration: the GC-heap profiler attached to a runtime,
// and whether profiling is currently enabled for it.
struct ProfilerForJSRuntime
{
  GCHeapProfilerImpl* mProfiler = nullptr;
  bool mEnabled = false;
};
using JSRuntimeProfilerMap = u_unordered_map<JSRuntime*, ProfilerForJSRuntime>;
class MemoryProfiler final : public nsIMemoryProfiler
{ {
public: public:
NS_DECL_ISUPPORTS NS_DECL_ISUPPORTS
NS_DECL_NSIMEMORYPROFILER NS_DECL_NSIMEMORYPROFILER
MemoryProfiler();
private: private:
virtual ~MemoryProfiler(); static void InitOnce();
~MemoryProfiler() {}
protected: // The accesses to other static member are guarded by sLock and
/* additional members */ // sProfileRuntimeCount.
static PRLock* sLock;
static uint32_t sProfileRuntimeCount;
static NativeProfilerImpl* sNativeProfiler;
static JSRuntimeProfilerMap* sJSRuntimeProfilerMap;
static TimeStamp sStartTime;
}; };
// Allocation events to be reported.
struct AllocEvent {
  // When the event happened.
  TimeStamp mTimestamp;
  // index to a stacktrace singleton.
  uint32_t mTraceIdx;
  // Allocation size. A negative value records the free of a previously
  // reported allocation; zero-size events are skipped when reporting.
  int32_t mSize;
  AllocEvent(uint32_t aTraceIdx, int32_t aSize, TimeStamp aTimestamp)
    : mTimestamp(aTimestamp)
    , mTraceIdx(aTraceIdx)
    , mSize(aSize)
  {}
};
// Index to allocation events but also a mark bit to be GC-able.
struct AllocEntry {
  // Index into the AllocEvent log; 31 bits so the mark bit fits alongside.
  uint32_t mEventIdx : 31;
  // Set during a mark phase when the tracked object is found alive.
  bool mMarked : 1;
  // NOTE(review): intentionally implicit — entries are constructed
  // directly from an event index at the call sites.
  AllocEntry(int aEventIdx)
    : mEventIdx(aEventIdx)
    , mMarked(false)
  {}
};
// Live sampled objects keyed by address.
using AllocMap = u_unordered_map<void*, AllocEntry>;
// Shared machinery for the native and GC-heap profilers: fixed-rate byte
// sampling plus backtrace capture.
class ProfilerImpl
{
public:
  // Capture the current backtrace as a list of frame names.
  static u_vector<u_string> GetStacktrace();
  // Uniformly distributed random number in [0, 1).
  static double DRandom();

  ProfilerImpl();
  virtual u_vector<u_string> GetNames() const = 0;
  virtual u_vector<TrieNode> GetTraces() const = 0;
  virtual const u_vector<AllocEvent>& GetEvents() const = 0;

protected:
  /**
   * The sampler generates a random variable which conforms to a geometric
   * distribution of probability p = 1 / mSampleSize to calculate the
   * next-to-be-sampled byte directly; It avoids rolling a dice on each byte.
   *
   * Let Bn denote a Bernoulli process with first success on n-th trial, the
   * cumulative distribution function of Bn is Cn = 1 - (1 - p) ^ n.
   * Let U denote a uniformly distributed random variable in [0, 1).
   * A geometric random variable can be generated by Cn's reverse function:
   * G = floor(log(1 - U) / log(1 - p)).
   *
   * @param aBytes the number of bytes seen
   * @return the number of events sampled
   */
  size_t AddBytesSampled(uint32_t aBytes);
  // Average number of bytes per sample.
  uint32_t mSampleSize;

private:
  // Bytes remaining until the next sample point.
  uint32_t mRemainingBytes;
  // Cached log(1 - 1 / mSampleSize).
  double mLog1minusP;
};
/*
 * This class is used to make sure the profile data is only accessed
 * on one thread at a time. Don't use mozilla::Mutex because we don't
 * want to allocate memory.
 */
class AutoMPLock
{
public:
  // Acquires aLock for the lifetime of this object. aLock must be non-null.
  explicit AutoMPLock(PRLock* aLock)
  {
    MOZ_ASSERT(aLock);
    mLock = aLock;
    PR_Lock(mLock);
  }

  ~AutoMPLock()
  {
    PR_Unlock(mLock);
  }

  // A copied guard would PR_Unlock the same lock twice; forbid copying.
  AutoMPLock(const AutoMPLock&) = delete;
  AutoMPLock& operator=(const AutoMPLock&) = delete;

private:
  PRLock* mLock;
};
} // namespace mozilla
#endif #endif

View File

@ -0,0 +1,82 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "NativeProfilerImpl.h"
#include "mozilla/TimeStamp.h"
#include "prlock.h"
namespace mozilla {
// Create the native-heap profiler with its own NSPR lock.
NativeProfilerImpl::NativeProfilerImpl()
  : mLock(PR_NewLock())
{
}
// Release the NSPR lock if it was successfully created.
NativeProfilerImpl::~NativeProfilerImpl()
{
  if (mLock) {
    PR_DestroyLock(mLock);
  }
}
// Serialized function-name table backing the recorded traces.
u_vector<u_string>
NativeProfilerImpl::GetNames() const
{
  return mTraceTable.GetNames();
}
// Serialized backtrace trie, ordered by node index.
u_vector<TrieNode>
NativeProfilerImpl::GetTraces() const
{
  return mTraceTable.GetTraces();
}
// The append-only log of allocation/free events recorded so far.
const u_vector<AllocEvent>&
NativeProfilerImpl::GetEvents() const
{
  return mAllocEvents;
}
// Drop all recorded traces, events and tracked allocations.
// NOTE(review): takes no AutoMPLock, unlike the other mutators —
// presumably only called while the malloc hooks are disabled; confirm.
void
NativeProfilerImpl::reset()
{
  mTraceTable.Reset();
  mAllocEvents.clear();
  mNativeEntries.clear();
}
// Record a sampled native allocation: attribute it to the current
// backtrace, log an allocation event, and remember the address so
// removeNative() can log the matching free.
void
NativeProfilerImpl::sampleNative(void* addr, uint32_t size)
{
  AutoMPLock lock(mLock);
  size_t nSamples = AddBytesSampled(size);
  if (nSamples > 0) {
    u_vector<u_string> trace = GetStacktrace();
    // Each sample stands for mSampleSize bytes on average.
    AllocEvent ai(mTraceTable.Insert(trace), nSamples * mSampleSize, TimeStamp::Now());
    // The entry's event index must be taken before the push_back below.
    mNativeEntries.insert(std::make_pair(addr, AllocEntry(mAllocEvents.size())));
    mAllocEvents.push_back(ai);
  }
}
void
NativeProfilerImpl::removeNative(void* addr)
{
AutoMPLock lock(mLock);
auto res = mNativeEntries.find(addr);
if (res == mNativeEntries.end()) {
return;
}
AllocEvent& oldEvent = mAllocEvents[res->second.mEventIdx];
AllocEvent newEvent(oldEvent.mTraceIdx, -oldEvent.mSize, TimeStamp::Now());
mAllocEvents.push_back(newEvent);
mNativeEntries.erase(res);
}
} // namespace mozilla

View File

@ -0,0 +1,43 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef memory_profiler_NativeProfilerImpl_h
#define memory_profiler_NativeProfilerImpl_h
#include "CompactTraceTable.h"
#include "MemoryProfiler.h"
#include "jsfriendapi.h"
struct PRLock;
namespace mozilla {
// Native-heap side of the memory profiler: receives sampled malloc/free
// notifications from the malloc hooks on top of the shared sampling
// machinery in ProfilerImpl.
class NativeProfilerImpl final : public NativeProfiler
                               , public ProfilerImpl
{
public:
  NativeProfilerImpl();
  ~NativeProfilerImpl() override;

  u_vector<u_string> GetNames() const override;
  u_vector<TrieNode> GetTraces() const override;
  const u_vector<AllocEvent>& GetEvents() const override;

  void reset() override;
  void sampleNative(void* addr, uint32_t size) override;
  void removeNative(void* addr) override;

private:
  // Guards the table and the event log below.
  PRLock* mLock;
  // Live sampled allocations keyed by address.
  AllocMap mNativeEntries;
  // Append-only log of allocations (positive size) and frees (negative).
  u_vector<AllocEvent> mAllocEvents;
  CompactTraceTable mTraceTable;
};
} // namespace mozilla
#endif // memory_profiler_NativeProfilerImpl_h

View File

@ -0,0 +1,105 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "UncensoredAllocator.h"
#include "mozilla/unused.h"
#include "jsfriendapi.h"
#ifdef MOZ_REPLACE_MALLOC
#include "replace_malloc_bridge.h"
#endif
namespace mozilla {
// Whether the malloc hooks should forward events to sNativeProfiler.
static bool sMemoryHookEnabled = false;
// Hook table handed to the replace-malloc bridge.
static malloc_hook_table_t sMallocHook;
// Profiler notified on every sampled malloc/free while enabled.
static NativeProfiler* sNativeProfiler;
// Direct (unhooked) allocator entry points; null until initialized.
static void* (*uncensored_malloc)(size_t size);
static void (*uncensored_free)(void* ptr);
// malloc hook: forward the new allocation to the profiler when enabled,
// returning addr unchanged so it fits the hook-table signature.
// NOTE(review): sMemoryHookEnabled/sNativeProfiler are read without
// synchronization — presumably enable/disable cannot race with
// allocations; confirm the bridge's guarantees.
static void*
SampleNative(void* addr, size_t size)
{
  if (sMemoryHookEnabled) {
    sNativeProfiler->sampleNative(addr, size);
  }
  return addr;
}
// free hook: forward the free to the profiler when enabled.
static void
RemoveNative(void* addr)
{
  if (sMemoryHookEnabled) {
    sNativeProfiler->removeNative(addr);
  }
}
// Allocate without re-entering the profiler's malloc hook. Until the
// uncensored entry point is known, plain malloc is used.
void*
u_malloc(size_t size)
{
  return uncensored_malloc ? uncensored_malloc(size) : malloc(size);
}
// Free memory obtained from u_malloc, bypassing the profiler's free hook
// when the uncensored entry point is available.
void
u_free(void* ptr)
{
  if (!uncensored_free) {
    free(ptr);
    return;
  }
  uncensored_free(ptr);
}
// One-time setup: fill in the hook table and contact the replace-malloc
// bridge. The hooks are not enabled here — see EnableMallocHook.
void InitializeMallocHook()
{
#ifdef MOZ_REPLACE_MALLOC
  sMallocHook.free_hook = RemoveNative;
  sMallocHook.malloc_hook = SampleNative;
  ReplaceMallocBridge* bridge = ReplaceMallocBridge::Get(3);
  if (bridge) {
    // NOTE(review): registers with a null hook table — presumably this
    // just initializes/probes the registration; confirm the bridge API.
    mozilla::unused << bridge->RegisterHook("memory-profiler", nullptr, nullptr);
  }
#endif
  // Without a bridge, fall back to the plain allocator entry points so
  // u_malloc/u_free still work.
  if (!uncensored_malloc && !uncensored_free) {
    uncensored_malloc = malloc;
    uncensored_free = free;
  }
}
// Start routing malloc/free through SampleNative/RemoveNative, reporting
// to aNativeProfiler. No-op when the replace-malloc bridge is absent.
void EnableMallocHook(NativeProfiler* aNativeProfiler)
{
#ifdef MOZ_REPLACE_MALLOC
  ReplaceMallocBridge* bridge = ReplaceMallocBridge::Get(3);
  if (bridge) {
    const malloc_table_t* alloc_funcs =
      bridge->RegisterHook("memory-profiler", nullptr, &sMallocHook);
    if (alloc_funcs) {
      // Keep the unhooked entry points so the profiler's own allocations
      // (u_malloc/u_free) don't re-enter the hooks.
      uncensored_malloc = alloc_funcs->malloc;
      uncensored_free = alloc_funcs->free;
      sNativeProfiler = aNativeProfiler;
      sMemoryHookEnabled = true;
    }
  }
#endif
}
// Stop routing allocations to the profiler and unregister from the bridge.
// NOTE(review): if the bridge is unavailable here, sMemoryHookEnabled is
// left set — presumably Get(3) cannot fail after a successful enable;
// confirm.
void DisableMallocHook()
{
#ifdef MOZ_REPLACE_MALLOC
  ReplaceMallocBridge* bridge = ReplaceMallocBridge::Get(3);
  if (bridge) {
    bridge->RegisterHook("memory-profiler", nullptr, nullptr);
    sMemoryHookEnabled = false;
  }
#endif
}
} // namespace mozilla

View File

@ -0,0 +1,103 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef memory_profiler_UncensoredAllocator_h
#define memory_profiler_UncensoredAllocator_h
#include "mozilla/Compiler.h"
#include <string>
#include <unordered_map>
#include <vector>
class NativeProfiler;
#if MOZ_USING_STLPORT
namespace std {
using tr1::unordered_map;
} // namespace std
#endif
namespace mozilla {
void InitializeMallocHook();
void EnableMallocHook(NativeProfiler* aNativeProfiler);
void DisableMallocHook();
void* u_malloc(size_t size);
void u_free(void* ptr);
#ifdef MOZ_REPLACE_MALLOC
// Minimal C++03-style STL allocator that obtains memory through
// u_malloc/u_free, i.e. without re-entering the profiler's malloc hooks.
// The profiler's own containers must use it, or every internal allocation
// would itself be sampled.
template<class Tp>
struct UncensoredAllocator
{
  typedef size_t size_type;
  typedef ptrdiff_t difference_type;
  typedef Tp* pointer;
  typedef const Tp* const_pointer;
  typedef Tp& reference;
  typedef const Tp& const_reference;
  typedef Tp value_type;

  UncensoredAllocator() {}
  template<class T>
  UncensoredAllocator(const UncensoredAllocator<T>&) {}

  template<class Other>
  struct rebind
  {
    typedef UncensoredAllocator<Other> other;
  };

  // Raw storage for n objects via the unhooked allocator.
  Tp* allocate(size_t n)
  {
    return reinterpret_cast<Tp*>(u_malloc(n * sizeof(Tp)));
  }

  void deallocate(Tp* p, size_t n)
  {
    u_free(reinterpret_cast<void*>(p));
  }

  void construct(Tp* p, const Tp& val)
  {
    new ((void*)p) Tp(val);
  }

  void destroy(Tp* p)
  {
    p->Tp::~Tp();
  }

  // All instances are interchangeable: memory from one may be freed by
  // another.
  bool operator==(const UncensoredAllocator& rhs) const
  {
    return true;
  }

  bool operator!=(const UncensoredAllocator& rhs) const
  {
    return false;
  }

  size_type max_size() const
  {
    return static_cast<size_type>(-1) / sizeof(Tp);
  }
};

using u_string =
  std::basic_string<char, std::char_traits<char>, UncensoredAllocator<char>>;
template<typename T>
using u_vector = std::vector<T, UncensoredAllocator<T>>;
// The allocator's value_type must match the map's value_type, which is
// std::pair<const K, V> — not std::pair<K, V>.
template<typename K, typename V, typename H = std::hash<K>>
using u_unordered_map =
  std::unordered_map<K, V, H, std::equal_to<K>,
                     UncensoredAllocator<std::pair<const K, V>>>;
#else
// Without replace-malloc there are no hooks to bypass, so plain standard
// containers suffice. These must be alias templates — a plain
// `using u_vector = std::vector;` is ill-formed because std::vector names
// a template, not a type.
using u_string = std::string;
template<typename T>
using u_vector = std::vector<T>;
template<typename K, typename V, typename H = std::hash<K>>
using u_unordered_map = std::unordered_map<K, V, H>;
#endif
} // namespace mozilla
#endif // memory_profiler_UncensoredAllocator_h

View File

@ -12,10 +12,12 @@ if CONFIG['MOZ_ENABLE_PROFILER_SPS']:
'nsIMemoryProfiler.idl', 'nsIMemoryProfiler.idl',
] ]
# This file cannot be built in unified mode because of name clashes with mozglue headers on Android. UNIFIED_SOURCES += [
SOURCES += [ 'GCHeapProfilerImpl.cpp',
'MemoryProfiler.cpp', 'MemoryProfiler.cpp',
'NativeProfilerImpl.cpp',
'nsMemoryProfilerFactory.cpp', 'nsMemoryProfilerFactory.cpp',
'UncensoredAllocator.cpp',
] ]
LOCAL_INCLUDES += [ LOCAL_INCLUDES += [

View File

@ -5,19 +5,68 @@
#include "nsISupports.idl" #include "nsISupports.idl"
[scriptable, uuid(f70db623-3bd5-4719-b8ce-4c6b350a925c)] /**
* The memory profiler samples allocation events. An allocation event
* includes a type (what and at where is going to be allocated), a
* size, a timestamp and the corresponding stack trace. Free events
* are also tracked. For managed languages, namely languages relying
* on garbage collection, a free event is generated when an object is
* reclaimed by the garbage collector. These sampled events can be
* used to approximate the full history of allocations afterwards.
* That means we can get various memory profiles of a program in
* different perspectives by post-processing the history in different
* ways. The profiler was designed from the beginning to support not
* only JavaScript but also native code. Naturally, not only
* JavaScript objects but also native allocations are tracked.
*
* The result returned is the sampled allocation event traces in a
* compact format. The events are sorted according to the timestamp
* when the event happened. Each event has a trace index pointing to
* the traces table. Each trace entry has a name index pointing to the
* names table and a parent index pointing to the parent trace in the
* traces table. By following the trace index one could rebuild the
* complete backtrace of an allocation event.
*
* [ Events ]
* +-------+-------+ +-------+
* | Size | Size | | Size |
* |-------|-------| |-------|
* | Time | Time |......| Time |
* |-------|-------| |-------|
* +-- Trace | Trace | | Trace |
* | +-------+-------+ +-------+
* |
* | [ Traces ]
* +->--------+--------+ +--------+ +--------+
* -| Name | Name | | Name | | Name |
* / |--------|--------|...|--------|...|--------|
* | | Parent | Parent | | Parent | | Parent |
* | +---|----+----^--++ +--^--|--+ +---^----+
* | | | | | | |
* | +---------+ +-------+ +----------+
* | [ Names ]
* | +-----------------+-----------------+
* +-> Function name | Function name |
* | & line numbers | & line numbers |......
* +-----------------+-----------------+
*
*/
[scriptable, uuid(1e10e7a9-bc05-4878-a687-36c9ea4428b1)]
interface nsIMemoryProfiler : nsISupports interface nsIMemoryProfiler : nsISupports
{ {
void startProfiler(); void startProfiler();
void stopProfiler(); void stopProfiler();
void resetProfiler(); void resetProfiler();
// Get results in an object which contains three tables: /**
// { * Get results in an object which contains three tables:
// names, // an array of function names and positions * {
// traces, // an array of {nameIdx, parentIdx} * names, // an array of function names and positions
// events, // an array of {size, timestamp, traceIdx} * traces, // an array of {nameIdx, parentIdx}
// } * events, // an array of {size, timestamp, traceIdx}
* }
* Should only be called after stopProfiler.
*/
[implicit_jscontext] [implicit_jscontext]
jsval getResults(); jsval getResults();
}; };

View File

@ -7,6 +7,8 @@
#include "nsCOMPtr.h" #include "nsCOMPtr.h"
#include "MemoryProfiler.h" #include "MemoryProfiler.h"
using mozilla::MemoryProfiler;
NS_GENERIC_FACTORY_CONSTRUCTOR(MemoryProfiler) NS_GENERIC_FACTORY_CONSTRUCTOR(MemoryProfiler)
NS_DEFINE_NAMED_CID(MEMORY_PROFILER_CID); NS_DEFINE_NAMED_CID(MEMORY_PROFILER_CID);