1.  Fix jsdhash/pldhash.[ch] so that add/remove cycles for a given key k do
    not create a long chain of removed sentinels.  Also, when adding k to a
    table where k is not mapped, but where k hashes to a chain that includes
    removed sentinels, recycle the first removed sentinel in the chain for
    k's entry (see the sketch after this list).
2.  Cache cx->resolving till js_DestroyContext, to avoid high JSDHashTable
    new/destroy overhead in js_LookupProperty.
3.  Add NS_TraceStack to nsTraceMalloc.[ch] and clean the .c file up a bit.
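
The following is a minimal, self-contained sketch of the idea in item 1, not
the jsdhash/pldhash code itself: the names (slots, hash_key, search, add_key,
remove_key) and the fixed 16-slot table are illustrative. It marks a free slot
with keyHash 0 and a removed sentinel with 1, keeps bit 0 of a live keyHash as
a collision flag, frees a removed entry outright when the flag is clear, and
lets add recycle the first removed sentinel on the probe chain.

    #include <stdint.h>
    #include <stdio.h>

    #define SIZE_LOG2  4
    #define TABLE_SIZE (1u << SIZE_LOG2)
    #define FREE       0u
    #define REMOVED    1u
    #define COLLISION  1u                        /* bit 0 of a live keyHash */

    static uint32_t slots[TABLE_SIZE];           /* stored keyHash values */

    static uint32_t hash_key(uint32_t key)
    {
        uint32_t h = key * 2654435769u;          /* multiplicative hash */
        h &= ~COLLISION;                         /* bit 0 reserved for the flag */
        return (h < 2) ? h - 2 : h;              /* 0 and 1 reserved, cf. ENSURE_LIVE_KEYHASH */
    }

    /* Probe with double hashing; when adding, remember the first removed
     * sentinel so it can be recycled, and flag live entries we step past. */
    static uint32_t *search(uint32_t keyHash, int adding)
    {
        uint32_t hash1 = keyHash >> (32 - SIZE_LOG2);
        uint32_t hash2 = ((keyHash << SIZE_LOG2) >> (32 - SIZE_LOG2)) | 1;
        uint32_t *first_removed = NULL;

        for (;;) {
            uint32_t *slot = &slots[hash1 & (TABLE_SIZE - 1)];
            if (*slot == FREE)
                return (adding && first_removed) ? first_removed : slot;
            if (*slot != REMOVED && (*slot & ~COLLISION) == keyHash)
                return slot;                     /* hit */
            if (*slot == REMOVED) {
                if (!first_removed)
                    first_removed = slot;        /* recycle candidate */
            } else if (adding) {
                *slot |= COLLISION;              /* an add probed past this entry */
            }
            hash1 -= hash2;                      /* assume the table never fills */
        }
    }

    static void add_key(uint32_t key)
    {
        uint32_t keyHash = hash_key(key);
        uint32_t *slot = search(keyHash, 1);
        if (*slot == REMOVED)
            *slot = keyHash | COLLISION;         /* recycled slot was on a chain */
        else if (*slot == FREE)
            *slot = keyHash;
    }

    static void remove_key(uint32_t key)
    {
        uint32_t *slot = search(hash_key(key), 0);
        if (*slot == FREE)
            return;                              /* key was not mapped */
        /* Keep a sentinel only if some later add collided here. */
        *slot = (*slot & COLLISION) ? REMOVED : FREE;
    }

    int main(void)
    {
        int i;
        for (i = 0; i < 1000; i++) {             /* add/remove cycles for one key */
            add_key(42);
            remove_key(42);
        }
        for (i = 0; i < (int) TABLE_SIZE; i++) {
            if (slots[i] == REMOVED)
                printf("sentinel left at slot %d\n", i);
        }
        printf("done: no output above means no sentinel chain built up\n");
        return 0;
    }

Because the lone key never collides, every remove frees its slot directly, so
a million such cycles would still leave zero sentinels behind.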
brendan%mozilla.org 2001-09-12 06:39:31 +00:00
parent 1db0a058f3
commit 3efd0e519f
11 changed files with 575 additions and 410 deletions


@ -245,6 +245,12 @@ js_DestroyContext(JSContext *cx, JSGCMode gcmode)
JS_free(cx, temp);
}
/* Destroy the resolve recursion damper. */
if (cx->resolving) {
JS_DHashTableDestroy(cx->resolving);
cx->resolving = NULL;
}
/* Finally, free cx itself. */
free(cx);
}
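
As an aside, item 2's caching pattern in sketch form, using a stand-in Table
type rather than the real JSDHashTable API (context_resolving and
context_destroy are illustrative names): the table is created on first use,
left in place even when it drains to zero entries, and destroyed only at
context teardown, so js_LookupProperty no longer pays a new/destroy cycle per
resolve.

    #include <stdlib.h>

    typedef struct Table { int entryCount; } Table;    /* stand-in for JSDHashTable */
    typedef struct Context { Table *resolving; } Context;

    static Table *context_resolving(Context *cx)
    {
        if (!cx->resolving)                    /* created lazily, once */
            cx->resolving = calloc(1, sizeof(Table));
        return cx->resolving;                  /* reused across lookups */
    }

    static void context_destroy(Context *cx)
    {
        free(cx->resolving);                   /* freed only at teardown */
        free(cx);
    }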


@ -44,6 +44,9 @@
#include "jsutil.h" /* for JS_ASSERT */
#ifdef JS_DHASHMETER
# if defined MOZILLA_CLIENT && defined DEBUG_brendan
# include "nsTraceMalloc.h"
# endif
# define METER(x) x
#else
# define METER(x) /* nothing */
@ -169,7 +172,7 @@ JS_DHashTableInit(JSDHashTable *table, JSDHashTableOps *ops, void *data,
fprintf(stderr,
"jsdhash: for the table at address 0x%p, the given entrySize"
" of %lu %s favors chaining over double hashing.\n",
table,
(void *)table,
(unsigned long) entrySize,
(entrySize > 16 * sizeof(void*)) ? "definitely" : "probably");
}
@ -203,12 +206,29 @@ JS_DHashTableInit(JSDHashTable *table, JSDHashTableOps *ops, void *data,
#define HASH1(hash0, shift) ((hash0) >> (shift))
#define HASH2(hash0,log2,shift) ((((hash0) << (log2)) >> (shift)) | 1)
/* Reserve keyHash 0 for free entries and 1 for removed-entry sentinels. */
/*
* Reserve keyHash 0 for free entries and 1 for removed-entry sentinels. Note
* that a removed-entry sentinel need be stored only if the removed entry had
* a colliding entry added after it. Therefore we can use 1 as the collision
* flag in addition to the removed-entry sentinel value. Multiplicative hash
* uses the high order bits of keyHash, so this least-significant reservation
* should not hurt the hash function's effectiveness much.
*
* If you change any of these magic numbers, also update JS_DHASH_ENTRY_IS_LIVE
* in jsdhash.h. It used to be private to jsdhash.c, but then became public to
* assist iterator writers who inspect table->entryStore directly.
*/
#define COLLISION_FLAG ((JSDHashNumber) 1)
#define MARK_ENTRY_FREE(entry) ((entry)->keyHash = 0)
#define MARK_ENTRY_REMOVED(entry) ((entry)->keyHash = 1)
#define ENTRY_IS_REMOVED(entry) ((entry)->keyHash == 1)
#define ENTRY_IS_LIVE(entry) JS_DHASH_ENTRY_IS_LIVE(entry)
#define ENSURE_LIVE_KEYHASH(hash0) if (hash0 < 2) hash0 -= 2; else (void)0
/* Match an entry's keyHash against an unstored one computed from a key. */
#define MATCH_ENTRY_KEYHASH(entry,hash0) \
(((entry)->keyHash & ~COLLISION_FLAG) == (hash0))
/* Compute the address of the indexed entry in table. */
#define ADDRESS_ENTRY(table, index) \
((JSDHashEntryHdr *)((table)->entryStore + (index) * (table)->entrySize))
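/*
 * Aside (not part of this diff): the reserved values above give each slot a
 * small state machine, summarized here for reference:
 *
 *   keyHash == 0               free slot
 *   keyHash == 1               removed sentinel (collided entry, since removed)
 *   keyHash >= 2, bit 0 clear  live entry that no add has ever probed past
 *   keyHash >= 2, bit 0 set    live entry with the collision flag set
 *
 * MATCH_ENTRY_KEYHASH masks bit 0 away, so a live entry matches its unstored
 * hash whether or not the collision flag has been set on it.
 */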
@ -220,6 +240,18 @@ JS_DHashTableFinish(JSDHashTable *table)
uint32 entrySize;
JSDHashEntryHdr *entry;
#ifdef DEBUG_brendan
static FILE *dumpfp = NULL;
if (!dumpfp) dumpfp = fopen("/tmp/jsdhash.bigdump", "w");
if (dumpfp) {
#ifdef MOZILLA_CLIENT
NS_TraceStack(1, dumpfp);
#endif
JS_DHashTableDumpMeter(table, NULL, dumpfp);
fputc('\n', dumpfp);
}
#endif
/* Call finalize before clearing entries. */
table->ops->finalize(table);
@ -241,15 +273,17 @@ JS_DHashTableFinish(JSDHashTable *table)
}
static JSDHashEntryHdr *
SearchTable(JSDHashTable *table, const void *key, JSDHashNumber keyHash)
SearchTable(JSDHashTable *table, const void *key, JSDHashNumber keyHash,
JSDHashOperator op)
{
JSDHashNumber hash1, hash2;
int hashShift, sizeLog2;
JSDHashEntryHdr *entry;
JSDHashEntryHdr *entry, *firstRemoved;
JSDHashMatchEntry matchEntry;
uint32 sizeMask;
METER(table->stats.searches++);
JS_ASSERT(!(keyHash & COLLISION_FLAG));
/* Compute the primary hash address. */
hashShift = table->hashShift;
@ -264,7 +298,7 @@ SearchTable(JSDHashTable *table, const void *key, JSDHashNumber keyHash)
/* Hit: return entry. */
matchEntry = table->ops->matchEntry;
if (entry->keyHash == keyHash && matchEntry(table, entry, key)) {
if (MATCH_ENTRY_KEYHASH(entry, keyHash) && matchEntry(table, entry, key)) {
METER(table->stats.hits++);
return entry;
}
@ -273,19 +307,54 @@ SearchTable(JSDHashTable *table, const void *key, JSDHashNumber keyHash)
sizeLog2 = table->sizeLog2;
hash2 = HASH2(keyHash, sizeLog2, hashShift);
sizeMask = JS_BITMASK(sizeLog2);
do {
/* Save the first removed entry pointer so JS_DHASH_ADD can recycle it. */
if (ENTRY_IS_REMOVED(entry)) {
firstRemoved = entry;
} else {
firstRemoved = NULL;
if (op == JS_DHASH_ADD)
entry->keyHash |= COLLISION_FLAG;
}
for (;;) {
METER(table->stats.steps++);
hash1 -= hash2;
hash1 &= sizeMask;
entry = ADDRESS_ENTRY(table, hash1);
if (JS_DHASH_ENTRY_IS_FREE(entry)) {
#ifdef DEBUG_brendan
extern char *getenv(const char *);
static JSBool gotFirstRemovedEnvar = JS_FALSE;
static char *doFirstRemoved = NULL;
if (!gotFirstRemovedEnvar) {
doFirstRemoved = getenv("DHASH_DO_FIRST_REMOVED");
gotFirstRemovedEnvar = JS_TRUE;
}
if (!doFirstRemoved) return entry;
#endif
METER(table->stats.misses++);
return (firstRemoved && op == JS_DHASH_ADD) ? firstRemoved : entry;
}
if (MATCH_ENTRY_KEYHASH(entry, keyHash) &&
matchEntry(table, entry, key)) {
METER(table->stats.hits++);
return entry;
}
} while (entry->keyHash != keyHash || !matchEntry(table, entry, key));
METER(table->stats.hits++);
return entry;
if (ENTRY_IS_REMOVED(entry)) {
if (!firstRemoved)
firstRemoved = entry;
} else {
if (op == JS_DHASH_ADD)
entry->keyHash |= COLLISION_FLAG;
}
}
/* NOTREACHED */
return NULL;
}
static JSBool
@ -328,11 +397,13 @@ ChangeTable(JSDHashTable *table, int deltaLog2)
for (i = 0; i < oldCapacity; i++) {
oldEntry = (JSDHashEntryHdr *)oldEntryAddr;
if (ENTRY_IS_LIVE(oldEntry)) {
oldEntry->keyHash &= ~COLLISION_FLAG;
newEntry = SearchTable(table, getKey(table, oldEntry),
oldEntry->keyHash);
oldEntry->keyHash, JS_DHASH_ADD);
JS_ASSERT(JS_DHASH_ENTRY_IS_FREE(newEntry));
moveEntry(table, oldEntry, newEntry);
newEntry->keyHash = oldEntry->keyHash;
newEntry->keyHash =
oldEntry->keyHash | (newEntry->keyHash & COLLISION_FLAG);
}
oldEntryAddr += entrySize;
}
@ -353,11 +424,12 @@ JS_DHashTableOperate(JSDHashTable *table, const void *key, JSDHashOperator op)
keyHash = table->ops->hashKey(table, key);
ENSURE_LIVE_KEYHASH(keyHash);
keyHash *= JS_DHASH_GOLDEN_RATIO;
keyHash &= ~COLLISION_FLAG;
switch (op) {
case JS_DHASH_LOOKUP:
METER(table->stats.lookups++);
entry = SearchTable(table, key, keyHash);
entry = SearchTable(table, key, keyHash, op);
break;
case JS_DHASH_ADD:
@ -391,10 +463,15 @@ JS_DHashTableOperate(JSDHashTable *table, const void *key, JSDHashOperator op)
* Look for entry after possibly growing, so we don't have to add it,
* then skip it while growing the table and re-add it after.
*/
entry = SearchTable(table, key, keyHash);
if (JS_DHASH_ENTRY_IS_FREE(entry)) {
entry = SearchTable(table, key, keyHash, op);
if (!ENTRY_IS_LIVE(entry)) {
/* Initialize the entry, indicating that it's no longer free. */
METER(table->stats.addMisses++);
if (ENTRY_IS_REMOVED(entry)) {
METER(table->stats.addOverRemoved++);
table->removedCount--;
keyHash |= COLLISION_FLAG;
}
if (table->ops->initEntry)
table->ops->initEntry(table, entry, key);
entry->keyHash = keyHash;
@ -404,8 +481,8 @@ JS_DHashTableOperate(JSDHashTable *table, const void *key, JSDHashOperator op)
break;
case JS_DHASH_REMOVE:
entry = SearchTable(table, key, keyHash);
if (JS_DHASH_ENTRY_IS_BUSY(entry)) {
entry = SearchTable(table, key, keyHash, op);
if (ENTRY_IS_LIVE(entry)) {
/* Clear this entry and mark it as "removed". */
METER(table->stats.removeHits++);
JS_DHashTableRawRemove(table, entry);
@ -432,9 +509,17 @@ JS_DHashTableOperate(JSDHashTable *table, const void *key, JSDHashOperator op)
JS_PUBLIC_API(void)
JS_DHashTableRawRemove(JSDHashTable *table, JSDHashEntryHdr *entry)
{
JSDHashNumber keyHash; /* load first in case clearEntry goofs it */
keyHash = entry->keyHash;
table->ops->clearEntry(table, entry);
MARK_ENTRY_REMOVED(entry);
table->removedCount++;
if (keyHash & COLLISION_FLAG) {
MARK_ENTRY_REMOVED(entry);
table->removedCount++;
} else {
METER(table->stats.removeFrees++);
MARK_ENTRY_FREE(entry);
}
table->entryCount--;
}
@ -503,14 +588,16 @@ JS_DHashTableDumpMeter(JSDHashTable *table, JSDHashEnumerator dump, FILE *fp)
entryAddr += entrySize;
if (!ENTRY_IS_LIVE(entry))
continue;
hash1 = saveHash1 = HASH1(entry->keyHash, table->hashShift);
hash1 = HASH1(entry->keyHash & ~COLLISION_FLAG, table->hashShift);
saveHash1 = hash1;
probe = ADDRESS_ENTRY(table, hash1);
chainLen = 1;
if (probe == entry) {
/* Start of a (possibly unit-length) chain. */
chainCount++;
} else {
hash2 = HASH2(entry->keyHash, table->sizeLog2, table->hashShift);
hash2 = HASH2(entry->keyHash & ~COLLISION_FLAG, table->sizeLog2,
table->hashShift);
do {
chainLen++;
hash1 -= hash2;
@ -527,13 +614,17 @@ JS_DHashTableDumpMeter(JSDHashTable *table, JSDHashEnumerator dump, FILE *fp)
}
entryCount = table->entryCount;
mean = (double)entryCount / chainCount;
variance = chainCount * sqsum - entryCount * entryCount;
if (variance < 0 || chainCount == 1)
variance = 0;
else
variance /= chainCount * (chainCount - 1);
sigma = sqrt(variance);
if (entryCount && chainCount) {
mean = (double)entryCount / chainCount;
variance = chainCount * sqsum - entryCount * entryCount;
if (variance < 0 || chainCount == 1)
variance = 0;
else
variance /= chainCount * (chainCount - 1);
sigma = sqrt(variance);
} else {
mean = sigma = 0;
}
fprintf(fp, "Double hashing statistics:\n");
fprintf(fp, " table size (in entries): %u\n", tableSize);
@ -549,17 +640,19 @@ JS_DHashTableDumpMeter(JSDHashTable *table, JSDHashEnumerator dump, FILE *fp)
fprintf(fp, " maximum hash chain length: %u\n", maxChainLen);
fprintf(fp, " number of lookups: %u\n", table->stats.lookups);
fprintf(fp, " adds that made a new entry: %u\n", table->stats.addMisses);
fprintf(fp, "adds that recycled removeds: %u\n", table->stats.addOverRemoved);
fprintf(fp, " adds that found an entry: %u\n", table->stats.addHits);
fprintf(fp, " add failures: %u\n", table->stats.addFailures);
fprintf(fp, " useful removes: %u\n", table->stats.removeHits);
fprintf(fp, " useless removes: %u\n", table->stats.removeMisses);
fprintf(fp, "removes that freed an entry: %u\n", table->stats.removeFrees);
fprintf(fp, " removes while enumerating: %u\n", table->stats.removeEnums);
fprintf(fp, " number of grows: %u\n", table->stats.grows);
fprintf(fp, " number of shrinks: %u\n", table->stats.shrinks);
fprintf(fp, " number of compresses: %u\n", table->stats.compresses);
fprintf(fp, "number of enumerate shrinks: %u\n", table->stats.enumShrinks);
if (maxChainLen && hash2) {
if (dump && maxChainLen && hash2) {
fputs("Maximum hash chain:\n", fp);
hash1 = maxChainHash1;
hash2 = maxChainHash2;


@ -41,6 +41,10 @@
JS_BEGIN_EXTERN_C
#ifdef DEBUG_brendan
#define JS_DHASHMETER 1
#endif
/* Minimum table size, or gross entry count (net is at most .75 loaded). */
#ifndef JS_DHASH_MIN_SIZE
#define JS_DHASH_MIN_SIZE 16
@ -179,10 +183,12 @@ struct JSDHashTable {
uint32 misses; /* searches that didn't find key */
uint32 lookups; /* number of JS_DHASH_LOOKUPs */
uint32 addMisses; /* adds that miss, and do work */
uint32 addOverRemoved; /* adds that recycled a removed entry */
uint32 addHits; /* adds that hit an existing entry */
uint32 addFailures; /* out-of-memory during add growth */
uint32 removeHits; /* removes that hit, and do work */
uint32 removeMisses; /* useless removes that miss */
uint32 removeFrees; /* removes that freed entry directly */
uint32 removeEnums; /* removes done by Enumerate */
uint32 grows; /* table expansions */
uint32 shrinks; /* table contractions */


@ -2203,10 +2203,6 @@ js_LookupProperty(JSContext *cx, JSObject *obj, jsid id, JSObject **objp,
JS_DHashTableRawRemove(table, entry);
else
JS_DHashTableOperate(table, &key, JS_DHASH_REMOVE);
if (table->entryCount == 0) {
cx->resolving = NULL;
JS_DHashTableDestroy(table);
}
if (!ok || *propp)
return ok;
}


@ -3,6 +3,7 @@
/ * Double hashing, a la Knuth 6./a\
* GENERATED BY js/src/plify_jsdhash.sed -- DO NOT EDIT!!!
s/jsdhash_h___/pldhash_h___/
s/jsdhash\.bigdump/pldhash.bigdump/
s/jstypes\.h/prtypes.h/
s/jsbit\.h/prbit.h/
s/jsdhash\.h/pldhash.h/


@ -53,6 +53,7 @@
#include <sys/time.h>
#endif
#include "plhash.h"
#include "pratom.h"
#include "prlog.h"
#include "prmon.h"
#include "prprf.h"
@ -232,15 +233,15 @@ typedef void (__stdcall *FREEDEBUGPROC) ( void *, int);
struct AllocationFuncs
{
MALLOCPROC malloc_proc;
CALLOCPROC calloc_proc;
REALLOCPROC realloc_proc;
MALLOCPROC malloc_proc;
CALLOCPROC calloc_proc;
REALLOCPROC realloc_proc;
FREEPROC free_proc;
#ifdef _DEBUG
MALLOCDEBUGPROC malloc_debug_proc;
CALLOCDEBUGPROC calloc_debug_proc;
REALLOCDEBUGPROC realloc_debug_proc;
FREEDEBUGPROC free_debug_proc;
MALLOCDEBUGPROC malloc_debug_proc;
CALLOCDEBUGPROC calloc_debug_proc;
REALLOCDEBUGPROC realloc_debug_proc;
FREEDEBUGPROC free_debug_proc;
#endif
int prevent_reentry;
}gAllocFuncs;
@ -271,6 +272,24 @@ static logfile *logfp = &default_logfile;
static PRMonitor *tmmon = NULL;
static char *sdlogname = NULL; /* filename for shutdown leak log */
/*
* This counter suppresses tracing, in case any tracing code needs to malloc,
* and it must be tested and manipulated only within tmmon.
*/
static uint32 suppress_tracing = 0;
#define TM_ENTER_MONITOR() \
PR_BEGIN_MACRO \
if (tmmon) \
PR_EnterMonitor(tmmon); \
PR_END_MACRO
#define TM_EXIT_MONITOR() \
PR_BEGIN_MACRO \
if (tmmon) \
PR_ExitMonitor(tmmon); \
PR_END_MACRO
/* We don't want more than 32 logfiles open at once, ok? */
typedef uint32 lfd_set;
@ -319,8 +338,7 @@ retry:
static void flush_logfile(logfile *fp)
{
int len, cnt;
int fd;
int len, cnt, fd;
char *bp;
len = fp->pos;
@ -442,11 +460,10 @@ struct callsite {
};
/* NB: these counters are incremented and decremented only within tmmon. */
static uint32 suppress_tracing = 0;
static uint32 library_serial_generator = 0;
static uint32 method_serial_generator = 0;
static uint32 callsite_serial_generator = 0;
static uint32 tmstats_serial_generator = 0;
static uint32 library_serial_generator = 0;
static uint32 method_serial_generator = 0;
static uint32 callsite_serial_generator = 0;
static uint32 tmstats_serial_generator = 0;
/* Root of the tree of callsites, the sum of all (cycle-compressed) stacks. */
static callsite calltree_root = {0, 0, LFD_SET_STATIC_INITIALIZER, NULL, NULL, 0, NULL, NULL, NULL};
@ -534,12 +551,11 @@ static PLHashTable *libraries = NULL;
/* Table mapping method names to logged 'N' record serial numbers. */
static PLHashTable *methods = NULL;
#if XP_WIN32
#ifdef XP_WIN32
#define MAX_STACKFRAMES 256
#define MAX_UNMANGLED_NAME_LEN 256
static callsite *calltree(int skip)
{
logfile *fp = logfp;
@ -570,19 +586,19 @@ static callsite *calltree(int skip)
ok = EnsureSymInitialized();
if (! ok)
return 0;
return 0;
/*
* Get the context information for this thread. That way we will
* know where our sp, fp, pc, etc. are and can fill in the
* STACKFRAME with the initial values.
* Get the context information for this thread. That way we will know
* where our sp, fp, pc, etc. are, and we can fill in the STACKFRAME with
* the initial values.
*/
context.ContextFlags = CONTEXT_FULL;
ok = GetThreadContext(myThread, &context);
if (! ok)
return 0;
return 0;
/* Setup initial stack frame to walk from */
/* Setup initial stack frame from which to walk. */
memset(&(frame[0]), 0, sizeof(frame[0]));
frame[0].AddrPC.Offset = context.Eip;
frame[0].AddrPC.Mode = AddrModeFlat;
@ -590,40 +606,38 @@ static callsite *calltree(int skip)
frame[0].AddrStack.Mode = AddrModeFlat;
frame[0].AddrFrame.Offset = context.Ebp;
frame[0].AddrFrame.Mode = AddrModeFlat;
while (1) {
PIMAGEHLP_SYMBOL symbol = (PIMAGEHLP_SYMBOL) buf;
if (framenum)
{
memcpy(&(frame[framenum]),&(frame[framenum-1]),sizeof(STACKFRAME));
}
for (;;) {
PIMAGEHLP_SYMBOL symbol = (PIMAGEHLP_SYMBOL) buf;
if (framenum)
memcpy(&(frame[framenum]),&(frame[framenum-1]),sizeof(STACKFRAME));
ok = _StackWalk(IMAGE_FILE_MACHINE_I386,
myProcess,
myThread,
&(frame[framenum]),
&context,
0, /* read process memory routine */
_SymFunctionTableAccess, /* function table access
routine */
_SymGetModuleBase, /* module base routine */
0); /* translate address routine */
ok = _StackWalk(IMAGE_FILE_MACHINE_I386,
myProcess,
myThread,
&(frame[framenum]),
&context,
0, /* read process memory hook */
_SymFunctionTableAccess, /* function table access hook */
_SymGetModuleBase, /* module base hook */
0); /* translate address hook */
if (!ok) {
break;
}
if (skip)
{
skip--;
continue;/*skip tells us to skip the first skip amount of stackframes*/
}
if (frame[framenum].AddrPC.Offset == 0)
break;
framenum++;
if (!ok)
break;
if (skip) {
/* skip tells us to skip the first skip amount of stackframes */
skip--;
continue;
}
if (frame[framenum].AddrPC.Offset == 0)
break;
framenum++;
}
depth = framenum;
maxstack = (depth > tmstats.calltree_maxstack);
if (maxstack)
tmstats.calltree_maxstack = depth;
/* Reverse the stack again, finding and building a path in the tree. */
parent = &calltree_root;
do {
@ -675,17 +689,16 @@ static callsite *calltree(int skip)
/*
* Not in tree at all, or not logged to fp: let's find our symbolic
* callsite info. XXX static syms are masked by nearest lower global
* Load up the info for the dll.
*/
if (!_SymGetModuleInfo(myProcess,frame[framenum].AddrPC.Offset,&imagehelp))/*load up the info for the dll*/
{
if (!_SymGetModuleInfo(myProcess,
frame[framenum].AddrPC.Offset,
&imagehelp)) {
DWORD error = GetLastError();
PR_ASSERT(error);
library = "unknown";/* ew */
}
else
{
library = imagehelp.ModuleName;
library = "unknown"; /* XXX mjudge sez "ew!" */
} else {
library = imagehelp.ModuleName;
}
symbol = (PIMAGEHLP_SYMBOL) buf;
@ -842,9 +855,8 @@ static callsite *calltree(int skip)
return site;
}
#else /*XP_UNIX*/
#else
/*XP_UNIX*/
static callsite *calltree(uint32 *bp)
{
logfile *fp = logfp;
@ -1116,8 +1128,7 @@ backtrace(int skip)
return site;
}
#else
/*XP_UNIX*/
#else /*XP_UNIX*/
callsite *
backtrace(int skip)
@ -1153,7 +1164,7 @@ backtrace(int skip)
}
#endif /*XP_WIN32*/
#endif /* XP_UNIX */
typedef struct allocation {
@ -1228,7 +1239,7 @@ static PLHashTable *new_allocations(void)
#define get_allocations() (allocations ? allocations : new_allocations())
#if XP_UNIX
#ifdef XP_UNIX
__ptr_t malloc(size_t size)
{
@ -1238,8 +1249,7 @@ __ptr_t malloc(size_t size)
allocation *alloc;
ptr = __libc_malloc(size);
if (tmmon)
PR_EnterMonitor(tmmon);
TM_ENTER_MONITOR();
tmstats.malloc_calls++;
if (!ptr) {
tmstats.malloc_failures++;
@ -1257,8 +1267,7 @@ __ptr_t malloc(size_t size)
}
}
}
if (tmmon)
PR_ExitMonitor(tmmon);
TM_EXIT_MONITOR();
return ptr;
}
@ -1270,8 +1279,7 @@ __ptr_t calloc(size_t count, size_t size)
allocation *alloc;
ptr = __libc_calloc(count, size);
if (tmmon)
PR_EnterMonitor(tmmon);
TM_ENTER_MONITOR();
tmstats.calloc_calls++;
if (!ptr) {
tmstats.calloc_failures++;
@ -1290,8 +1298,7 @@ __ptr_t calloc(size_t count, size_t size)
}
}
}
if (tmmon)
PR_ExitMonitor(tmmon);
TM_EXIT_MONITOR();
return ptr;
}
@ -1304,8 +1311,7 @@ __ptr_t realloc(__ptr_t ptr, size_t size)
PLHashEntry **hep, *he;
allocation *alloc;
if (tmmon)
PR_EnterMonitor(tmmon);
TM_ENTER_MONITOR();
tmstats.realloc_calls++;
if (suppress_tracing == 0) {
oldptr = ptr;
@ -1323,13 +1329,11 @@ __ptr_t realloc(__ptr_t ptr, size_t size)
}
}
}
if (tmmon)
PR_ExitMonitor(tmmon);
TM_EXIT_MONITOR();
ptr = __libc_realloc(ptr, size);
if (tmmon)
PR_EnterMonitor(tmmon);
TM_ENTER_MONITOR();
if (!ptr && size) {
/*
* When realloc() fails, the original block is not freed or moved, so
@ -1346,8 +1350,9 @@ __ptr_t realloc(__ptr_t ptr, size_t size)
suppress_tracing++;
if (ptr != oldptr) {
/*
* If we're reallocating (not allocating new space by passing
* null to realloc) and realloc moved the block, free oldptr.
* If we're reallocating (not merely allocating new space by
* passing null to realloc) and realloc has moved the block,
* free oldptr.
*/
if (he)
PL_HashTableRemove(allocations, oldptr);
@ -1356,8 +1361,8 @@ __ptr_t realloc(__ptr_t ptr, size_t size)
he = PL_HashTableAdd(allocations, ptr, site);
} else {
/*
* If we haven't yet recorded an allocation (possibly due to a
* temporary memory shortage), do it now.
* If we haven't yet recorded an allocation (possibly due to
* a temporary memory shortage), do it now.
*/
if (!he)
he = PL_HashTableAdd(allocations, ptr, site);
@ -1369,8 +1374,7 @@ __ptr_t realloc(__ptr_t ptr, size_t size)
}
}
}
if (tmmon)
PR_ExitMonitor(tmmon);
TM_EXIT_MONITOR();
return ptr;
}
@ -1380,8 +1384,7 @@ void free(__ptr_t ptr)
callsite *site;
allocation *alloc;
if (tmmon)
PR_EnterMonitor(tmmon);
TM_ENTER_MONITOR();
tmstats.free_calls++;
if (!ptr) {
tmstats.null_free_calls++;
@ -1393,17 +1396,18 @@ void free(__ptr_t ptr)
site = (callsite*) he->value;
if (site) {
alloc = (allocation*) he;
log_event2(logfp, TM_EVENT_FREE, site->serial, alloc->size);
log_event2(logfp, TM_EVENT_FREE, site->serial,
alloc->size);
}
PL_HashTableRawRemove(allocations, hep, he);
}
}
}
if (tmmon)
PR_ExitMonitor(tmmon);
TM_EXIT_MONITOR();
__libc_free(ptr);
}
#endif
#endif /* XP_UNIX */
static const char magic[] = NS_TRACE_MALLOC_MAGIC;
@ -1426,10 +1430,11 @@ PR_IMPLEMENT(void) NS_TraceMallocStartup(int logfd)
atexit(NS_TraceMallocShutdown);
tmmon = PR_NewMonitor();
/*register listeners for win32*/
#ifdef XP_WIN32
/* Register listeners for win32. */
{
StartupHooker();
StartupHooker();
}
#endif
}
@ -1494,8 +1499,8 @@ PR_IMPLEMENT(int) NS_TraceMallocStartupArgs(int argc, char* argv[])
int pipefds[2];
switch (*tmlogname) {
#if XP_UNIX
case '|':
#ifdef XP_UNIX
case '|':
if (pipe(pipefds) == 0) {
pid_t pid = fork();
if (pid == 0) {
@ -1544,7 +1549,7 @@ PR_IMPLEMENT(int) NS_TraceMallocStartupArgs(int argc, char* argv[])
exit(1);
}
break;
#endif /*XP_UNIX*/
#endif /*XP_UNIX*/
case '-':
/* Don't log from startup, but do prepare to log later. */
/* XXX traditional meaning of '-' as option argument is "stdin" or "stdout" */
@ -1610,32 +1615,26 @@ PR_IMPLEMENT(void) NS_TraceMallocDisable()
{
logfile *fp;
if (tmmon)
PR_EnterMonitor(tmmon);
TM_ENTER_MONITOR();
for (fp = logfile_list; fp; fp = fp->next)
flush_logfile(fp);
suppress_tracing++;
if (tmmon)
PR_ExitMonitor(tmmon);
TM_EXIT_MONITOR();
}
PR_IMPLEMENT(void) NS_TraceMallocEnable()
{
if (tmmon)
PR_EnterMonitor(tmmon);
TM_ENTER_MONITOR();
suppress_tracing--;
if (tmmon)
PR_ExitMonitor(tmmon);
TM_EXIT_MONITOR();
}
PR_IMPLEMENT(int) NS_TraceMallocChangeLogFD(int fd)
{
logfile *oldfp, *fp;
struct stat sb;
if (tmmon)
PR_EnterMonitor(tmmon);
TM_ENTER_MONITOR();
oldfp = logfp;
if (oldfp->fd != fd) {
flush_logfile(oldfp);
@ -1646,13 +1645,10 @@ PR_IMPLEMENT(int) NS_TraceMallocChangeLogFD(int fd)
(void) write(fd, magic, NS_TRACE_MALLOC_MAGIC_SIZE);
logfp = fp;
}
if (tmmon)
PR_ExitMonitor(tmmon);
TM_EXIT_MONITOR();
return oldfp->fd;
}
static PRIntn
lfd_clr_enumerator(PLHashEntry *he, PRIntn i, void *arg)
{
@ -1667,7 +1663,7 @@ static void
lfd_clr_walk(callsite *site, logfile *fp)
{
callsite *kid;
LFD_CLR(fp->lfd, &site->lfdset);
for (kid = site->kids; kid; kid = kid->siblings)
lfd_clr_walk(kid, fp);
@ -1678,8 +1674,7 @@ NS_TraceMallocCloseLogFD(int fd)
{
logfile *fp;
if (tmmon)
PR_EnterMonitor(tmmon);
TM_ENTER_MONITOR();
fp = get_logfile(fd);
if (fp) {
@ -1710,44 +1705,39 @@ NS_TraceMallocCloseLogFD(int fd)
}
}
if (tmmon)
PR_ExitMonitor(tmmon);
TM_EXIT_MONITOR();
close(fd);
}
PR_IMPLEMENT(void)
NS_TraceMallocLogTimestamp(const char *caption)
{
logfile *fp;
#if defined(XP_UNIX)
#ifdef XP_UNIX
struct timeval tv;
#endif
#if defined(XP_WIN32)
#ifdef XP_WIN32
struct _timeb tb;
#endif
if (tmmon)
PR_EnterMonitor(tmmon);
TM_ENTER_MONITOR();
fp = logfp;
log_byte(fp, TM_EVENT_TIMESTAMP);
#if defined(XP_UNIX)
#ifdef XP_UNIX
gettimeofday(&tv, NULL);
log_uint32(fp, (uint32) tv.tv_sec);
log_uint32(fp, (uint32) tv.tv_usec);
#endif
#if defined(XP_WIN32)
#ifdef XP_WIN32
_ftime(&tb);
log_uint32(fp, (uint32) tb.time);
log_uint32(fp, (uint32) tb.millitm);
#endif
log_string(fp, caption);
if (tmmon)
PR_ExitMonitor(tmmon);
TM_EXIT_MONITOR();
}
static PRIntn
@ -1775,6 +1765,18 @@ allocation_enumerator(PLHashEntry *he, PRIntn i, void *arg)
return HT_ENUMERATE_NEXT;
}
PR_IMPLEMENT(void)
NS_TraceStack(int skip, FILE *ofp)
{
callsite *site;
site = backtrace(skip + 1);
while (site) {
if (site->name || site->parent)
fprintf(ofp, "%s[%s +0x%X]\n", site->name, site->library, site->offset);
site = site->parent;
}
}
PR_IMPLEMENT(int)
NS_TraceMallocDumpAllocations(const char *pathname)
@ -1796,32 +1798,28 @@ NS_TraceMallocFlushLogfiles()
{
logfile *fp;
if (tmmon)
PR_EnterMonitor(tmmon);
TM_ENTER_MONITOR();
for (fp = logfile_list; fp; fp = fp->next)
flush_logfile(fp);
if (tmmon)
PR_ExitMonitor(tmmon);
TM_EXIT_MONITOR();
}
#ifdef XP_WIN32
PR_IMPLEMENT(void)
PR_IMPLEMENT(void)
MallocCallback(void *aPtr, size_t size)
{
callsite *site;
PLHashEntry *he;
allocation *alloc;
TM_ENTER_MONITOR();
tmstats.malloc_calls++;
if (!aPtr) {
tmstats.malloc_failures++;
} else if (suppress_tracing == 0) {
if (tmmon)
PR_EnterMonitor(tmmon);
site = backtrace(4);
if (site)
log_event2(logfp, TM_EVENT_MALLOC, site->serial, size);
@ -1834,13 +1832,10 @@ MallocCallback(void *aPtr, size_t size)
alloc->size = size;
}
}
if (tmmon)
PR_ExitMonitor(tmmon);
}
TM_EXIT_MONITOR();
}
PR_IMPLEMENT(void)
CallocCallback(void *ptr, size_t count, size_t size)
{
@ -1848,12 +1843,11 @@ CallocCallback(void *ptr, size_t count, size_t size)
PLHashEntry *he;
allocation *alloc;
TM_ENTER_MONITOR();
tmstats.calloc_calls++;
if (!ptr) {
tmstats.calloc_failures++;
} else if (suppress_tracing == 0) {
if (tmmon)
PR_EnterMonitor(tmmon);
site = backtrace(1);
size *= count;
if (site)
@ -1867,9 +1861,8 @@ CallocCallback(void *ptr, size_t count, size_t size)
alloc->size = size;
}
}
if (tmmon)
PR_ExitMonitor(tmmon);
}
TM_EXIT_MONITOR();
}
PR_IMPLEMENT(void)
@ -1881,13 +1874,12 @@ ReallocCallback(void * oldptr, void *ptr, size_t size)
PLHashEntry **hep, *he;
allocation *alloc;
TM_ENTER_MONITOR();
tmstats.realloc_calls++;
if (suppress_tracing == 0) {
oldsite = NULL;
oldsize = 0;
he = NULL;
if (tmmon)
PR_EnterMonitor(tmmon);
if (oldptr && get_allocations()) {
hash = hash_pointer(oldptr);
hep = PL_HashTableRawLookup(allocations, hash, oldptr);
@ -1898,31 +1890,15 @@ ReallocCallback(void * oldptr, void *ptr, size_t size)
oldsize = alloc->size;
}
}
#ifdef EXIT_TMMON_AROUND_REALLOC
/* XXX rusty.lynch@intel.com found that oldsize gets corrupted on
his SMP Linux box occasionally, unless tmmon is held across
the call to __libc_realloc. Figure out why that stack var
is being trashed, and until then use his workaround. */
if (tmmon)
PR_ExitMonitor(tmmon);
#endif
}
if (!ptr && size) {
tmstats.realloc_failures++;
#ifndef EXIT_TMMON_AROUND_REALLOC
if (tmmon && suppress_tracing == 0)
PR_ExitMonitor(tmmon);
#endif
}
if (!ptr && size) {
tmstats.realloc_failures++;
/*
* When realloc() fails, the original block is not freed or moved, so
* we'll leave the allocation entry untouched.
*/
} else if (suppress_tracing == 0) {
#ifdef EXIT_TMMON_AROUND_REALLOC
if (tmmon)
PR_EnterMonitor(tmmon);
#endif
/*
* When realloc() fails, the original block is not freed or moved, so
* we'll leave the allocation entry untouched.
*/
} else if (suppress_tracing == 0) {
site = backtrace(1);
if (site) {
log_event4(logfp, TM_EVENT_REALLOC, site->serial, size,
@ -1954,9 +1930,8 @@ ReallocCallback(void * oldptr, void *ptr, size_t size)
alloc->size = size;
}
}
if (tmmon)
PR_ExitMonitor(tmmon);
}
TM_EXIT_MONITOR();
}
PR_IMPLEMENT(void)
@ -1966,12 +1941,11 @@ FreeCallback(void * ptr)
callsite *site;
allocation *alloc;
TM_ENTER_MONITOR();
tmstats.free_calls++;
if (!ptr) {
tmstats.null_free_calls++;
} else if (suppress_tracing == 0) {
if (tmmon)
PR_EnterMonitor(tmmon);
if (get_allocations()) {
hep = PL_HashTableRawLookup(allocations, hash_pointer(ptr), ptr);
he = *hep;
@ -1984,11 +1958,10 @@ FreeCallback(void * ptr)
PL_HashTableRawRemove(allocations, hep, he);
}
}
if (tmmon)
PR_ExitMonitor(tmmon);
}
TM_EXIT_MONITOR();
}
#endif /*XP_WIN32*/
#endif /* NS_TRACE_MALLOC */


@ -161,6 +161,15 @@ PR_EXTERN(void) NS_TraceMallocCloseLogFD(int fd);
*/
PR_EXTERN(void) NS_TraceMallocLogTimestamp(const char *caption);
/**
* Walk the stack, dumping frames in standard form to ofp. If skip is 0,
* exclude the frames for NS_TraceStack and anything it calls to do the walk.
* If skip is less than 0, include -skip such frames. If skip is positive,
* exclude that many frames leading to the call to NS_TraceStack.
*/
PR_EXTERN(void)
NS_TraceStack(int skip, FILE *ofp);
/**
* Dump a human-readable listing of current allocations and their compressed
* stack backtraces to the file named by pathname. Beware this file may have
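
A hedged usage sketch for the new API (dump_here is an illustrative caller,
not part of this commit): per the comment above, skip = 1 excludes
dump_here's own frame along with the walk machinery, so the trace begins at
whatever called dump_here.

    #include <stdio.h>
    #include "nsTraceMalloc.h"

    static void dump_here(void)
    {
        /* One frame per line, in the "%s[%s +0x%X]" form NS_TraceStack logs. */
        NS_TraceStack(1, stderr);
    }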


@ -45,6 +45,9 @@
#include "prlog.h" /* for PR_ASSERT */
#ifdef PL_DHASHMETER
# if defined MOZILLA_CLIENT && defined DEBUG_brendan
# include "nsTraceMalloc.h"
# endif
# define METER(x) x
#else
# define METER(x) /* nothing */
@ -170,7 +173,7 @@ PL_DHashTableInit(PLDHashTable *table, PLDHashTableOps *ops, void *data,
fprintf(stderr,
"pldhash: for the table at address 0x%p, the given entrySize"
" of %lu %s favors chaining over double hashing.\n",
table,
(void *)table,
(unsigned long) entrySize,
(entrySize > 16 * sizeof(void*)) ? "definitely" : "probably");
}
@ -204,12 +207,29 @@ PL_DHashTableInit(PLDHashTable *table, PLDHashTableOps *ops, void *data,
#define HASH1(hash0, shift) ((hash0) >> (shift))
#define HASH2(hash0,log2,shift) ((((hash0) << (log2)) >> (shift)) | 1)
/* Reserve keyHash 0 for free entries and 1 for removed-entry sentinels. */
/*
* Reserve keyHash 0 for free entries and 1 for removed-entry sentinels. Note
* that a removed-entry sentinel need be stored only if the removed entry had
* a colliding entry added after it. Therefore we can use 1 as the collision
* flag in addition to the removed-entry sentinel value. Multiplicative hash
* uses the high order bits of keyHash, so this least-significant reservation
* should not hurt the hash function's effectiveness much.
*
* If you change any of these magic numbers, also update PL_DHASH_ENTRY_IS_LIVE
* in pldhash.h. It used to be private to pldhash.c, but then became public to
* assist iterator writers who inspect table->entryStore directly.
*/
#define COLLISION_FLAG ((PLDHashNumber) 1)
#define MARK_ENTRY_FREE(entry) ((entry)->keyHash = 0)
#define MARK_ENTRY_REMOVED(entry) ((entry)->keyHash = 1)
#define ENTRY_IS_REMOVED(entry) ((entry)->keyHash == 1)
#define ENTRY_IS_LIVE(entry) PL_DHASH_ENTRY_IS_LIVE(entry)
#define ENSURE_LIVE_KEYHASH(hash0) if (hash0 < 2) hash0 -= 2; else (void)0
/* Match an entry's keyHash against an unstored one computed from a key. */
#define MATCH_ENTRY_KEYHASH(entry,hash0) \
(((entry)->keyHash & ~COLLISION_FLAG) == (hash0))
/* Compute the address of the indexed entry in table. */
#define ADDRESS_ENTRY(table, index) \
((PLDHashEntryHdr *)((table)->entryStore + (index) * (table)->entrySize))
@ -221,6 +241,18 @@ PL_DHashTableFinish(PLDHashTable *table)
PRUint32 entrySize;
PLDHashEntryHdr *entry;
#ifdef DEBUG_brendan
static FILE *dumpfp = NULL;
if (!dumpfp) dumpfp = fopen("/tmp/pldhash.bigdump", "w");
if (dumpfp) {
#ifdef MOZILLA_CLIENT
NS_TraceStack(1, dumpfp);
#endif
PL_DHashTableDumpMeter(table, NULL, dumpfp);
fputc('\n', dumpfp);
}
#endif
/* Call finalize before clearing entries. */
table->ops->finalize(table);
@ -242,15 +274,17 @@ PL_DHashTableFinish(PLDHashTable *table)
}
static PLDHashEntryHdr *
SearchTable(PLDHashTable *table, const void *key, PLDHashNumber keyHash)
SearchTable(PLDHashTable *table, const void *key, PLDHashNumber keyHash,
PLDHashOperator op)
{
PLDHashNumber hash1, hash2;
int hashShift, sizeLog2;
PLDHashEntryHdr *entry;
PLDHashEntryHdr *entry, *firstRemoved;
PLDHashMatchEntry matchEntry;
PRUint32 sizeMask;
METER(table->stats.searches++);
PR_ASSERT(!(keyHash & COLLISION_FLAG));
/* Compute the primary hash address. */
hashShift = table->hashShift;
@ -265,7 +299,7 @@ SearchTable(PLDHashTable *table, const void *key, PLDHashNumber keyHash)
/* Hit: return entry. */
matchEntry = table->ops->matchEntry;
if (entry->keyHash == keyHash && matchEntry(table, entry, key)) {
if (MATCH_ENTRY_KEYHASH(entry, keyHash) && matchEntry(table, entry, key)) {
METER(table->stats.hits++);
return entry;
}
@ -274,19 +308,54 @@ SearchTable(PLDHashTable *table, const void *key, PLDHashNumber keyHash)
sizeLog2 = table->sizeLog2;
hash2 = HASH2(keyHash, sizeLog2, hashShift);
sizeMask = PR_BITMASK(sizeLog2);
do {
/* Save the first removed entry pointer so PL_DHASH_ADD can recycle it. */
if (ENTRY_IS_REMOVED(entry)) {
firstRemoved = entry;
} else {
firstRemoved = NULL;
if (op == PL_DHASH_ADD)
entry->keyHash |= COLLISION_FLAG;
}
for (;;) {
METER(table->stats.steps++);
hash1 -= hash2;
hash1 &= sizeMask;
entry = ADDRESS_ENTRY(table, hash1);
if (PL_DHASH_ENTRY_IS_FREE(entry)) {
#ifdef DEBUG_brendan
extern char *getenv(const char *);
static PRBool gotFirstRemovedEnvar = PR_FALSE;
static char *doFirstRemoved = NULL;
if (!gotFirstRemovedEnvar) {
doFirstRemoved = getenv("DHASH_DO_FIRST_REMOVED");
gotFirstRemovedEnvar = PR_TRUE;
}
if (!doFirstRemoved) return entry;
#endif
METER(table->stats.misses++);
return (firstRemoved && op == PL_DHASH_ADD) ? firstRemoved : entry;
}
if (MATCH_ENTRY_KEYHASH(entry, keyHash) &&
matchEntry(table, entry, key)) {
METER(table->stats.hits++);
return entry;
}
} while (entry->keyHash != keyHash || !matchEntry(table, entry, key));
METER(table->stats.hits++);
return entry;
if (ENTRY_IS_REMOVED(entry)) {
if (!firstRemoved)
firstRemoved = entry;
} else {
if (op == PL_DHASH_ADD)
entry->keyHash |= COLLISION_FLAG;
}
}
/* NOTREACHED */
return NULL;
}
static PRBool
@ -329,11 +398,13 @@ ChangeTable(PLDHashTable *table, int deltaLog2)
for (i = 0; i < oldCapacity; i++) {
oldEntry = (PLDHashEntryHdr *)oldEntryAddr;
if (ENTRY_IS_LIVE(oldEntry)) {
oldEntry->keyHash &= ~COLLISION_FLAG;
newEntry = SearchTable(table, getKey(table, oldEntry),
oldEntry->keyHash);
oldEntry->keyHash, PL_DHASH_ADD);
PR_ASSERT(PL_DHASH_ENTRY_IS_FREE(newEntry));
moveEntry(table, oldEntry, newEntry);
newEntry->keyHash = oldEntry->keyHash;
newEntry->keyHash =
oldEntry->keyHash | (newEntry->keyHash & COLLISION_FLAG);
}
oldEntryAddr += entrySize;
}
@ -354,11 +425,12 @@ PL_DHashTableOperate(PLDHashTable *table, const void *key, PLDHashOperator op)
keyHash = table->ops->hashKey(table, key);
ENSURE_LIVE_KEYHASH(keyHash);
keyHash *= PL_DHASH_GOLDEN_RATIO;
keyHash &= ~COLLISION_FLAG;
switch (op) {
case PL_DHASH_LOOKUP:
METER(table->stats.lookups++);
entry = SearchTable(table, key, keyHash);
entry = SearchTable(table, key, keyHash, op);
break;
case PL_DHASH_ADD:
@ -392,10 +464,15 @@ PL_DHashTableOperate(PLDHashTable *table, const void *key, PLDHashOperator op)
* Look for entry after possibly growing, so we don't have to add it,
* then skip it while growing the table and re-add it after.
*/
entry = SearchTable(table, key, keyHash);
if (PL_DHASH_ENTRY_IS_FREE(entry)) {
entry = SearchTable(table, key, keyHash, op);
if (!ENTRY_IS_LIVE(entry)) {
/* Initialize the entry, indicating that it's no longer free. */
METER(table->stats.addMisses++);
if (ENTRY_IS_REMOVED(entry)) {
METER(table->stats.addOverRemoved++);
table->removedCount--;
keyHash |= COLLISION_FLAG;
}
if (table->ops->initEntry)
table->ops->initEntry(table, entry, key);
entry->keyHash = keyHash;
@ -405,8 +482,8 @@ PL_DHashTableOperate(PLDHashTable *table, const void *key, PLDHashOperator op)
break;
case PL_DHASH_REMOVE:
entry = SearchTable(table, key, keyHash);
if (PL_DHASH_ENTRY_IS_BUSY(entry)) {
entry = SearchTable(table, key, keyHash, op);
if (ENTRY_IS_LIVE(entry)) {
/* Clear this entry and mark it as "removed". */
METER(table->stats.removeHits++);
PL_DHashTableRawRemove(table, entry);
@ -433,9 +510,17 @@ PL_DHashTableOperate(PLDHashTable *table, const void *key, PLDHashOperator op)
PR_IMPLEMENT(void)
PL_DHashTableRawRemove(PLDHashTable *table, PLDHashEntryHdr *entry)
{
PLDHashNumber keyHash; /* load first in case clearEntry goofs it */
keyHash = entry->keyHash;
table->ops->clearEntry(table, entry);
MARK_ENTRY_REMOVED(entry);
table->removedCount++;
if (keyHash & COLLISION_FLAG) {
MARK_ENTRY_REMOVED(entry);
table->removedCount++;
} else {
METER(table->stats.removeFrees++);
MARK_ENTRY_FREE(entry);
}
table->entryCount--;
}
@ -504,14 +589,16 @@ PL_DHashTableDumpMeter(PLDHashTable *table, PLDHashEnumerator dump, FILE *fp)
entryAddr += entrySize;
if (!ENTRY_IS_LIVE(entry))
continue;
hash1 = saveHash1 = HASH1(entry->keyHash, table->hashShift);
hash1 = HASH1(entry->keyHash & ~COLLISION_FLAG, table->hashShift);
saveHash1 = hash1;
probe = ADDRESS_ENTRY(table, hash1);
chainLen = 1;
if (probe == entry) {
/* Start of a (possibly unit-length) chain. */
chainCount++;
} else {
hash2 = HASH2(entry->keyHash, table->sizeLog2, table->hashShift);
hash2 = HASH2(entry->keyHash & ~COLLISION_FLAG, table->sizeLog2,
table->hashShift);
do {
chainLen++;
hash1 -= hash2;
@ -528,13 +615,17 @@ PL_DHashTableDumpMeter(PLDHashTable *table, PLDHashEnumerator dump, FILE *fp)
}
entryCount = table->entryCount;
mean = (double)entryCount / chainCount;
variance = chainCount * sqsum - entryCount * entryCount;
if (variance < 0 || chainCount == 1)
variance = 0;
else
variance /= chainCount * (chainCount - 1);
sigma = sqrt(variance);
if (entryCount && chainCount) {
mean = (double)entryCount / chainCount;
variance = chainCount * sqsum - entryCount * entryCount;
if (variance < 0 || chainCount == 1)
variance = 0;
else
variance /= chainCount * (chainCount - 1);
sigma = sqrt(variance);
} else {
mean = sigma = 0;
}
fprintf(fp, "Double hashing statistics:\n");
fprintf(fp, " table size (in entries): %u\n", tableSize);
@ -550,17 +641,19 @@ PL_DHashTableDumpMeter(PLDHashTable *table, PLDHashEnumerator dump, FILE *fp)
fprintf(fp, " maximum hash chain length: %u\n", maxChainLen);
fprintf(fp, " number of lookups: %u\n", table->stats.lookups);
fprintf(fp, " adds that made a new entry: %u\n", table->stats.addMisses);
fprintf(fp, "adds that recycled removeds: %u\n", table->stats.addOverRemoved);
fprintf(fp, " adds that found an entry: %u\n", table->stats.addHits);
fprintf(fp, " add failures: %u\n", table->stats.addFailures);
fprintf(fp, " useful removes: %u\n", table->stats.removeHits);
fprintf(fp, " useless removes: %u\n", table->stats.removeMisses);
fprintf(fp, "removes that freed an entry: %u\n", table->stats.removeFrees);
fprintf(fp, " removes while enumerating: %u\n", table->stats.removeEnums);
fprintf(fp, " number of grows: %u\n", table->stats.grows);
fprintf(fp, " number of shrinks: %u\n", table->stats.shrinks);
fprintf(fp, " number of compresses: %u\n", table->stats.compresses);
fprintf(fp, "number of enumerate shrinks: %u\n", table->stats.enumShrinks);
if (maxChainLen && hash2) {
if (dump && maxChainLen && hash2) {
fputs("Maximum hash chain:\n", fp);
hash1 = maxChainHash1;
hash2 = maxChainHash2;


@ -42,6 +42,10 @@
PR_BEGIN_EXTERN_C
#ifdef DEBUG_brendan
#define PL_DHASHMETER 1
#endif
/* Minimum table size, or gross entry count (net is at most .75 loaded). */
#ifndef PL_DHASH_MIN_SIZE
#define PL_DHASH_MIN_SIZE 16
@ -180,10 +184,12 @@ struct PLDHashTable {
PRUint32 misses; /* searches that didn't find key */
PRUint32 lookups; /* number of PL_DHASH_LOOKUPs */
PRUint32 addMisses; /* adds that miss, and do work */
PRUint32 addOverRemoved; /* adds that recycled a removed entry */
PRUint32 addHits; /* adds that hit an existing entry */
PRUint32 addFailures; /* out-of-memory during add growth */
PRUint32 removeHits; /* removes that hit, and do work */
PRUint32 removeMisses; /* useless removes that miss */
PRUint32 removeFrees; /* removes that freed entry directly */
PRUint32 removeEnums; /* removes done by Enumerate */
PRUint32 grows; /* table expansions */
PRUint32 shrinks; /* table contractions */