Add macrology for presetting double hashtable capacity to avoid growth given a known initial population (356116, r=dbaron).

This commit is contained in:
brendan%mozilla.org 2007-01-10 22:11:34 +00:00
parent 2516c54c15
commit 2e4976944e
8 changed files with 72 additions and 17 deletions

View File

@ -240,8 +240,8 @@ JS_DHashTableInit(JSDHashTable *table, const JSDHashTableOps *ops, void *data,
if (capacity >= JS_DHASH_SIZE_LIMIT)
return JS_FALSE;
table->hashShift = JS_DHASH_BITS - log2;
table->maxAlphaFrac = 0xC0; /* .75 */
table->minAlphaFrac = 0x40; /* .25 */
table->maxAlphaFrac = (uint8)(0x100 * JS_DHASH_DEFAULT_MAX_ALPHA);
table->minAlphaFrac = (uint8)(0x100 * JS_DHASH_DEFAULT_MIN_ALPHA);
table->entrySize = entrySize;
table->entryCount = table->removedCount = 0;
table->generation = 0;

View File

@ -455,6 +455,30 @@ JS_DHashTableSetAlphaBounds(JSDHashTable *table,
((float)((table)->entrySize / sizeof(void *) - 1) \
/ ((table)->entrySize / sizeof(void *) + (k)))
/*
* Default max/min alpha, and macros to compute the value for the |capacity|
* parameter to JS_NewDHashTable and JS_DHashTableInit, given default or any
* max alpha, such that adding entryCount entries right after initializing the
* table will not require a reallocation (so JS_DHASH_ADD can't fail for those
* JS_DHashTableOperate calls).
*
* NB: JS_DHASH_CAP is a helper macro meant for use only in JS_DHASH_CAPACITY.
* Don't use it directly!
*/
/* Default load-factor bounds: grow when more than 3/4 full... */
#define JS_DHASH_DEFAULT_MAX_ALPHA 0.75
/* ...and shrink when less than 1/4 full. */
#define JS_DHASH_DEFAULT_MIN_ALPHA 0.25
/*
 * Truncating estimate of entryCount / maxAlpha.  Helper for
 * JS_DHASH_CAPACITY only -- see the NB in the comment block above.
 */
#define JS_DHASH_CAP(entryCount, maxAlpha) \
((uint32)((double)(entryCount) / (maxAlpha)))
/*
 * JS_DHASH_CAP rounded up by one when, after scaling by the 8-bit
 * fixed-point alpha fraction ((uint8)(0x100 * maxAlpha), the same
 * representation JS_DHashTableInit stores in maxAlphaFrac), the truncated
 * capacity could not hold entryCount entries without exceeding maxAlpha.
 * NOTE(review): (uint8)(0x100 * (maxAlpha)) wraps to 0 for maxAlpha ==
 * 1.0, so this assumes maxAlpha < 1 -- confirm against the alpha checks
 * in JS_DHashTableSetAlphaBounds.
 */
#define JS_DHASH_CAPACITY(entryCount, maxAlpha) \
(JS_DHASH_CAP(entryCount, maxAlpha) + \
(((JS_DHASH_CAP(entryCount, maxAlpha) * (uint8)(0x100 * (maxAlpha))) \
>> 8) < (entryCount)))
/* JS_DHASH_CAPACITY specialized to JS_DHASH_DEFAULT_MAX_ALPHA. */
#define JS_DHASH_DEFAULT_CAPACITY(entryCount) \
JS_DHASH_CAPACITY(entryCount, JS_DHASH_DEFAULT_MAX_ALPHA)
/*
* Finalize table's data, free its entry storage using table->ops->freeTable,
* and leave its members unchanged from their last live values (which leaves

View File

@ -803,8 +803,8 @@ js_AddRootRT(JSRuntime *rt, void *rp, const char *name)
} while (rt->gcLevel > 0);
}
#endif
rhe = (JSGCRootHashEntry *) JS_DHashTableOperate(&rt->gcRootsHash, rp,
JS_DHASH_ADD);
rhe = (JSGCRootHashEntry *)
JS_DHashTableOperate(&rt->gcRootsHash, rp, JS_DHASH_ADD);
if (rhe) {
rhe->root = rp;
rhe->name = name;
@ -2977,10 +2977,6 @@ restart:
js_GCMarkSharpMap(cx, &acx->sharpObjectMap);
}
#ifdef DUMP_CALL_TABLE
js_DumpCallTable(cx);
#endif
/*
* Mark children of things that caused too deep recursion during above
* marking phase.
@ -3013,6 +3009,14 @@ restart:
/* Finalize iterator states before the objects they iterate over. */
CloseIteratorStates(cx);
#ifdef DUMP_CALL_TABLE
/*
* Call js_DumpCallTable here so it can meter and then clear weak refs to
* GC-things that are about to be finalized.
*/
js_DumpCallTable(cx);
#endif
/*
* Sweep phase.
*

View File

@ -2085,8 +2085,9 @@ FindPropertyValue(JSParseNode *pn, JSParseNode *pnid, FindPropValData *data)
data->numvars >= BIG_DESTRUCTURING &&
pn->pn_count >= BIG_OBJECT_INIT &&
JS_DHashTableInit(&data->table, &FindPropValOps, pn,
sizeof(FindPropValEntry), pn->pn_count)) {
sizeof(FindPropValEntry),
JS_DHASH_DEFAULT_CAPACITY(pn->pn_count)))
{
for (pn = pn->pn_head; pn; pn = pn->pn_next) {
ASSERT_VALID_PROPERTY_KEY(pn->pn_left);
entry = (FindPropValEntry *)

View File

@ -1474,7 +1474,8 @@ js_GetSrcNoteCached(JSContext *cx, JSScript *script, jsbytecode *pc)
++nsrcnotes;
}
if (!JS_DHashTableInit(&JS_GSN_CACHE(cx).table, JS_DHashGetStubOps(),
NULL, sizeof(GSNCacheEntry), nsrcnotes)) {
NULL, sizeof(GSNCacheEntry),
JS_DHASH_DEFAULT_CAPACITY(nsrcnotes))) {
JS_GSN_CACHE(cx).table.ops = NULL;
} else {
pc = script->code;

View File

@ -790,9 +790,10 @@ JS_XDRFindClassIdByName(JSXDRState *xdr, const char *name)
/* Bootstrap reghash from registry on first overpopulated Find. */
if (!xdr->reghash) {
xdr->reghash = JS_NewDHashTable(JS_DHashGetStubOps(), NULL,
xdr->reghash =
JS_NewDHashTable(JS_DHashGetStubOps(), NULL,
sizeof(JSRegHashEntry),
numclasses);
JS_DHASH_DEFAULT_CAPACITY(numclasses));
if (xdr->reghash) {
for (i = 0; i < numclasses; i++) {
JSClass *clasp = xdr->registry[i];

View File

@ -241,8 +241,8 @@ PL_DHashTableInit(PLDHashTable *table, const PLDHashTableOps *ops, void *data,
if (capacity >= PL_DHASH_SIZE_LIMIT)
return PR_FALSE;
table->hashShift = PL_DHASH_BITS - log2;
table->maxAlphaFrac = 0xC0; /* .75 */
table->minAlphaFrac = 0x40; /* .25 */
table->maxAlphaFrac = (uint8)(0x100 * PL_DHASH_DEFAULT_MAX_ALPHA);
table->minAlphaFrac = (uint8)(0x100 * PL_DHASH_DEFAULT_MIN_ALPHA);
table->entrySize = entrySize;
table->entryCount = table->removedCount = 0;
table->generation = 0;

View File

@ -456,6 +456,30 @@ PL_DHashTableSetAlphaBounds(PLDHashTable *table,
((float)((table)->entrySize / sizeof(void *) - 1) \
/ ((table)->entrySize / sizeof(void *) + (k)))
/*
* Default max/min alpha, and macros to compute the value for the |capacity|
* parameter to PL_NewDHashTable and PL_DHashTableInit, given default or any
* max alpha, such that adding entryCount entries right after initializing the
* table will not require a reallocation (so PL_DHASH_ADD can't fail for those
* PL_DHashTableOperate calls).
*
* NB: PL_DHASH_CAP is a helper macro meant for use only in PL_DHASH_CAPACITY.
* Don't use it directly!
*/
/* Default load-factor bounds: grow when more than 3/4 full... */
#define PL_DHASH_DEFAULT_MAX_ALPHA 0.75
/* ...and shrink when less than 1/4 full. */
#define PL_DHASH_DEFAULT_MIN_ALPHA 0.25
/*
 * Truncating estimate of entryCount / maxAlpha.  Helper for
 * PL_DHASH_CAPACITY only -- see the NB in the comment block above.
 */
#define PL_DHASH_CAP(entryCount, maxAlpha) \
((PRUint32)((double)(entryCount) / (maxAlpha)))
/*
 * PL_DHASH_CAP rounded up by one when, after scaling by the 8-bit
 * fixed-point alpha fraction ((uint8)(0x100 * maxAlpha), the same
 * representation PL_DHashTableInit stores in maxAlphaFrac), the truncated
 * capacity could not hold entryCount entries without exceeding maxAlpha.
 * NOTE(review): (uint8)(0x100 * (maxAlpha)) wraps to 0 for maxAlpha ==
 * 1.0, so this assumes maxAlpha < 1 -- confirm against the alpha checks
 * in PL_DHashTableSetAlphaBounds.
 */
#define PL_DHASH_CAPACITY(entryCount, maxAlpha) \
(PL_DHASH_CAP(entryCount, maxAlpha) + \
(((PL_DHASH_CAP(entryCount, maxAlpha) * (uint8)(0x100 * (maxAlpha))) \
>> 8) < (entryCount)))
/* PL_DHASH_CAPACITY specialized to PL_DHASH_DEFAULT_MAX_ALPHA. */
#define PL_DHASH_DEFAULT_CAPACITY(entryCount) \
PL_DHASH_CAPACITY(entryCount, PL_DHASH_DEFAULT_MAX_ALPHA)
/*
* Finalize table's data, free its entry storage using table->ops->freeTable,
* and leave its members unchanged from their last live values (which leaves