Bug 675545 - Completely re-do jsarena.{cpp,h}. r=cdleary.

This commit is contained in:
Nicholas Nethercote 2011-08-28 23:04:45 -07:00
parent de84e18e46
commit 84daccb5d0
2 changed files with 77 additions and 137 deletions

View File

@ -53,121 +53,73 @@
using namespace js;
#define JS_ARENA_DEFAULT_ALIGN sizeof(double)
/* If JSArena's length is a multiple of 8, that ensures its payload is 8-aligned. */
JS_STATIC_ASSERT(sizeof(JSArena) % 8 == 0);
JS_PUBLIC_API(void)
JS_InitArenaPool(JSArenaPool *pool, const char *name, size_t size, size_t align)
{
    /*
     * Initialize |pool| so that each arena provides at least |size| payload
     * bytes, with allocations aligned to |align|.  |align| must be 1, 2, 4
     * or 8.  NOTE(review): |name| is not used here — presumably retained for
     * API compatibility; confirm against callers.
     */
    /* Restricting ourselves to some simple alignments keeps things simple. */
    if (align == 1 || align == 2 || align == 4 || align == 8) {
        pool->mask = align - 1;
    } else {
        /* This shouldn't happen, but set pool->mask reasonably if it does. */
        JS_NOT_REACHED("JS_InitArenaPool: bad align");
        pool->mask = 7;
    }
    pool->first.next = NULL;
    /* pool->first is a zero-sized dummy arena that's never allocated from. */
    pool->first.base = pool->first.avail = pool->first.limit =
        JS_ARENA_ALIGN(pool, &pool->first + 1);
    pool->current = &pool->first;
    pool->arenasize = size;
}
/*
* An allocation that consumes more than pool->arenasize also has a header
* pointing back to its previous arena's next member. This header is not
* included in [a->base, a->limit), so its space can't be wrongly claimed.
*
* As the header is a pointer, it must be well-aligned. If pool->mask is
* greater than or equal to POINTER_MASK, the header just preceding a->base
* for an oversized arena a is well-aligned, because a->base is well-aligned.
* However, we may need to add more space to pad the JSArena ** back-pointer
* so that it lies just behind a->base, because a might not be aligned such
* that (jsuword)(a + 1) is on a pointer boundary.
*
* By how much must we pad? Let M be the alignment modulus for pool and P
* the modulus for a pointer. Given M >= P, the base of an oversized arena
* that satisfies M is well-aligned for P.
*
* On the other hand, if M < P, we must include enough space in the header
* size to align the back-pointer on a P boundary so that it can be found by
* subtracting P from a->base. This means a->base must be on a P boundary,
* even though subsequent allocations from a may be aligned on a lesser (M)
* boundary. Given powers of two M and P as above, the extra space needed
* when M < P is P-M or POINTER_MASK - pool->mask.
*
* The size of a header including padding is given by the HEADER_SIZE macro,
* below, for any pool (for any value of M).
*
* The mask to align a->base for any pool is (pool->mask | POINTER_MASK), or
* HEADER_BASE_MASK(pool).
*
* PTR_TO_HEADER computes the address of the back-pointer, given an oversized
* allocation at p. By definition, p must be a->base for the arena a that
* contains p. GET_HEADER and SET_HEADER operate on an oversized arena a, in
* the case of SET_HEADER with back-pointer ap.
*/
#define POINTER_MASK ((jsuword)(JS_ALIGN_OF_POINTER - 1))
#define HEADER_SIZE(pool) (sizeof(JSArena **) \
+ (((pool)->mask < POINTER_MASK) \
? POINTER_MASK - (pool)->mask \
: 0))
#define HEADER_BASE_MASK(pool) ((pool)->mask | POINTER_MASK)
#define PTR_TO_HEADER(pool,p) (JS_ASSERT(((jsuword)(p) \
& HEADER_BASE_MASK(pool)) \
== 0), \
(JSArena ***)(p) - 1)
#define GET_HEADER(pool,a) (*PTR_TO_HEADER(pool, (a)->base))
#define SET_HEADER(pool,a,ap) (*PTR_TO_HEADER(pool, (a)->base) = (ap))
JS_PUBLIC_API(void *)
JS_ArenaAllocate(JSArenaPool *pool, size_t nb)
{
    /*
     * Search the pool from |pool->current| forward until we find or make
     * enough space for an |nb|-byte allocation.  |nb| must already be
     * rounded up to the pool's alignment.
     */
    JS_ASSERT((nb & pool->mask) == 0);

    JSArena *a;

    /*
     * NB: subtract nb from a->limit in the loop condition, instead of adding
     * nb to a->avail, to avoid overflow (possible when running a 32-bit
     * program on a 64-bit system where the kernel maps the heap up against
     * the top of the 32-bit address space, see bug 279273).  Note that this
     * necessitates a comparison between nb and a->limit that looks like a
     * (conceptual) type error but isn't.
     */
    for (a = pool->current; nb > a->limit || a->avail > a->limit - nb; pool->current = a) {
        JSArena **ap = &a->next;
        if (!*ap) {
            /* Not enough space in pool, so we must malloc. */
            /*
             * NOTE(review): no overflow check on |gross|; assumes nb is far
             * below SIZE_MAX - sizeof(JSArena) — confirm callers bound nb.
             */
            size_t gross = sizeof(JSArena) + JS_MAX(nb, pool->arenasize);
            a = (JSArena *) OffTheBooks::malloc_(gross);
            if (!a)
                return NULL;

            a->next = NULL;
            a->base = a->avail = jsuword(a) + sizeof(JSArena);
            /*
             * Because malloc returns 8-aligned pointers and sizeof(JSArena)
             * is a multiple of 8, a->base will always be 8-aligned, which
             * should suffice for any valid pool.
             */
            JS_ASSERT(a->base == JS_ARENA_ALIGN(pool, a->base));
            a->limit = (jsuword)a + gross;

            *ap = a;
            continue;
        }
        a = *ap;            /* move to next arena */
    }

    void *p = (void *)a->avail;
    a->avail += nb;
    JS_ASSERT(a->base <= a->avail && a->avail <= a->limit);
    return p;
}
@ -176,59 +128,43 @@ JS_ArenaAllocate(JSArenaPool *pool, size_t nb)
JS_PUBLIC_API(void *)
JS_ArenaRealloc(JSArenaPool *pool, void *p, size_t size, size_t incr)
{
    /*
     * Grow the oversized allocation at |p| (currently |size| bytes) by
     * |incr| bytes, reallocating its whole arena in place in the pool's
     * list.  Returns the (possibly moved) base of the allocation, or NULL
     * on OOM (in which case the old allocation is untouched).
     */

    /* If we've called JS_ArenaRealloc, the new size must be bigger than pool->arenasize. */
    JS_ASSERT(size + incr > pool->arenasize);

    /* Find the arena containing |p|. */
    JSArena *a;
    JSArena **ap = &pool->first.next;
    while (true) {
        a = *ap;
        if (JS_IS_IN_ARENA(a, p))
            break;
        /* p must be found before we run off the end of the list. */
        JS_ASSERT(a != pool->current);
        ap = &a->next;
    }
    /* If we've called JS_ArenaRealloc, p must be at the start of an arena. */
    JS_ASSERT(a->base == (jsuword)p);

    size_t gross = sizeof(JSArena) + JS_ARENA_ALIGN(pool, size + incr);
    a = (JSArena *) OffTheBooks::realloc_(a, gross);
    if (!a)
        return NULL;

    a->base = jsuword(a) + sizeof(JSArena);
    a->avail = a->limit = jsuword(a) + gross;
    /*
     * Because realloc returns 8-aligned pointers and sizeof(JSArena) is a
     * multiple of 8, a->base will always be 8-aligned, which should suffice
     * for any valid pool.
     */
    JS_ASSERT(a->base == JS_ARENA_ALIGN(pool, a->base));

    if (a != *ap) {
        /* realloc moved the allocation: update other pointers to a. */
        if (pool->current == *ap)
            pool->current = a;
        /* Now update *ap, the next link of the arena before a. */
        *ap = a;
    }

    return (void *)a->base;
}
@ -290,7 +226,7 @@ JS_ArenaRelease(JSArenaPool *pool, char *mark)
for (a = &pool->first; a; a = a->next) {
JS_ASSERT(a->base <= a->avail && a->avail <= a->limit);
if (JS_ARENA_MARK_MATCH(a, mark)) {
if (JS_IS_IN_ARENA(a, mark)) {
a->avail = JS_ARENA_ALIGN(pool, mark);
JS_ASSERT(a->avail <= a->limit);
FreeArenaList(pool, a);

View File

@ -82,13 +82,11 @@ struct JSArenaPool {
/*
 * NB: In JS_ARENA_ALLOCATE_CAST and JS_ARENA_GROW_CAST, always subtract _nb
 * from a->limit rather than adding _nb to _p, to avoid overflow (possible when
 * running a 32-bit program on a 64-bit system where the kernel maps the heap
 * up against the top of the 32-bit address space, see bug 279273). Note that
 * this necessitates a comparison between nb and a->limit that looks like a
 * (conceptual) type error but isn't.
 */
#define JS_ARENA_ALLOCATE_COMMON(p, type, pool, nb, guard) \
JS_BEGIN_MACRO \
@ -110,16 +108,21 @@ struct JSArenaPool {
JS_BEGIN_MACRO \
JSArena *_a = (pool)->current; \
if (_a->avail == (jsuword)(p) + JS_ARENA_ALIGN(pool, size)) { \
/* p was the last thing allocated in the current arena... */ \
size_t _nb = (size) + (incr); \
_nb = JS_ARENA_ALIGN(pool, _nb); \
if (_a->limit >= _nb && (jsuword)(p) <= _a->limit - _nb) { \
/* ... and we have space, so just extend p in-place */ \
_a->avail = (jsuword)(p) + _nb; \
} else if ((jsuword)(p) == _a->base) { \
/* ... p is also the 1st thing in this arena */ \
p = (type) JS_ArenaRealloc(pool, p, size, incr); \
} else { \
/* hard case */ \
p = (type) JS_ArenaGrow(pool, p, size, incr); \
} \
} else { \
/* hard case */ \
p = (type) JS_ArenaGrow(pool, p, size, incr); \
} \
STATIC_ASSUME(!p || ubound((char *)p) >= size + incr); \
@ -131,7 +134,7 @@ struct JSArenaPool {
/*
 * Check whether |mark| lies inside arena |a|'s allocated area, i.e. in
 * [a->base, a->avail].  The unsigned JS_UPTRDIFF comparison also rejects
 * marks below a->base, since they wrap around to huge values.
 */
#define JS_IS_IN_ARENA(a, mark)                                               \
    (JS_UPTRDIFF(mark, (a)->base) <= JS_UPTRDIFF((a)->avail, (a)->base))
#ifdef DEBUG
@ -149,7 +152,7 @@ struct JSArenaPool {
JS_BEGIN_MACRO \
char *_m = (char *)(mark); \
JSArena *_a = (pool)->current; \
if (_a != &(pool)->first && JS_ARENA_MARK_MATCH(_a, _m)) { \
if (_a != &(pool)->first && JS_IS_IN_ARENA(_a, _m)) { \
_a->avail = (jsuword)JS_ARENA_ALIGN(pool, _m); \
JS_ASSERT(_a->avail <= _a->limit); \
JS_CLEAR_UNUSED(_a); \
@ -169,7 +172,8 @@ struct JSArenaPool {
JS_END_MACRO
/*
 * Initialize an arena pool with a minimum size per arena of |size| bytes.
 * |align| must be 1, 2, 4 or 8.
 */
extern JS_PUBLIC_API(void)
JS_InitArenaPool(JSArenaPool *pool, const char *name, size_t size,