Backed out changeset b866396faae4

Luke Wagner 2009-09-10 16:44:01 -07:00
parent 3497f76ae5
commit a6c9498948
8 changed files with 68 additions and 1056 deletions

View File

@ -101,7 +101,6 @@
#include "jsstr.h"
#include "jsstaticcheck.h"
#include "jsvector.h"
#include "jshashmap.h"
#include "jsatominlines.h"
@ -1468,10 +1467,17 @@ array_toSource(JSContext *cx, uintN argc, jsval *vp)
 }
 #endif

+static JSHashNumber
+js_hash_array(const void *key)
+{
+    return (JSHashNumber)JS_PTR_TO_UINT32(key) >> JSVAL_TAGBITS;
+}
+
 bool
 js_InitContextBusyArrayTable(JSContext *cx)
 {
-    cx->busyArrayTable = cx->create<JSBusyArrayTable>(cx);
+    cx->busyArrayTable = JS_NewHashTable(4, js_hash_array, JS_CompareValues,
+                                         JS_CompareValues, NULL, NULL);
     return cx->busyArrayTable != NULL;
 }
@ -1485,16 +1491,22 @@ array_toString_sub(JSContext *cx, JSObject *obj, JSBool locale,
     /*
      * This hash table is shared between toString invocations and must be empty
      * after the root invocation completes.
      */
-    JSBusyArrayTable &busy = *cx->busyArrayTable;
+    JSHashTable *table = cx->busyArrayTable;

     /*
      * Use HashTable entry as the cycle indicator. On first visit, create the
      * entry, and, when leaving, remove the entry.
      */
-    JSBusyArrayTable::Pointer entryPtr = busy.lookup(obj);
-    if (!entryPtr) {
-        if (!busy.addAfterMiss(obj, false, entryPtr))
+    JSHashNumber hash = js_hash_array(obj);
+    JSHashEntry **hep = JS_HashTableRawLookup(table, hash, obj);
+    JSHashEntry *he = *hep;
+    if (!he) {
+        /* Not in hash table, so not a cycle. */
+        he = JS_HashTableRawAdd(table, hep, hash, obj, NULL);
+        if (!he) {
+            JS_ReportOutOfMemory(cx);
             return false;
+        }
     } else {
         /* Cycle, so return empty string. */
         *rval = ATOM_KEY(cx->runtime->atomState.emptyAtom);
@ -1569,10 +1581,10 @@ array_toString_sub(JSContext *cx, JSObject *obj, JSBool locale,
 out:
     /*
-     * It is possible that 'entryPtr' may have been invalidated by subsequent
-     * table operations, so use the slower non-Pointer version.
+     * It is possible that 'hep' may have been invalidated by subsequent
+     * RawAdd/Remove. Hence, 'RawRemove' must not be used.
      */
-    busy.remove(obj);
+    JS_HashTableRemove(table, obj);
     return ok;
 }
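
As an aside for readers new to the idiom, the mark-on-entry / unmark-on-exit cycle check that both sides of this hunk implement can be reduced to a few lines. The sketch below is illustrative only: std::set stands in for the per-context busy table, and FakeArray/ToSourceSketch are invented names, not changeset code.

#include <stddef.h>
#include <set>
#include <string>
#include <vector>

struct FakeArray {
    std::vector<FakeArray *> elems;
};

static std::string
ToSourceSketch(FakeArray *obj, std::set<FakeArray *> &busy)
{
    /* A failed insert means obj is already marked: a cycle. */
    if (!busy.insert(obj).second)
        return "";

    std::string out = "[";
    for (size_t i = 0; i < obj->elems.size(); i++)
        out += ToSourceSketch(obj->elems[i], busy) + ",";
    out += "]";

    /* Unmark on exit, so the busy table is empty after the root call. */
    busy.erase(obj);
    return out;
}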

View File

@ -69,7 +69,6 @@
#include "jsstaticcheck.h"
#include "jsstr.h"
#include "jstracer.h"
#include "jshashmap.h"
static void
FreeContext(JSContext *cx);
@ -776,7 +775,7 @@ FreeContext(JSContext *cx)
     /* Destroy the busy array table. */
     if (cx->busyArrayTable) {
-        cx->destroy(cx->busyArrayTable);
+        JS_HashTableDestroy(cx->busyArrayTable);
         cx->busyArrayTable = NULL;
     }

View File

@ -1008,7 +1008,7 @@ struct JSContext {
     /* State for object and array toSource conversion. */
     JSSharpObjectMap    sharpObjectMap;
-    JSBusyArrayTable    *busyArrayTable;
+    JSHashTable         *busyArrayTable;

     /* Argument formatter support for JS_{Convert,Push}Arguments{,VA}. */
     JSArgumentFormatMap *argumentFormatMap;

View File

@ -1,603 +0,0 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sw=4 et tw=99 ft=cpp:
*
* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is Mozilla SpiderMonkey JavaScript 1.9 code, released
* July 17, 2009.
*
* The Initial Developer of the Original Code is
* the Mozilla Corporation.
*
* Contributor(s):
* Luke Wagner <lw@mozilla.com>
*
* Alternatively, the contents of this file may be used under the terms of
* either of the GNU General Public License Version 2 or later (the "GPL"),
* or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
#ifndef jshashmap_h_
#define jshashmap_h_
#include "jspool.h"
#include "jsbit.h"
namespace js {
/* Default hashing policies. */
template <class Key> struct DefaultHasher {
static uint32 hash(const Key &key) {
        /* Hash via implicit conversion to the hash number type. */
return key;
}
};
template <class T> struct DefaultHasher<T *> {
static uint32 hash(const T *key) {
/* hash pointers like JS_DHashVoidPtrKeyStub. */
return (uint32)(unsigned long)key >> 2;
}
};
/*
* JS-friendly, STL-like container providing a hash table with parameterized
* error handling. HashMap calls the constructors/destructors of all key/value
* objects added, so non-PODs may be used safely.
*
* Key/Value requirements:
* - default constructible, destructible, assignable, copy constructible
* - operations do not throw
* Hasher requirements:
* - static member function: static HashNumber hash(const Key &);
* N:
* - specifies the number of elements to store in-line before the first
* dynamic allocation. (default 0)
* AllocPolicy:
* - see "Allocation policies" in jstl.h (default ContextAllocPolicy)
*
 * N.B.: HashMap is not reentrant: Key/Value/Hasher member functions invoked
 * during HashMap member functions must not call back into the same HashMap.
*/
template <class Key, class Value, class Hasher,
size_t N, class AllocPolicy>
class HashMap
{
public:
/* public element interface */
struct Element
{
const Key key;
Value value;
};
private:
/* utilities */
void initTable();
void destroyAll();
typedef uint32 HashNumber;
struct HashEntry : Element
{
HashEntry *next;
HashNumber keyHash;
};
struct InternalRange;
InternalRange internalAll() const;
/* share the AllocPolicy object stored in the Pool */
void *malloc(size_t bytes) { return entryPool.allocPolicy().malloc(bytes); }
void free(void *p) { entryPool.allocPolicy().free(p); }
void reportAllocOverflow() { entryPool.allocPolicy().reportAllocOverflow(); }
/* magic constants */
static const size_t sHashBits = 32;
/* compute constants */
/*
     * Since the goal is to not resize for the first N entries, increase the
     * table size accordingly so that 'overloaded(N-1)' is false.
*/
static const size_t sInlineCount =
tl::Max<2, tl::RoundUpPow2<(8 * N) / 7>::result>::result;
static const size_t sInlineTableShift =
sHashBits - tl::FloorLog2<sInlineCount>::result;
static const size_t sTableElemBytes =
sizeof(HashEntry *);
static const size_t sInlineBytes =
sInlineCount * sTableElemBytes;
static const uint32 sGoldenRatio = 0x9E3779B9U; /* taken from jshash.h */
static const size_t sMinHashShrinkLimit =
tl::Max<64, sInlineCount>::result;
/* member data */
/* number of elements that have been added to the hash table */
size_t numElem;
/* size of table = size_t(1) << (sHashBits - shift) */
uint32 shift;
/* inline storage for the first |sInlineCount| buckets */
char tableBuf[sInlineBytes];
/* pointer to array of buckets */
HashEntry **table;
/* pool of hash table elements */
Pool<HashEntry, N, AllocPolicy> entryPool;
#ifdef DEBUG
friend class ReentrancyGuard;
bool entered;
#endif
/* more utilities */
uint32 tableCapacity() const {
JS_ASSERT(shift > 0 && shift <= sHashBits);
return uint32(1) << (sHashBits - shift);
}
bool overloaded(size_t tableCapacity) const {
/* lifted from OVERLOADED macro in jshash.cpp */
return numElem >= (tableCapacity - (tableCapacity >> 3));
}
bool underloaded(size_t tableCapacity) const {
/* lifted from UNDERLOADED macro in jshash.cpp */
return numElem < (tableCapacity >> 2) &&
tableCapacity > sMinHashShrinkLimit;
}
HashEntry **hashToBucket(HashNumber hn) const {
JS_ASSERT(shift > 0 && shift < sHashBits);
return table + ((hn * sGoldenRatio) >> shift);
}
public:
/* construction / destruction */
HashMap(AllocPolicy = AllocPolicy());
~HashMap();
HashMap(const HashMap &);
HashMap &operator=(const HashMap &);
/*
* Type representing a pointer either to an element (if !null()) or a
* pointer to where an element might be inserted (if null()). Pointers
* become invalid after every HashMap operation (even lookup).
*/
class Pointer {
typedef void (Pointer::* ConvertibleToBool)();
friend class HashMap;
void nonNull() {}
Pointer(HashEntry **e, HashNumber h) : hepp(e), keyHash(h) {}
HashEntry **hepp;
HashNumber keyHash;
public:
typedef Element ElementType;
bool operator==(const Pointer &rhs) const { return *hepp == *rhs.hepp; }
bool operator!=(const Pointer &rhs) const { return *hepp != *rhs.hepp; }
bool null() const { return !*hepp; }
operator ConvertibleToBool();
/* dereference; assumes non-null */
ElementType &operator*() const { return **hepp; }
ElementType *operator->() const { return *hepp; }
};
/* Type representing a range of elements in the hash table. */
class Range {
friend class HashMap;
Range(HashEntry *hep, HashEntry **te, HashEntry **end)
: hep(hep), tableEntry(te), tableEnd(end) {}
HashEntry *hep, **tableEntry, **tableEnd;
public:
typedef Element ElementType;
/* !empty() is a precondition for calling front() and popFront(). */
bool empty() const { return tableEntry == tableEnd; }
ElementType &front() const { return *hep; }
void popFront();
};
/* enumeration: all() returns a Range containing count() elements. */
Range all() const;
bool empty() const { return numElem == 0; }
size_t count() const { return numElem; }
/*
* lookup: query whether there is a (key,value) pair with the given key in
* the table. The 'keyHash' overload allows the user to use a pre-computed
* hash for the given key. If |p = lookup(k)| and |p.null()|, then
* |addAfterMiss(k,v,p)| may be called to efficiently add (k,v).
*/
Pointer lookup(const Key &k) const;
Pointer lookup(const Key &k, HashNumber keyHash) const;
/*
     * put: add the given (key,value) pair to the table, returning 'true' if
* the operation succeeded. When the given key is already present, the
* existing value will be overwritten and the call will succeed. The
* 'keyHash' overload allows the user to supply a pre-computed hash for the
* given key. The 'addAfterMiss' overload preserves 'ptr' so that, if the
* call succeeds, 'ptr' points to the newly-added (key,value) pair.
*/
bool put(const Key &k, const Value &v);
bool put(const Key &k, const Value &v, HashNumber keyHash);
bool addAfterMiss(const Key &k, const Value &v, Pointer &ptr);
/*
     * lookup and put: analogous to std::map::operator[]. Look up the given
     * key and, if found, return a pointer to the value. Otherwise, construct a
* new (key, value) pair with the given key and default-constructed value
* and return a pointer to the value. Return NULL on failure.
*/
Value *findOrAdd(const Key &k);
/*
* remove: remove a (key,value) pair with the given key, if it exists. The
* 'keyHash' overload allows the user to supply a pre-computed hash for the
* given key.
*/
void remove(const Key &k);
void remove(const Key &k, HashNumber);
void remove(Pointer);
/*
* Remove all elements, optionally freeing the underlying memory cache.
*
* N.B. for PODs, freeing the underlying cache is more efficient (O(1))
* since the underlying pool can be freed as a whole instead of freeing
* each individual element's allocation (O(n)).
*/
void clear(bool freeCache = true);
};
/* Implementation */
template <class K, class V, class H, size_t N, class AP>
inline void
HashMap<K,V,H,N,AP>::initTable()
{
shift = sInlineTableShift;
table = reinterpret_cast<HashEntry **>(tableBuf);
for (HashEntry **p = table, **end = table + sInlineCount; p != end; ++p)
*p = NULL;
}
template <class K, class V, class H, size_t N, class AP>
inline
HashMap<K,V,H,N,AP>::HashMap(AP ap)
: numElem(0),
entryPool(ap)
#ifdef DEBUG
, entered(false)
#endif
{
initTable();
}
template <class K, class V, class H, size_t N, class AP>
inline void
HashMap<K,V,H,N,AP>::destroyAll()
{
/* Non-PODs need explicit destruction. */
if (!tl::IsPodType<K>::result || !tl::IsPodType<V>::result)
entryPool.clear(internalAll());
else
entryPool.freeRawMemory();
/* Free memory if memory was allocated. */
if ((void *)table != (void *)tableBuf)
this->free(table);
}
template <class K, class V, class H, size_t N, class AP>
inline
HashMap<K,V,H,N,AP>::~HashMap()
{
ReentrancyGuard g(*this);
destroyAll();
}
template <class K, class V, class H, size_t N, class AP>
inline
HashMap<K,V,H,N,AP>::Pointer::operator ConvertibleToBool()
{
return null() ? NULL : &Pointer::nonNull;
}
template <class K, class V, class H, size_t N, class AP>
inline typename HashMap<K,V,H,N,AP>::InternalRange
HashMap<K,V,H,N,AP>::internalAll() const
{
HashEntry **p = table, **end = table + tableCapacity();
while (p != end && !*p)
++p;
    /* For a clean crash on misuse, set hep to NULL for empty ranges. */
return Range(p == end ? NULL : *p, p, end);
}
template <class K, class V, class H, size_t N, class AP>
inline typename HashMap<K,V,H,N,AP>::Range
HashMap<K,V,H,N,AP>::all() const
{
JS_ASSERT(!entered);
return internalAll();
}
template <class K, class V, class H, size_t N, class AP>
inline void
HashMap<K,V,H,N,AP>::Range::popFront()
{
if ((hep = hep->next))
return;
++tableEntry;
for (; tableEntry != tableEnd; ++tableEntry) {
if ((hep = *tableEntry))
return;
}
hep = NULL;
}
template <class K, class V, class H, size_t N, class AP>
inline typename HashMap<K,V,H,N,AP>::Pointer
HashMap<K,V,H,N,AP>::lookup(const K &k, HashNumber hash) const
{
JS_ASSERT(!entered);
HashEntry **bucket = hashToBucket(hash), **p = bucket;
/* search bucket for match */
for (HashEntry *e = *p; e; p = &e->next, e = *p) {
if (e->keyHash == hash && k == e->key) {
/* Move to front of chain */
*p = e->next;
e->next = *bucket;
*bucket = e;
return Pointer(bucket, hash);
}
}
/* miss, return insertion point */
return Pointer(p, hash);
}
template <class K, class V, class H, size_t N, class AP>
inline typename HashMap<K,V,H,N,AP>::Pointer
HashMap<K,V,H,N,AP>::lookup(const K &k) const
{
return lookup(k, H::hash(k));
}
template <class K, class V, class H, size_t N, class AP>
inline bool
HashMap<K,V,H,N,AP>::addAfterMiss(const K &k, const V &v, Pointer &ptr)
{
ReentrancyGuard g(*this);
JS_ASSERT(ptr.null());
/* Resize on overflow. */
uint32 cap = tableCapacity();
if (overloaded(cap)) {
/* Compute bytes to allocate, checking for overflow. */
if (shift <= 1 + tl::CeilingLog2<sTableElemBytes>::result) {
this->reportAllocOverflow();
return false;
}
size_t bytes = cap * 2 * sTableElemBytes;
/* Allocate/clear new table. */
HashEntry **newTable = (HashEntry **)this->malloc(bytes);
if (!newTable)
return false;
        memset(newTable, 0, bytes);
/* Swap in new table before calling hashToBucket. */
HashEntry **oldTable = table;
table = newTable;
--shift;
JS_ASSERT(shift > 0);
/* Insert old table into new. */
for (HashEntry **p = oldTable, **end = oldTable + cap; p != end; ++p) {
for (HashEntry *e = *p, *next; e; e = next) {
next = e->next;
HashEntry **dstBucket = hashToBucket(e->keyHash);
e->next = *dstBucket;
*dstBucket = e;
}
}
/* Free old table. */
if ((void *)oldTable != (void *)tableBuf) {
/* Table was dynamically allocated, so free. */
this->free(oldTable);
} else {
/* Table was inline buffer, so recycle buffer into pool. */
size_t usable = sInlineBytes - sInlineBytes % sizeof(HashEntry);
entryPool.lendUnusedMemory(tableBuf, tableBuf + usable);
}
/* Maintain 'ptr'. */
ptr.hepp = hashToBucket(ptr.keyHash);
}
/* Allocate and insert new hash entry. */
HashEntry *alloc = entryPool.create();
if (!alloc)
return false;
const_cast<K &>(alloc->key) = k;
alloc->value = v;
alloc->keyHash = ptr.keyHash;
alloc->next = *ptr.hepp; /* Could be nonnull after table realloc. */
*ptr.hepp = alloc;
++numElem;
return true;
}
template <class K, class V, class H, size_t N, class AP>
inline bool
HashMap<K,V,H,N,AP>::put(const K &k, const V &v, HashNumber hn)
{
JS_ASSERT(!entered);
Pointer p = lookup(k, hn);
if (p.null())
return addAfterMiss(k, v, p);
p->value = v;
return true;
}
template <class K, class V, class H, size_t N, class AP>
inline bool
HashMap<K,V,H,N,AP>::put(const K &k, const V &v)
{
return put(k, v, H::hash(k));
}
template <class K, class V, class H, size_t N, class AP>
inline V *
HashMap<K,V,H,N,AP>::findOrAdd(const K &k)
{
JS_ASSERT(!entered);
Pointer p = lookup(k);
if (p.null() && !addAfterMiss(k, V(), p))
return NULL;
JS_ASSERT(!p.null());
return &p->value;
}
/*
* The static return type of r.front() is Element, even though we know that the
* elements are actually HashEntry objects. This class makes the cast.
*/
template <class K, class V, class H, size_t N, class AP>
struct HashMap<K,V,H,N,AP>::InternalRange : Range {
InternalRange(const Range &r) : Range(r) {}
HashEntry &front() { return static_cast<HashEntry &>(Range::front()); }
};
template <class K, class V, class H, size_t N, class AP>
inline void
HashMap<K,V,H,N,AP>::remove(Pointer p)
{
ReentrancyGuard g(*this);
/* Remove entry */
HashEntry *e = *p.hepp;
*p.hepp = e->next;
--numElem;
entryPool.destroy(e);
size_t cap = tableCapacity();
if (underloaded(cap)) {
        /* This function is infallible, so on failure leave the HashMap unmodified. */
JS_ASSERT((void *)table != (void *)tableBuf);
/* Allocate new table or go back to inline buffer. */
size_t newCap = cap >> 1;
size_t tableBytes;
HashEntry **newTable;
if (newCap <= sInlineCount) {
newCap = sInlineCount;
tableBytes = sInlineBytes;
newTable = (HashEntry **)tableBuf;
} else {
tableBytes = newCap * sTableElemBytes;
newTable = (HashEntry **)this->malloc(tableBytes);
if (!newTable)
return;
}
/* Consolidate elements into new contiguous pool buffer. */
InternalRange r = internalAll();
HashEntry *array = static_cast<HashEntry *>(
entryPool.consolidate(r, numElem));
if (!array) {
if ((void *)newTable != (void *)tableBuf)
this->free(newTable);
return;
}
/* Not going to fail so update state now, before calling hashToBucket. */
this->free(table);
table = newTable;
++shift;
        memset(newTable, 0, tableBytes);
/* Fill the new table. */
for (HashEntry *p = array, *end = array + numElem; p != end; ++p) {
HashEntry **dstBucket = hashToBucket(p->keyHash);
p->next = *dstBucket;
*dstBucket = p;
}
}
}
template <class K, class V, class H, size_t N, class AP>
inline void
HashMap<K,V,H,N,AP>::remove(const K &k, HashNumber hash)
{
JS_ASSERT(!entered);
if (Pointer p = lookup(k, hash))
remove(p);
}
template <class K, class V, class H, size_t N, class AP>
inline void
HashMap<K,V,H,N,AP>::remove(const K &k)
{
return remove(k, H::hash(k));
}
template <class K, class V, class H, size_t N, class AP>
inline void
HashMap<K,V,H,N,AP>::clear(bool freeCache)
{
ReentrancyGuard g(*this);
destroyAll();
numElem = 0;
initTable();
}
} /* namespace js */
#endif
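
Since this header is deleted wholesale by the backout, a usage sketch may help orient the reader. The caller below is hypothetical (BusyTable and MarkBusy are invented names, not changeset code); the member functions and template parameters are the ones declared above.

/* Hypothetical caller of the HashMap interface above. */
typedef js::HashMap<JSObject *, bool, js::DefaultHasher<JSObject *>, 4,
                    js::ContextAllocPolicy> BusyTable;

static bool
MarkBusy(BusyTable &table, JSObject *obj, bool *wasBusy)
{
    /* lookup() returns a Pointer that doubles as an insertion hint. */
    BusyTable::Pointer p = table.lookup(obj);
    if (p) {
        *wasBusy = true;
        return true;
    }

    /* Miss: reuse the Pointer so the bucket need not be found again. */
    *wasBusy = false;
    return table.addAfterMiss(obj, true, p);
}

For sizing, note the thresholds above: the table grows when roughly 7/8 full (overloaded) and shrinks when under 1/4 full (underloaded), matching the OVERLOADED/UNDERLOADED macros in jshash.cpp that the comments cite.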

View File

@ -1,378 +0,0 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sw=4 et tw=99 ft=cpp:
*
* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is Mozilla SpiderMonkey JavaScript 1.9 code, released
* July 16, 2009.
*
* The Initial Developer of the Original Code is
* the Mozilla Corporation.
*
* Contributor(s):
* Luke Wagner <lw@mozilla.com>
*
* Alternatively, the contents of this file may be used under the terms of
* either of the GNU General Public License Version 2 or later (the "GPL"),
* or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
#ifndef jspool_h_
#define jspool_h_
#include "jstl.h"
#include <new>
namespace js {
/*
* A container providing a short-lived, dynamic pool allocation of homogeneous
* objects. Pool allocation does not have to be contiguous, hence it can avoid
 * reallocation when it grows, which provides stable pointers into the pool.
* Pointers are, however, invalidated by several members as described in their
* comments below. Pool calls the constructors and destructors of objects
* created and destroyed in the pool, so non-PODs may be used safely. However,
* for performance reasons, Pool assumes (and asserts) that all objects in the
* pool are destroyed before the Pool itself is destroyed. ('freeRawMemory' and
* 'clear' provide workarounds.)
*
* T requirements:
* - sizeof(T) >= sizeof(void *) (statically asserted)
* - default constructible, destructible
* - operations do not throw
* N:
* - specifies the number of elements to store in-line before the first
* dynamic allocation. (default 0)
* AllocPolicy:
* - see "Allocation policies" in jstl.h (default ContextAllocPolicy)
*
* N.B: Pool is not reentrant: T member functions called during Pool member
* functions must not call back into the same object.
*/
template <class T, size_t N, class AllocPolicy>
class Pool : AllocPolicy
{
typedef typename tl::StaticAssert<sizeof(T) >= sizeof(void *)>::result _;
/* Pool is non-copyable */
Pool(const Pool &);
void operator=(const Pool &);
/* utilities */
void addRangeToFreeList(char *begin, char *end);
bool addChunk();
void freeChunkChain();
/* magic constants */
static const size_t sGrowthFactor = 2;
static const size_t sInitialAllocElems = 32;
/* compute constants */
static const size_t sInlineBytes =
N * sizeof(T);
static const size_t sInitialAllocBytes =
tl::Max<2 * sInlineBytes, sInitialAllocElems * sizeof(T)>::result;
/* member data */
#ifdef DEBUG
friend class ReentrancyGuard;
bool entered;
size_t allocCount;
#endif
/*
     * Place |buf| in a template class so that it may be removed when
     * sInlineBytes is zero (C++ does not allow zero-length arrays). Place the
     * other members in the same class to avoid wasting space on empty objects
     * (which are apparently not optimized away).
*/
template <size_t NonZeroBytes, class>
struct MemberData
{
/* inline storage for first sInlineBytes / sizeof(T) objects */
char buf[sInlineBytes];
/* singly-linked list of allocated chunks of memory */
void *chunkHead;
/* number of bytes allocated in last chunk */
size_t lastAlloc;
/* LIFO singly-linked list of available T-sized uninitialized memory */
void *freeHead;
void init(Pool &p) {
chunkHead = NULL;
lastAlloc = 0;
freeHead = NULL;
p.addRangeToFreeList(buf, tl::ArrayEnd(buf));
}
};
MemberData<sInlineBytes, void> m;
public:
Pool(AllocPolicy = AllocPolicy());
~Pool();
AllocPolicy &allocPolicy() { return *this; }
/*
* Allocate and default-construct an object. Objects created in this way
* should be released by 'destroy'. Returns NULL on failure.
*/
T *create();
/* Call destructor and free associated memory of the given object. */
void destroy(T *);
/*
* This function adds memory that has not been allocated by the pool to the
* pool's free list. This memory will not be freed by the pool and must
* remain valid until a call to clear(), freeRawMemory(), consolidate(), or
* the Pool's destructor.
*/
void lendUnusedMemory(void *begin, void *end);
/*
* Assuming that the given Range contains all the elements in the pool,
* destroy all such elements and free all allocated memory.
*/
template <class Range>
void clear(Range r);
/*
* Assuming that all elements have been destroyed, or that T is a POD, free
* all allocated memory.
*/
void freeRawMemory();
/*
* Assuming that the given Range contains all the elements in the pool, and
* 'count' is the size of the range, copy the pool elements into a new
* buffer that is exactly big enough to hold them and free the old buffers.
* Clearly, this breaks pointer stability, so return a pointer to the new
* contiguous array of elements. On failure, returns NULL.
*/
template <class InputRange>
T *consolidate(InputRange i, size_t count);
};
/* Implementation */
/*
* When sInlineBytes is zero, remove |buf| member variable. The 'Unused'
* parameter does nothing and is only included because C++ has strange rules
* (14.7.3.17-18) regarding explicit specializations.
*/
template <class T, size_t N, class AP>
template <class Unused>
struct Pool<T,N,AP>::MemberData<0,Unused>
{
void *chunkHead;
size_t lastAlloc;
void *freeHead;
void init(Pool<T,N,AP> &) {
chunkHead = NULL;
lastAlloc = 0;
freeHead = NULL;
}
};
/*
 * Divide the range of uninitialized memory [begin, end) into sizeof(T)-sized
 * pieces and thread them onto the free list.
*/
template <class T, size_t N, class AP>
inline void
Pool<T,N,AP>::addRangeToFreeList(char *begin, char *end)
{
JS_ASSERT((end - begin) % sizeof(T) == 0);
void *oldHead = m.freeHead;
void **last = &m.freeHead;
for (char *p = begin; p != end; p += sizeof(T)) {
*last = p;
last = reinterpret_cast<void **>(p);
}
*last = oldHead;
}
template <class T, size_t N, class AP>
inline
Pool<T,N,AP>::Pool(AP ap)
: AP(ap)
#ifdef DEBUG
, entered(false), allocCount(0)
#endif
{
m.init(*this);
}
template <class T, size_t N, class AP>
inline void
Pool<T,N,AP>::freeChunkChain()
{
void *p = m.chunkHead;
while (p) {
void *next = *reinterpret_cast<void **>(p);
this->free(p);
p = next;
}
}
template <class T, size_t N, class AP>
inline
Pool<T,N,AP>::~Pool()
{
JS_ASSERT(allocCount == 0 && !entered);
freeChunkChain();
}
template <class T, size_t N, class AP>
inline bool
Pool<T,N,AP>::addChunk()
{
/* Check for overflow in multiplication and ptrdiff_t. */
if (m.lastAlloc & tl::MulOverflowMask<2 * sGrowthFactor>::result) {
this->reportAllocOverflow();
return false;
}
if (!m.lastAlloc)
m.lastAlloc = sInitialAllocBytes;
else
m.lastAlloc *= sGrowthFactor;
char *bytes = (char *)this->malloc(m.lastAlloc);
if (!bytes)
return false;
/*
* Add new chunk to the pool. To avoid alignment issues, start first free
* element at the next multiple of sizeof(T), not sizeof(void*).
*/
*reinterpret_cast<void **>(bytes) = m.chunkHead;
m.chunkHead = bytes;
addRangeToFreeList(bytes + sizeof(T), bytes + m.lastAlloc);
return true;
}
template <class T, size_t N, class AP>
inline T *
Pool<T,N,AP>::create()
{
ReentrancyGuard g(*this);
if (!m.freeHead && !addChunk())
return NULL;
void *objMem = m.freeHead;
m.freeHead = *reinterpret_cast<void **>(m.freeHead);
#ifdef DEBUG
++allocCount;
#endif
return new(objMem) T();
}
template <class T, size_t N, class AP>
inline void
Pool<T,N,AP>::destroy(T *p)
{
ReentrancyGuard g(*this);
JS_ASSERT(p && allocCount-- > 0);
p->~T();
*reinterpret_cast<void **>(p) = m.freeHead;
m.freeHead = p;
}
template <class T, size_t N, class AP>
inline void
Pool<T,N,AP>::lendUnusedMemory(void *vbegin, void *vend)
{
JS_ASSERT(!entered);
char *begin = (char *)vbegin, *end = (char *)vend;
size_t mod = (end - begin) % sizeof(T);
if (mod)
end -= mod;
addRangeToFreeList(begin, end);
}
template <class T, size_t N, class AP>
inline void
Pool<T,N,AP>::freeRawMemory()
{
JS_ASSERT(!entered);
#ifdef DEBUG
allocCount = 0;
#endif
freeChunkChain();
m.init(*this);
}
template <class T, size_t N, class AP>
template <class Range>
inline void
Pool<T,N,AP>::clear(Range r)
{
typedef typename Range::ElementType Elem;
for (; !r.empty(); r.popFront())
r.front().~Elem();
freeRawMemory();
}
template <class T, size_t N, class AP>
template <class Range>
inline T *
Pool<T,N,AP>::consolidate(Range r, size_t count)
{
/* Cannot overflow because already allocated. */
size_t size = (count + 1) * sizeof(T);
char *bytes = (char *)this->malloc(size);
if (!bytes)
return NULL;
/* Initialize new chunk with copies of old elements, destroy old. */
*reinterpret_cast<void **>(bytes) = NULL;
T *arrayBegin = reinterpret_cast<T *>(bytes);
++arrayBegin; /* skip 'next' pointer hidden in first element */
for (T *dst = arrayBegin; !r.empty(); r.popFront(), ++dst) {
JS_ASSERT(count-- > 0);
new(dst) T(r.front());
r.front().~T();
}
JS_ASSERT(count == 0);
freeChunkChain();
/* Update pool state. */
m.init(*this);
m.chunkHead = bytes;
m.lastAlloc = size;
return arrayBegin;
}
}
#endif /* jspool_h_ */
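
As with jshashmap.h above, a hypothetical sketch (Edge and AddAndDropEdge are invented names, not changeset code) shows how the Pool interface is meant to be driven; note that every object must be destroyed before the Pool itself dies, per the destructor's assertion.

/* Hypothetical caller of the Pool interface above. */
struct Edge {
    Edge *to;     /* sizeof(Edge) >= sizeof(void *), as Pool requires */
    int weight;
};

static bool
AddAndDropEdge(JSContext *cx)
{
    js::Pool<Edge, 8, js::ContextAllocPolicy> pool(cx);

    Edge *e = pool.create();   /* default-constructed; address stays stable */
    if (!e)
        return false;
    e->to = NULL;
    e->weight = 1;

    pool.destroy(e);           /* runs ~Edge and recycles the memory */
    return true;               /* pool is empty again before ~Pool runs */
}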

View File

@ -154,29 +154,11 @@ template <class T,
           size_t MinInlineCapacity = 0,
           class AllocPolicy = ContextAllocPolicy>
 class Vector;

-template <class T,
-          size_t MinInlineCapacity = 0,
-          class AllocPolicy = ContextAllocPolicy>
-class Pool;
-
-template <class>
-struct DefaultHasher;
-
-template <class Key,
-          class Value = void,
-          class Hasher = DefaultHasher<Key>,
-          size_t MinInlineCapacity = 0,
-          class AllocPolicy = ContextAllocPolicy>
-class HashMap;
-
 } /* namespace js */

 /* Common instantiations. */
 typedef js::Vector<jschar, 32> JSCharBuffer;
-typedef js::HashMap<JSObject *, bool, js::DefaultHasher<JSObject *>, 4,
-                    js::ContextAllocPolicy> JSBusyArrayTable;

 } /* export "C++" */

 #endif /* __cplusplus */

View File

@ -172,7 +172,7 @@ class ReentrancyGuard
     template <class T>
     ReentrancyGuard(T &obj)
 #ifdef DEBUG
-      : entered(obj.entered)
+      : entered(obj.mEntered)
 #endif
     {
 #ifdef DEBUG
@ -235,16 +235,16 @@ PointerRangeSize(T *begin, T *end)
  */
 class ContextAllocPolicy
 {
-    JSContext *cx;
+    JSContext *mCx;

   public:
-    ContextAllocPolicy(JSContext *cx) : cx(cx) {}
-    JSContext *context() const { return cx; }
+    ContextAllocPolicy(JSContext *cx) : mCx(cx) {}
+    JSContext *context() const { return mCx; }

-    void *malloc(size_t bytes) { return cx->malloc(bytes); }
-    void free(void *p) { cx->free(p); }
-    void *realloc(void *p, size_t bytes) { return cx->realloc(p, bytes); }
-    void reportAllocOverflow() const { js_ReportAllocationOverflow(cx); }
+    void *malloc(size_t bytes) { return mCx->malloc(bytes); }
+    void free(void *p) { mCx->free(p); }
+    void *realloc(void *p, size_t bytes) { return mCx->realloc(p, bytes); }
+    void reportAllocOverflow() const { js_ReportAllocationOverflow(mCx); }
 };

 /* Policy for using system memory functions and doing no error reporting. */
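
Any class with these four members (malloc, free, realloc, reportAllocOverflow) satisfies the policy contract that Vector, Pool, and HashMap expect. A hypothetical logging policy, purely illustrative and not part of jstl.h, would be:

#include <stdio.h>
#include <stdlib.h>

class LoggingAllocPolicy
{
  public:
    void *malloc(size_t bytes) {
        fprintf(stderr, "alloc %lu bytes\n", (unsigned long) bytes);
        return ::malloc(bytes);
    }
    void free(void *p) { ::free(p); }
    void *realloc(void *p, size_t bytes) { return ::realloc(p, bytes); }
    void reportAllocOverflow() const {
        fputs("allocation size overflow\n", stderr);
    }
};

Containers inherit privately from the policy (e.g. 'class Pool : AllocPolicy' above), so an empty policy adds no per-container size.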

View File

@ -204,15 +204,15 @@ class Vector : AllocPolicy
      * heapCapacity()) holds uninitialized memory.
      */
     struct BufferPtrs {
-        T *begin, *end;
+        T *mBegin, *mEnd;
     };

     /*
      * Since a vector either stores elements inline or in a heap-allocated
-     * buffer, reuse the storage. lengthOrCapacity serves as the union
-     * discriminator. In inline mode (when elements are stored in u.buf),
-     * lengthOrCapacity holds the vector's length. In heap mode (when elements
-     * are stored in [u.ptrs.begin, u.ptrs.end)), lengthOrCapacity holds the
+     * buffer, reuse the storage. mLengthOrCapacity serves as the union
+     * discriminator. In inline mode (when elements are stored in u.mBuf),
+     * mLengthOrCapacity holds the vector's length. In heap mode (when elements
+     * are stored in [u.ptrs.mBegin, u.ptrs.mEnd)), mLengthOrCapacity holds the
      * vector's capacity.
      */
     static const size_t sInlineCapacity =
@ -225,77 +225,77 @@ class Vector : AllocPolicy
     /* member data */
-    size_t lengthOrCapacity;
-    bool usingInlineStorage() const { return lengthOrCapacity <= sInlineCapacity; }
+    size_t mLengthOrCapacity;
+    bool usingInlineStorage() const { return mLengthOrCapacity <= sInlineCapacity; }

     union {
         BufferPtrs ptrs;
-        char buf[sInlineBytes];
+        char mBuf[sInlineBytes];
     } u;

     /* Only valid when usingInlineStorage() */
     size_t &inlineLength() {
         JS_ASSERT(usingInlineStorage());
-        return lengthOrCapacity;
+        return mLengthOrCapacity;
     }

     size_t inlineLength() const {
         JS_ASSERT(usingInlineStorage());
-        return lengthOrCapacity;
+        return mLengthOrCapacity;
     }

     T *inlineBegin() const {
         JS_ASSERT(usingInlineStorage());
-        return (T *)u.buf;
+        return (T *)u.mBuf;
     }

     T *inlineEnd() const {
         JS_ASSERT(usingInlineStorage());
-        return ((T *)u.buf) + lengthOrCapacity;
+        return ((T *)u.mBuf) + mLengthOrCapacity;
     }

     /* Only valid when !usingInlineStorage() */
     size_t heapLength() const {
         JS_ASSERT(!usingInlineStorage());
         /* Guaranteed by calculateNewCapacity. */
-        JS_ASSERT(size_t(u.ptrs.end - u.ptrs.begin) ==
-                  ((size_t(u.ptrs.end) - size_t(u.ptrs.begin)) / sizeof(T)));
-        return u.ptrs.end - u.ptrs.begin;
+        JS_ASSERT(size_t(u.ptrs.mEnd - u.ptrs.mBegin) ==
+                  ((size_t(u.ptrs.mEnd) - size_t(u.ptrs.mBegin)) / sizeof(T)));
+        return u.ptrs.mEnd - u.ptrs.mBegin;
     }

     size_t &heapCapacity() {
         JS_ASSERT(!usingInlineStorage());
-        return lengthOrCapacity;
+        return mLengthOrCapacity;
     }

     T *&heapBegin() {
         JS_ASSERT(!usingInlineStorage());
-        return u.ptrs.begin;
+        return u.ptrs.mBegin;
     }

     T *&heapEnd() {
         JS_ASSERT(!usingInlineStorage());
-        return u.ptrs.end;
+        return u.ptrs.mEnd;
     }

     size_t heapCapacity() const {
         JS_ASSERT(!usingInlineStorage());
-        return lengthOrCapacity;
+        return mLengthOrCapacity;
     }

     T *heapBegin() const {
         JS_ASSERT(!usingInlineStorage());
-        return u.ptrs.begin;
+        return u.ptrs.mBegin;
     }

     T *heapEnd() const {
         JS_ASSERT(!usingInlineStorage());
-        return u.ptrs.end;
+        return u.ptrs.mEnd;
     }

 #ifdef DEBUG
     friend class ReentrancyGuard;
-    bool entered;
+    bool mEntered;
 #endif

     Vector(const Vector &);
@ -320,42 +320,42 @@ class Vector : AllocPolicy
     }

     T *begin() {
-        JS_ASSERT(!entered);
+        JS_ASSERT(!mEntered);
         return usingInlineStorage() ? inlineBegin() : heapBegin();
     }

     const T *begin() const {
-        JS_ASSERT(!entered);
+        JS_ASSERT(!mEntered);
         return usingInlineStorage() ? inlineBegin() : heapBegin();
     }

     T *end() {
-        JS_ASSERT(!entered);
+        JS_ASSERT(!mEntered);
         return usingInlineStorage() ? inlineEnd() : heapEnd();
     }

     const T *end() const {
-        JS_ASSERT(!entered);
+        JS_ASSERT(!mEntered);
         return usingInlineStorage() ? inlineEnd() : heapEnd();
     }

     T &operator[](size_t i) {
-        JS_ASSERT(!entered && i < length());
+        JS_ASSERT(!mEntered && i < length());
         return begin()[i];
     }

     const T &operator[](size_t i) const {
-        JS_ASSERT(!entered && i < length());
+        JS_ASSERT(!mEntered && i < length());
         return begin()[i];
     }

     T &back() {
-        JS_ASSERT(!entered && !empty());
+        JS_ASSERT(!mEntered && !empty());
         return *(end() - 1);
     }

     const T &back() const {
-        JS_ASSERT(!entered && !empty());
+        JS_ASSERT(!mEntered && !empty());
         return *(end() - 1);
     }
@ -424,9 +424,9 @@ js_AppendLiteral(Vector<T,N,AP> &v, const char (&array)[ArrayLength])
 template <class T, size_t N, class AP>
 inline
 Vector<T,N,AP>::Vector(AP ap)
-  : AP(ap), lengthOrCapacity(0)
+  : AP(ap), mLengthOrCapacity(0)
 #ifdef DEBUG
-  , entered(false)
+  , mEntered(false)
 #endif
 {}
@ -515,7 +515,7 @@ Vector<T,N,AP>::convertToHeapStorage(size_t lengthInc)
     Impl::destroy(inlineBegin(), inlineEnd());

     /* Switch in heap buffer. */
-    lengthOrCapacity = newCap;  /* marks us as !usingInlineStorage() */
+    mLengthOrCapacity = newCap;  /* marks us as !usingInlineStorage() */
     heapBegin() = newBuf;
     heapEnd() = newBuf + length;
     return true;
@ -734,7 +734,7 @@ Vector<T,N,AP>::extractRawBuffer()
     }

     T *ret = heapBegin();
-    lengthOrCapacity = 0;  /* brings us back to usingInlineStorage(), empty */
+    mLengthOrCapacity = 0;  /* brings us back to usingInlineStorage(), empty */
     return ret;
 }
@ -756,15 +756,15 @@ Vector<T,N,AP>::replaceRawBuffer(T *p, size_t length)
     /* Take in the new buffer. */
     if (length <= sInlineCapacity) {
         /*
-         * (lengthOrCapacity <= sInlineCapacity) means inline storage, so we
+         * (mLengthOrCapacity <= sInlineCapacity) means inline storage, so we
          * MUST use inline storage, even though p might otherwise be acceptable.
          */
-        lengthOrCapacity = length;  /* marks us as usingInlineStorage() */
+        mLengthOrCapacity = length;  /* marks us as usingInlineStorage() */
         Impl::copyConstruct(inlineBegin(), p, p + length);
         Impl::destroy(p, p + length);
         this->free(p);
     } else {
-        lengthOrCapacity = length;  /* marks us as !usingInlineStorage() */
+        mLengthOrCapacity = length;  /* marks us as !usingInlineStorage() */
         heapBegin() = p;
         heapEnd() = heapBegin() + length;
     }
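
To close, the single-word discriminator these renamed members implement can be distilled into a stand-alone sketch (invented names, illustrative only, not jsvector.h code). A heap buffer is only ever allocated with capacity greater than the inline capacity, so any stored value at or below it must be an inline length; this is also why extractRawBuffer can reset the vector simply by storing 0.

#include <stddef.h>

template <class T, size_t InlineCap>
struct TinyVecSketch
{
    size_t lengthOrCap;    /* length in inline mode, capacity in heap mode */
    union {
        char buf[InlineCap * sizeof(T)];   /* inline element storage */
        struct { T *begin, *end; } ptrs;   /* heap buffer bounds */
    } u;

    bool inlineMode() const { return lengthOrCap <= InlineCap; }

    size_t length() const {
        return inlineMode() ? lengthOrCap : size_t(u.ptrs.end - u.ptrs.begin);
    }
};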