Bug 1412234 - Make all allocator API entry points handle initialization properly. r=njn

Some entry points need to trigger initialization, some can be skipped
when the allocator has not been initialized, and others should crash.
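
To illustrate the three behaviors, here is a minimal sketch (the
example_alloc/example_purge/example_free functions are hypothetical, not the
actual mozjemalloc entry points; only malloc_init(), malloc_initialized and
MOZ_RELEASE_ASSERT come from the code changed below):

// 1. Entry points that allocate trigger initialization on demand and fail
//    gracefully when it does not succeed.
void* example_alloc(size_t aSize)
{
  if (!malloc_init()) {
    return nullptr;
  }
  return nullptr; // placeholder for the real allocation path
}

// 2. Entry points that only act on existing allocator state can simply do
//    nothing when the allocator was never initialized.
void example_purge()
{
  if (!malloc_initialized) {
    return;
  }
  // ... purge dirty pages from the arenas ...
}

// 3. Entry points handed a pointer that could only have come from this
//    allocator should crash if the allocator was never initialized, since
//    that indicates a caller bug.
void example_free(void* aPtr)
{
  if (aPtr) {
    MOZ_RELEASE_ASSERT(malloc_initialized);
    // ... deallocate aPtr ...
  }
}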

--HG--
extra : rebase_source : d6c2697ca27f6110fe52a067440a0583e0ed0ccd
Mike Hommey 2017-10-27 17:25:18 +09:00
parent 5b0f50a0fc
commit 7fca98c0c3

@@ -1182,6 +1182,25 @@ _malloc_postfork_child(void);
// End forward declarations.
// ***************************************************************************
// FreeBSD's pthreads implementation calls malloc(3), so the malloc
// implementation has to take pains to avoid infinite recursion during
// initialization.
#if defined(XP_WIN)
#define malloc_init() true
#else
// Returns whether the allocator was successfully initialized.
static inline bool
malloc_init()
{
if (malloc_initialized == false) {
return malloc_init_hard();
}
return true;
}
#endif
static void
_malloc_message(const char* p)
{
@@ -2161,8 +2180,10 @@ template<>
inline void
MozJemalloc::jemalloc_thread_local_arena(bool aEnabled)
{
if (malloc_init()) {
thread_local_arena(aEnabled);
}
}
// Choose an arena based on a per-thread value.
static inline arena_t*
@@ -3318,7 +3339,9 @@ MozJemalloc::jemalloc_ptr_info(const void* aPtr, jemalloc_ptr_info_t* aInfo)
arena_chunk_t* chunk = GetChunkForPtr(aPtr);
// Is the pointer null, or within one chunk's size of null?
if (!chunk) {
// Alternatively, if the allocator is not initialized yet, the pointer
// can't be known.
if (!chunk || !malloc_initialized) {
*aInfo = { TagUnknown, nullptr, 0 };
return;
}
@@ -4061,25 +4084,6 @@ huge_dalloc(void* aPtr)
base_node_dealloc(node);
}
// FreeBSD's pthreads implementation calls malloc(3), so the malloc
// implementation has to take pains to avoid infinite recursion during
// initialization.
#if defined(XP_WIN)
#define malloc_init() true
#else
// Returns whether the allocator was successfully initialized.
static inline bool
malloc_init()
{
if (malloc_initialized == false) {
return malloc_init_hard();
}
return true;
}
#endif
static size_t
GetKernelPageSize()
{
@@ -4408,7 +4412,7 @@ BaseAllocator::realloc(void* aPtr, size_t aSize)
}
if (aPtr) {
MOZ_ASSERT(malloc_initialized);
MOZ_RELEASE_ASSERT(malloc_initialized);
ret = iralloc(aPtr, aSize, mArena);
@@ -4438,8 +4442,10 @@ BaseAllocator::free(void* aPtr)
// A version of idalloc that checks for nullptr pointer.
offset = GetChunkOffsetForPtr(aPtr);
if (offset != 0) {
MOZ_RELEASE_ASSERT(malloc_initialized);
arena_dalloc(aPtr, offset);
} else if (aPtr) {
MOZ_RELEASE_ASSERT(malloc_initialized);
huge_dalloc(aPtr);
}
}
@@ -4557,7 +4563,13 @@ MozJemalloc::jemalloc_stats(jemalloc_stats_t* aStats)
{
size_t non_arena_mapped, chunk_header_size;
MOZ_ASSERT(aStats);
if (!aStats) {
return;
}
if (!malloc_initialized) {
memset(aStats, 0, sizeof(*aStats));
return;
}
// Gather runtime settings.
aStats->opt_junk = opt_junk;
@@ -4717,11 +4729,13 @@ template<>
inline void
MozJemalloc::jemalloc_purge_freed_pages()
{
if (malloc_initialized) {
MutexAutoLock lock(arenas_lock);
for (auto arena : gArenaTree.iter()) {
arena->HardPurge();
}
}
}
#else // !defined MALLOC_DOUBLE_PURGE
@@ -4738,16 +4752,21 @@ template<>
inline void
MozJemalloc::jemalloc_free_dirty_pages(void)
{
if (malloc_initialized) {
MutexAutoLock lock(arenas_lock);
for (auto arena : gArenaTree.iter()) {
MutexAutoLock arena_lock(arena->mLock);
arena->Purge(true);
}
}
}
inline arena_t*
arena_t::GetById(arena_id_t aArenaId)
{
if (!malloc_initialized) {
return nullptr;
}
arena_t key;
key.mId = aArenaId;
MutexAutoLock lock(arenas_lock);
@@ -4761,21 +4780,26 @@ template<>
inline arena_id_t
MozJemalloc::moz_create_arena()
{
if (malloc_init()) {
arena_t* arena = arenas_extend();
return arena->mId;
}
return 0;
}
template<>
inline void
MozJemalloc::moz_dispose_arena(arena_id_t aArenaId)
{
arena_t* arena = arena_t::GetById(aArenaId);
if (arena) {
MutexAutoLock lock(arenas_lock);
gArenaTree.Remove(arena);
// The arena is leaked, and remaining allocations in it are still alive
// until they are freed. After that, the arena will be empty but will
// still have at least one chunk taking up address space. TODO: bug 1364359.
}
}
#define MALLOC_DECL(name, return_type, ...) \
template<> \