Bug 1407469 - Remove malloc_spin_*. r=njn

The malloc_spin_* functions have ended up being strictly identical to
the malloc_mutex_* functions, so use the latter instead of the former.

--HG--
extra : rebase_source : 746bdf57cb4a33fd65335174a748cb567630e05b
Mike Hommey 2017-10-06 16:50:17 +09:00
parent 749fd79e7a
commit a055106404


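The malloc_mutex_* definitions that survive this change are not visible in the hunks below; per the commit message they are strictly identical to the removed malloc_spin_* code, so the remaining lock API can be sketched roughly as follows (a reconstruction from that removed code, assuming the platform headers mozjemalloc already pulls in, not a verbatim copy of the file):

#if defined(XP_WIN)
#define malloc_mutex_t CRITICAL_SECTION
#elif defined(XP_DARWIN)
struct malloc_mutex_t {
  OSSpinLock lock;
};
#else
typedef pthread_mutex_t malloc_mutex_t;
#endif

/* Sketch: malloc_mutex_init mirrors the removed malloc_spin_init below, i.e. a
 * spin-count critical section on Windows, OS_SPINLOCK_INIT on Darwin, and an
 * adaptive (PTHREAD_MUTEX_ADAPTIVE_NP) pthread mutex on desktop Linux. */

static inline void
malloc_mutex_lock(malloc_mutex_t* mutex)
{
#if defined(XP_WIN)
  EnterCriticalSection(mutex);
#elif defined(XP_DARWIN)
  OSSpinLockLock(&mutex->lock);
#else
  pthread_mutex_lock(mutex);
#endif
}

static inline void
malloc_mutex_unlock(malloc_mutex_t* mutex)
{
#if defined(XP_WIN)
  LeaveCriticalSection(mutex);
#elif defined(XP_DARWIN)
  OSSpinLockUnlock(&mutex->lock);
#else
  pthread_mutex_unlock(mutex);
#endif
}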
@@ -507,17 +507,12 @@ static size_t recycled_size;
*/
#if defined(XP_WIN)
#define malloc_mutex_t CRITICAL_SECTION
#define malloc_spinlock_t CRITICAL_SECTION
#elif defined(XP_DARWIN)
struct malloc_mutex_t {
OSSpinLock lock;
};
struct malloc_spinlock_t {
OSSpinLock lock;
};
#else
typedef pthread_mutex_t malloc_mutex_t;
typedef pthread_mutex_t malloc_spinlock_t;
#endif
/* Set to true once the allocator has been initialized. */
@@ -673,7 +668,7 @@ class AddressRadixTree {
static_assert(kBitsAtLevel1 + (kHeight - 1) * kBitsPerLevel == Bits,
"AddressRadixTree parameters don't work out");
malloc_spinlock_t mLock;
malloc_mutex_t mLock;
void** mRoot;
public:
@@ -930,7 +925,7 @@ struct arena_t {
RedBlackTreeNode<arena_t> mLink;
/* All operations on this arena require that lock be locked. */
malloc_spinlock_t mLock;
malloc_mutex_t mLock;
arena_stats_t mStats;
@@ -1124,7 +1119,7 @@ static size_t base_committed;
// the type being defined anymore.
static RedBlackTree<arena_t, ArenaTreeTrait> gArenaTree;
static unsigned narenas;
static malloc_spinlock_t arenas_lock; /* Protects arenas initialization. */
static malloc_mutex_t arenas_lock; /* Protects arenas initialization. */
/*
* The arena associated with the current thread (per jemalloc_thread_local_arena)
@@ -1301,79 +1296,10 @@ malloc_mutex_unlock(malloc_mutex_t *mutex)
#endif
}
#if (defined(__GNUC__))
__attribute__((unused))
# endif
static bool
malloc_spin_init(malloc_spinlock_t *lock)
{
#if defined(XP_WIN)
if (!InitializeCriticalSectionAndSpinCount(lock, _CRT_SPINCOUNT))
return (true);
#elif defined(XP_DARWIN)
lock->lock = OS_SPINLOCK_INIT;
#elif defined(XP_LINUX) && !defined(ANDROID)
pthread_mutexattr_t attr;
if (pthread_mutexattr_init(&attr) != 0)
return (true);
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
if (pthread_mutex_init(lock, &attr) != 0) {
pthread_mutexattr_destroy(&attr);
return (true);
}
pthread_mutexattr_destroy(&attr);
#else
if (pthread_mutex_init(lock, nullptr) != 0)
return (true);
#endif
return (false);
}
static inline void
malloc_spin_lock(malloc_spinlock_t *lock)
{
#if defined(XP_WIN)
EnterCriticalSection(lock);
#elif defined(XP_DARWIN)
OSSpinLockLock(&lock->lock);
#else
pthread_mutex_lock(lock);
#endif
}
static inline void
malloc_spin_unlock(malloc_spinlock_t *lock)
{
#if defined(XP_WIN)
LeaveCriticalSection(lock);
#elif defined(XP_DARWIN)
OSSpinLockUnlock(&lock->lock);
#else
pthread_mutex_unlock(lock);
#endif
}
/*
* End mutex.
*/
/******************************************************************************/
/*
* Begin spin lock. Spin locks here are actually adaptive mutexes that block
* after a period of spinning, because unbounded spinning would allow for
* priority inversion.
*/
#if !defined(XP_DARWIN)
# define malloc_spin_init malloc_mutex_init
# define malloc_spin_lock malloc_mutex_lock
# define malloc_spin_unlock malloc_mutex_unlock
#endif
/*
* End spin lock.
*/
/******************************************************************************/
/*
* Begin Utility functions/macros.
*/
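The deleted comment above describes these locks as adaptive mutexes that block after a bounded period of spinning, because unbounded spinning would allow priority inversion. As an illustration only, not code from this file, that spin-then-block behaviour looks roughly like this in portable C++:

#include <mutex>
#include <thread>

// Illustrative sketch (not mozjemalloc code): spin a bounded number of times,
// then fall back to blocking in the kernel.
class AdaptiveLock
{
  std::mutex mMutex;

public:
  void lock()
  {
    // Bounded spinning: cheap when the current owner releases quickly.
    for (int i = 0; i < 128; i++) {
      if (mMutex.try_lock()) {
        return;
      }
      std::this_thread::yield();
    }
    // Give up and block; never spin unboundedly against a preempted owner,
    // which is what opens the door to priority inversion.
    mMutex.lock();
  }

  void unlock() { mMutex.unlock(); }
};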
@@ -1753,7 +1679,7 @@ template <size_t Bits>
bool
AddressRadixTree<Bits>::Init()
{
malloc_spin_init(&mLock);
malloc_mutex_init(&mLock);
mRoot = (void**)base_calloc(1 << kBitsAtLevel1, sizeof(void*));
return mRoot;
@@ -1807,7 +1733,7 @@ AddressRadixTree<Bits>::Get(void* aKey)
ret = *slot;
}
#ifdef MOZ_DEBUG
malloc_spin_lock(&mlock);
malloc_mutex_lock(&mlock);
/*
* Suppose that it were possible for a jemalloc-allocated chunk to be
* munmap()ped, followed by a different allocator in another thread re-using
@@ -1822,13 +1748,13 @@ AddressRadixTree<Bits>::Get(void* aKey)
slot = GetSlot(aKey);
}
if (slot) {
// The malloc_spin_lock call above should act as a memory barrier, forcing
// The malloc_mutex_lock call above should act as a memory barrier, forcing
// the compiler to emit a new read instruction for *slot.
MOZ_ASSERT(ret == *slot);
} else {
MOZ_ASSERT(ret == nullptr);
}
malloc_spin_unlock(&mlock);
malloc_mutex_unlock(&mlock);
#endif
return ret;
}
@@ -1837,12 +1763,12 @@ template <size_t Bits>
bool
AddressRadixTree<Bits>::Set(void* aKey, void* aValue)
{
malloc_spin_lock(&mLock);
malloc_mutex_lock(&mLock);
void** slot = GetSlot(aKey, /* create */ true);
if (slot) {
*slot = aValue;
}
malloc_spin_unlock(&mLock);
malloc_mutex_unlock(&mLock);
return slot;
}
@@ -3194,7 +3120,7 @@ arena_t::MallocSmall(size_t aSize, bool aZero)
}
MOZ_DIAGNOSTIC_ASSERT(aSize == bin->reg_size);
malloc_spin_lock(&mLock);
malloc_mutex_lock(&mLock);
if ((run = bin->runcur) && run->nfree > 0) {
ret = MallocBinEasy(bin, run);
} else {
@@ -3202,12 +3128,12 @@ arena_t::MallocSmall(size_t aSize, bool aZero)
}
if (!ret) {
malloc_spin_unlock(&mLock);
malloc_mutex_unlock(&mLock);
return nullptr;
}
mStats.allocated_small += aSize;
malloc_spin_unlock(&mLock);
malloc_mutex_unlock(&mLock);
if (aZero == false) {
if (opt_junk) {
@@ -3228,14 +3154,14 @@ arena_t::MallocLarge(size_t aSize, bool aZero)
/* Large allocation. */
aSize = PAGE_CEILING(aSize);
malloc_spin_lock(&mLock);
malloc_mutex_lock(&mLock);
ret = AllocRun(nullptr, aSize, true, aZero);
if (!ret) {
malloc_spin_unlock(&mLock);
malloc_mutex_unlock(&mLock);
return nullptr;
}
mStats.allocated_large += aSize;
malloc_spin_unlock(&mLock);
malloc_mutex_unlock(&mLock);
if (aZero == false) {
if (opt_junk) {
@@ -3282,10 +3208,10 @@ arena_t::Palloc(size_t aAlignment, size_t aSize, size_t aAllocSize)
MOZ_ASSERT((aSize & pagesize_mask) == 0);
MOZ_ASSERT((aAlignment & pagesize_mask) == 0);
malloc_spin_lock(&mLock);
malloc_mutex_lock(&mLock);
ret = AllocRun(nullptr, aAllocSize, true, false);
if (!ret) {
malloc_spin_unlock(&mLock);
malloc_mutex_unlock(&mLock);
return nullptr;
}
@@ -3314,7 +3240,7 @@ arena_t::Palloc(size_t aAlignment, size_t aSize, size_t aAllocSize)
}
mStats.allocated_large += aSize;
malloc_spin_unlock(&mLock);
malloc_mutex_unlock(&mLock);
if (opt_junk) {
memset(ret, kAllocJunk, aSize);
@@ -3755,7 +3681,7 @@ arena_dalloc(void *ptr, size_t offset)
MOZ_ASSERT(arena);
MOZ_DIAGNOSTIC_ASSERT(arena->mMagic == ARENA_MAGIC);
malloc_spin_lock(&arena->mLock);
malloc_mutex_lock(&arena->mLock);
pageind = offset >> pagesize_2pow;
mapelm = &chunk->map[pageind];
MOZ_DIAGNOSTIC_ASSERT((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
@@ -3766,7 +3692,7 @@ arena_dalloc(void *ptr, size_t offset)
/* Large allocation. */
arena->DallocLarge(chunk, ptr);
}
malloc_spin_unlock(&arena->mLock);
malloc_mutex_unlock(&arena->mLock);
}
static inline void
@@ -3793,10 +3719,10 @@ arena_t::RallocShrinkLarge(arena_chunk_t* aChunk, void* aPtr, size_t aSize,
* Shrink the run, and make trailing pages available for other
* allocations.
*/
malloc_spin_lock(&mLock);
malloc_mutex_lock(&mLock);
TrimRunTail(aChunk, (arena_run_t*)aPtr, aOldSize, aSize, true);
mStats.allocated_large -= aOldSize - aSize;
malloc_spin_unlock(&mLock);
malloc_mutex_unlock(&mLock);
}
bool
@@ -3806,7 +3732,7 @@ arena_t::RallocGrowLarge(arena_chunk_t* aChunk, void* aPtr, size_t aSize,
size_t pageind = (uintptr_t(aPtr) - uintptr_t(aChunk)) >> pagesize_2pow;
size_t npages = aOldSize >> pagesize_2pow;
malloc_spin_lock(&mLock);
malloc_mutex_lock(&mLock);
MOZ_DIAGNOSTIC_ASSERT(aOldSize == (aChunk->map[pageind].bits & ~pagesize_mask));
/* Try to extend the run. */
@@ -3829,10 +3755,10 @@ arena_t::RallocGrowLarge(arena_chunk_t* aChunk, void* aPtr, size_t aSize,
CHUNK_MAP_ALLOCATED;
mStats.allocated_large += aSize - aOldSize;
malloc_spin_unlock(&mLock);
malloc_mutex_unlock(&mLock);
return false;
}
malloc_spin_unlock(&mLock);
malloc_mutex_unlock(&mLock);
return true;
}
@@ -3963,7 +3889,7 @@ arena_t::Init()
arena_bin_t* bin;
size_t prev_run_size;
if (malloc_spin_init(&mLock))
if (malloc_mutex_init(&mLock))
return true;
memset(&mLink, 0, sizeof(mLink));
@@ -4062,13 +3988,13 @@ arenas_extend()
return arenas_fallback();
}
malloc_spin_lock(&arenas_lock);
malloc_mutex_lock(&arenas_lock);
// TODO: Use random Ids.
ret->mId = narenas++;
gArenaTree.Insert(ret);
malloc_spin_unlock(&arenas_lock);
malloc_mutex_unlock(&arenas_lock);
return ret;
}
@@ -4473,7 +4399,7 @@ MALLOC_OUT:
base_nodes = nullptr;
malloc_mutex_init(&base_mtx);
malloc_spin_init(&arenas_lock);
malloc_mutex_init(&arenas_lock);
/*
* Initialize one arena here.
@@ -4838,7 +4764,7 @@ MozJemalloc::jemalloc_stats(jemalloc_stats_t* aStats)
MOZ_ASSERT(base_mapped >= base_committed);
malloc_mutex_unlock(&base_mtx);
malloc_spin_lock(&arenas_lock);
malloc_mutex_lock(&arenas_lock);
/* Iterate over arenas. */
for (auto arena : gArenaTree.iter()) {
size_t arena_mapped, arena_allocated, arena_committed, arena_dirty, j,
@@ -4852,7 +4778,7 @@ MozJemalloc::jemalloc_stats(jemalloc_stats_t* aStats)
arena_headers = 0;
arena_unused = 0;
malloc_spin_lock(&arena->mLock);
malloc_mutex_lock(&arena->mLock);
arena_mapped = arena->mStats.mapped;
@@ -4881,7 +4807,7 @@ MozJemalloc::jemalloc_stats(jemalloc_stats_t* aStats)
arena_headers += bin->stats.curruns * bin->reg0_offset;
}
malloc_spin_unlock(&arena->mLock);
malloc_mutex_unlock(&arena->mLock);
MOZ_ASSERT(arena_mapped >= arena_committed);
MOZ_ASSERT(arena_committed >= arena_allocated + arena_dirty);
@@ -4896,7 +4822,7 @@ MozJemalloc::jemalloc_stats(jemalloc_stats_t* aStats)
aStats->bin_unused += arena_unused;
aStats->bookkeeping += arena_headers;
}
malloc_spin_unlock(&arenas_lock);
malloc_mutex_unlock(&arenas_lock);
/* Account for arena chunk headers in bookkeeping rather than waste. */
chunk_header_size =
@@ -4946,24 +4872,24 @@ hard_purge_chunk(arena_chunk_t *chunk)
void
arena_t::HardPurge()
{
malloc_spin_lock(&mLock);
malloc_mutex_lock(&mLock);
while (!mChunksMAdvised.isEmpty()) {
arena_chunk_t* chunk = mChunksMAdvised.popFront();
hard_purge_chunk(chunk);
}
malloc_spin_unlock(&mLock);
malloc_mutex_unlock(&mLock);
}
template<> inline void
MozJemalloc::jemalloc_purge_freed_pages()
{
malloc_spin_lock(&arenas_lock);
malloc_mutex_lock(&arenas_lock);
for (auto arena : gArenaTree.iter()) {
arena->HardPurge();
}
malloc_spin_unlock(&arenas_lock);
malloc_mutex_unlock(&arenas_lock);
}
#else /* !defined MALLOC_DOUBLE_PURGE */
@@ -4980,13 +4906,13 @@ MozJemalloc::jemalloc_purge_freed_pages()
template<> inline void
MozJemalloc::jemalloc_free_dirty_pages(void)
{
malloc_spin_lock(&arenas_lock);
malloc_mutex_lock(&arenas_lock);
for (auto arena : gArenaTree.iter()) {
malloc_spin_lock(&arena->mLock);
malloc_mutex_lock(&arena->mLock);
arena->Purge(true);
malloc_spin_unlock(&arena->mLock);
malloc_mutex_unlock(&arena->mLock);
}
malloc_spin_unlock(&arenas_lock);
malloc_mutex_unlock(&arenas_lock);
}
inline arena_t*
@@ -4994,9 +4920,9 @@ arena_t::GetById(arena_id_t aArenaId)
{
arena_t key;
key.mId = aArenaId;
malloc_spin_lock(&arenas_lock);
malloc_mutex_lock(&arenas_lock);
arena_t* result = gArenaTree.Search(&key);
malloc_spin_unlock(&arenas_lock);
malloc_mutex_unlock(&arenas_lock);
MOZ_RELEASE_ASSERT(result);
return result;
}
@@ -5013,12 +4939,12 @@ template<> inline void
MozJemalloc::moz_dispose_arena(arena_id_t aArenaId)
{
arena_t* arena = arena_t::GetById(aArenaId);
malloc_spin_lock(&arenas_lock);
malloc_mutex_lock(&arenas_lock);
gArenaTree.Remove(arena);
// The arena is leaked, and remaining allocations in it still are alive
// until they are freed. After that, the arena will be empty but still
// have at least a chunk taking address space. TODO: bug 1364359.
malloc_spin_unlock(&arenas_lock);
malloc_mutex_unlock(&arenas_lock);
}
#define MALLOC_DECL(name, return_type, ...) \
@@ -5063,10 +4989,10 @@ _malloc_prefork(void)
{
/* Acquire all mutexes in a safe order. */
malloc_spin_lock(&arenas_lock);
malloc_mutex_lock(&arenas_lock);
for (auto arena : gArenaTree.iter()) {
malloc_spin_lock(&arena->mLock);
malloc_mutex_lock(&arena->mLock);
}
malloc_mutex_lock(&base_mtx);
@@ -5087,9 +5013,9 @@ _malloc_postfork_parent(void)
malloc_mutex_unlock(&base_mtx);
for (auto arena : gArenaTree.iter()) {
malloc_spin_unlock(&arena->mLock);
malloc_mutex_unlock(&arena->mLock);
}
malloc_spin_unlock(&arenas_lock);
malloc_mutex_unlock(&arenas_lock);
}
#ifndef XP_DARWIN
@@ -5105,9 +5031,9 @@ _malloc_postfork_child(void)
malloc_mutex_init(&base_mtx);
for (auto arena : gArenaTree.iter()) {
malloc_spin_init(&arena->mLock);
malloc_mutex_init(&arena->mLock);
}
malloc_spin_init(&arenas_lock);
malloc_mutex_init(&arenas_lock);
}
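For reference, fork handlers like the _malloc_prefork / _malloc_postfork_parent / _malloc_postfork_child functions above are conventionally registered with pthread_atfork. The registration call is outside these hunks, so the sketch below is an assumption about the surrounding code, not part of this diff:

#include <pthread.h>

// Declarations assumed for this sketch; in the real file these handlers are
// defined alongside the code shown above.
void _malloc_prefork(void);
void _malloc_postfork_parent(void);
void _malloc_postfork_child(void);

static void
register_fork_handlers()
{
  // prepare runs in the parent before fork(); parent/child run after fork()
  // returns, in the parent and child respectively. The handlers take every
  // allocator lock before the fork and release (parent) or re-initialize
  // (child) them afterwards, so the child never inherits a lock held by a
  // thread that does not exist in it.
  pthread_atfork(_malloc_prefork, _malloc_postfork_parent,
                 _malloc_postfork_child);
}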
/*