mirror of https://github.com/mozilla/gecko-dev.git, synced 2024-11-25 05:41:12 +00:00

Bug 1365460 - Replace assert with MOZ_ASSERT. r=njn

--HG--
extra : rebase_source : fab2172c763518c8f08efd99b0f9d9c3fdde42af

parent 7c28b3d25e
commit 175a4c0a2a
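Editor's note on what the swap means (not part of the original commit message): the `assert` used throughout this file was a local shim that was only compiled into debug builds, and `MOZ_ASSERT` from mfbt's Assertions.h has the same debug-only behavior plus a diagnostic message and a deliberate crash on failure, so the replacement below is behavior-preserving. A minimal sketch of the semantics, assuming the mfbt headers are available; `checked_size` is a hypothetical helper invented purely for illustration:

```cpp
// Sketch only, not code from this commit.
#include <cstddef>
#include <cstdint>
#include "mozilla/Assertions.h"  // defines MOZ_ASSERT / MOZ_RELEASE_ASSERT

// Hypothetical helper, for illustration of the two assertion flavors.
static size_t
checked_size(size_t size)
{
  // Debug builds: crashes with a message if the condition fails.
  // Release builds: compiled out entirely, like the deleted assert() shim.
  MOZ_ASSERT(size != 0);

  // For invariants that must survive release builds, Mozilla code uses
  // MOZ_RELEASE_ASSERT instead; it is checked in all build types.
  MOZ_RELEASE_ASSERT(size != SIZE_MAX);
  return size;
}
```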
@@ -1206,12 +1206,6 @@ _malloc_message(const char *p1, const char *p2, const char *p3, const char *p4)
 // Note: MozTaggedAnonymousMmap() could call an LD_PRELOADed mmap
 // instead of the one defined here; use only MozTagAnonymousMemory().
 
-#ifdef MOZ_DEBUG
-# define assert(e) MOZ_ASSERT(e)
-#else
-# define assert(e)
-#endif
-
 #ifdef MOZ_MEMORY_ANDROID
 // Android's pthread.h does not declare pthread_atfork() until SDK 21.
 extern "C" MOZ_EXPORT
@@ -1225,7 +1219,7 @@ int pthread_atfork(void (*)(void), void (*)(void), void(*)(void));
 } \
 } while (0)
 #else
-# define RELEASE_ASSERT(assertion) assert(assertion)
+# define RELEASE_ASSERT(assertion) MOZ_ASSERT(assertion)
 #endif
 
 /******************************************************************************/
@@ -1491,7 +1485,7 @@ base_pages_alloc(size_t minsize)
 size_t csize;
 size_t pminsize;
 
-assert(minsize != 0);
+MOZ_ASSERT(minsize != 0);
 csize = CHUNK_CEILING(minsize);
 base_pages = chunk_alloc(csize, chunksize, true, false);
 if (base_pages == NULL)
@@ -1789,7 +1783,7 @@ pages_map_align(size_t size, size_t alignment)
 */
 ret = mmap((void *)alignment, size, PROT_READ | PROT_WRITE,
 MAP_PRIVATE | MAP_NOSYNC | MAP_ALIGN | MAP_ANON, -1, 0);
-assert(ret != NULL);
+MOZ_ASSERT(ret != NULL);
 
 if (ret == MAP_FAILED)
 ret = NULL;
@@ -1852,7 +1846,7 @@ pages_map(void *addr, size_t size)
 */
 ret = mmap(addr, size, PROT_READ | PROT_WRITE,
 MAP_PRIVATE | MAP_ANON, -1, 0);
-assert(ret != NULL);
+MOZ_ASSERT(ret != NULL);
 #endif
 if (ret == MAP_FAILED) {
 ret = NULL;
@@ -1891,10 +1885,10 @@ pages_map(void *addr, size_t size)
 }
 
 #if defined(__ia64__) || (defined(__sparc__) && defined(__arch64__) && defined(__linux__))
-assert(ret == NULL || (!check_placement && ret != NULL)
+MOZ_ASSERT(ret == NULL || (!check_placement && ret != NULL)
 || (check_placement && ret == addr));
 #else
-assert(ret == NULL || (addr == NULL && ret != addr)
+MOZ_ASSERT(ret == NULL || (addr == NULL && ret != addr)
 || (addr != NULL && ret == addr));
 #endif
 return (ret);
@@ -1923,9 +1917,9 @@ static inline void
 pages_copy(void *dest, const void *src, size_t n)
 {
 
-assert((void *)((uintptr_t)dest & ~pagesize_mask) == dest);
-assert(n >= VM_COPY_MIN);
-assert((void *)((uintptr_t)src & ~pagesize_mask) == src);
+MOZ_ASSERT((void *)((uintptr_t)dest & ~pagesize_mask) == dest);
+MOZ_ASSERT(n >= VM_COPY_MIN);
+MOZ_ASSERT((void *)((uintptr_t)src & ~pagesize_mask) == src);
 
 vm_copy(mach_task_self(), (vm_address_t)src, (vm_size_t)n,
 (vm_address_t)dest);
@@ -2030,7 +2024,7 @@ MALLOC_RTREE_GET_GENERATE(malloc_rtree_get_locked)
 * check.
 */
 # define MALLOC_RTREE_GET_VALIDATE \
-assert(malloc_rtree_get_locked(rtree, key) == ret);
+MOZ_ASSERT(malloc_rtree_get_locked(rtree, key) == ret);
 #else
 # define MALLOC_RTREE_GET_VALIDATE
 #endif
@@ -2089,7 +2083,7 @@ pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
 {
 void *ret = (void *)((uintptr_t)addr + leadsize);
 
-assert(alloc_size >= leadsize + size);
+MOZ_ASSERT(alloc_size >= leadsize + size);
 #ifdef MOZ_MEMORY_WINDOWS
 {
 void *new_addr;
@@ -2134,7 +2128,7 @@ chunk_alloc_mmap_slow(size_t size, size_t alignment)
 ret = pages_trim(pages, alloc_size, leadsize, size);
 } while (ret == NULL);
 
-assert(ret != NULL);
+MOZ_ASSERT(ret != NULL);
 return (ret);
 }
 
@@ -2169,7 +2163,7 @@ chunk_alloc_mmap(size_t size, size_t alignment)
 return (chunk_alloc_mmap_slow(size, alignment));
 }
 
-assert(ret != NULL);
+MOZ_ASSERT(ret != NULL);
 return (ret);
 #endif
 }
@@ -2250,7 +2244,7 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
 }
 leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) -
 (uintptr_t)node->addr;
-assert(node->size >= leadsize + size);
+MOZ_ASSERT(node->size >= leadsize + size);
 trailsize = node->size - leadsize - size;
 ret = (void *)((uintptr_t)node->addr + leadsize);
 zeroed = node->zeroed;
@@ -2310,7 +2304,7 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
 size_t *p = (size_t *)(uintptr_t)ret;
 
 for (i = 0; i < size / sizeof(size_t); i++)
-assert(p[i] == 0);
+MOZ_ASSERT(p[i] == 0);
 }
 #endif
 }
@@ -2334,10 +2328,10 @@ chunk_alloc(size_t size, size_t alignment, bool base, bool zero)
 {
 void *ret;
 
-assert(size != 0);
-assert((size & chunksize_mask) == 0);
-assert(alignment != 0);
-assert((alignment & chunksize_mask) == 0);
+MOZ_ASSERT(size != 0);
+MOZ_ASSERT((size & chunksize_mask) == 0);
+MOZ_ASSERT(alignment != 0);
+MOZ_ASSERT((alignment & chunksize_mask) == 0);
 
 if (CAN_RECYCLE(size)) {
 ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap,
@@ -2361,7 +2355,7 @@ RETURN:
 }
 }
 
-assert(CHUNK_ADDR2BASE(ret) == ret);
+MOZ_ASSERT(CHUNK_ADDR2BASE(ret) == ret);
 return (ret);
 }
 
@@ -2470,10 +2464,10 @@ static void
 chunk_dealloc(void *chunk, size_t size)
 {
 
-assert(chunk != NULL);
-assert(CHUNK_ADDR2BASE(chunk) == chunk);
-assert(size != 0);
-assert((size & chunksize_mask) == 0);
+MOZ_ASSERT(chunk != NULL);
+MOZ_ASSERT(CHUNK_ADDR2BASE(chunk) == chunk);
+MOZ_ASSERT(size != 0);
+MOZ_ASSERT((size & chunksize_mask) == 0);
 
 malloc_rtree_set(chunk_rtree, (uintptr_t)chunk, NULL);
 
@@ -2565,8 +2559,8 @@ arena_chunk_comp(arena_chunk_t *a, arena_chunk_t *b)
 uintptr_t a_chunk = (uintptr_t)a;
 uintptr_t b_chunk = (uintptr_t)b;
 
-assert(a != NULL);
-assert(b != NULL);
+MOZ_ASSERT(a != NULL);
+MOZ_ASSERT(b != NULL);
 
 return ((a_chunk > b_chunk) - (a_chunk < b_chunk));
 }
@@ -2581,8 +2575,8 @@ arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
 uintptr_t a_mapelm = (uintptr_t)a;
 uintptr_t b_mapelm = (uintptr_t)b;
 
-assert(a != NULL);
-assert(b != NULL);
+MOZ_ASSERT(a != NULL);
+MOZ_ASSERT(b != NULL);
 
 return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm));
 }
@@ -2629,8 +2623,8 @@ arena_run_reg_alloc(arena_run_t *run, arena_bin_t *bin)
 void *ret;
 unsigned i, mask, bit, regind;
 
-assert(run->magic == ARENA_RUN_MAGIC);
-assert(run->regs_minelm < bin->regs_mask_nelms);
+MOZ_ASSERT(run->magic == ARENA_RUN_MAGIC);
+MOZ_ASSERT(run->regs_minelm < bin->regs_mask_nelms);
 
 /*
 * Move the first check outside the loop, so that run->regs_minelm can
@@ -2644,7 +2638,7 @@ arena_run_reg_alloc(arena_run_t *run, arena_bin_t *bin)
 bit = ffs((int)mask) - 1;
 
 regind = ((i << (SIZEOF_INT_2POW + 3)) + bit);
-assert(regind < bin->nregs);
+MOZ_ASSERT(regind < bin->nregs);
 ret = (void *)(((uintptr_t)run) + bin->reg0_offset
 + (bin->reg_size * regind));
 
@@ -2662,7 +2656,7 @@ arena_run_reg_alloc(arena_run_t *run, arena_bin_t *bin)
 bit = ffs((int)mask) - 1;
 
 regind = ((i << (SIZEOF_INT_2POW + 3)) + bit);
-assert(regind < bin->nregs);
+MOZ_ASSERT(regind < bin->nregs);
 ret = (void *)(((uintptr_t)run) + bin->reg0_offset
 + (bin->reg_size * regind));
 
@@ -2722,8 +2716,8 @@ arena_run_reg_dalloc(arena_run_t *run, arena_bin_t *bin, void *ptr, size_t size)
 };
 unsigned diff, regind, elm, bit;
 
-assert(run->magic == ARENA_RUN_MAGIC);
-assert(((sizeof(size_invs)) / sizeof(unsigned)) + 3
+MOZ_ASSERT(run->magic == ARENA_RUN_MAGIC);
+MOZ_ASSERT(((sizeof(size_invs)) / sizeof(unsigned)) + 3
 >= (SMALL_MAX_DEFAULT >> QUANTUM_2POW_MIN));
 
 /*
@@ -2800,8 +2794,8 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
 total_pages = (chunk->map[run_ind].bits & ~pagesize_mask) >>
 pagesize_2pow;
 need_pages = (size >> pagesize_2pow);
-assert(need_pages > 0);
-assert(need_pages <= total_pages);
+MOZ_ASSERT(need_pages > 0);
+MOZ_ASSERT(need_pages <= total_pages);
 rem_pages = total_pages - need_pages;
 
 arena_avail_tree_remove(&arena->runs_avail, &chunk->map[run_ind]);
@@ -2836,7 +2830,7 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
 for (j = 0; i + j < need_pages && (chunk->map[run_ind +
 i + j].bits & CHUNK_MAP_MADVISED_OR_DECOMMITTED); j++) {
 /* DECOMMITTED and MADVISED are mutually exclusive. */
-assert(!(chunk->map[run_ind + i + j].bits & CHUNK_MAP_DECOMMITTED &&
+MOZ_ASSERT(!(chunk->map[run_ind + i + j].bits & CHUNK_MAP_DECOMMITTED &&
 chunk->map[run_ind + i + j].bits & CHUNK_MAP_MADVISED));
 
 chunk->map[run_ind + i + j].bits &=
@@ -2983,8 +2977,8 @@ arena_run_alloc(arena_t *arena, arena_bin_t *bin, size_t size, bool large,
 arena_run_t *run;
 arena_chunk_map_t *mapelm, key;
 
-assert(size <= arena_maxclass);
-assert((size & pagesize_mask) == 0);
+MOZ_ASSERT(size <= arena_maxclass);
+MOZ_ASSERT((size & pagesize_mask) == 0);
 
 /* Search the arena's chunks for the lowest best fit. */
 key.bits = size | CHUNK_MAP_KEY;
@@ -3047,7 +3041,7 @@ arena_purge(arena_t *arena, bool all)
 chunk) {
 ndirty += chunk->ndirty;
 } rb_foreach_end(arena_chunk_t, link_dirty, &arena->chunks_dirty, chunk)
-assert(ndirty == arena->ndirty);
+MOZ_ASSERT(ndirty == arena->ndirty);
 #endif
 RELEASE_ASSERT(all || (arena->ndirty > opt_dirty_max));
 
@@ -3075,7 +3069,7 @@ arena_purge(arena_t *arena, bool all)
 #else
 const size_t free_operation = CHUNK_MAP_MADVISED;
 #endif
-assert((chunk->map[i].bits &
+MOZ_ASSERT((chunk->map[i].bits &
 CHUNK_MAP_MADVISED_OR_DECOMMITTED) == 0);
 chunk->map[i].bits ^= free_operation | CHUNK_MAP_DIRTY;
 /* Find adjacent dirty run(s). */
@@ -3084,7 +3078,7 @@ arena_purge(arena_t *arena, bool all)
 (chunk->map[i - 1].bits & CHUNK_MAP_DIRTY);
 npages++) {
 i--;
-assert((chunk->map[i].bits &
+MOZ_ASSERT((chunk->map[i].bits &
 CHUNK_MAP_MADVISED_OR_DECOMMITTED) == 0);
 chunk->map[i].bits ^= free_operation | CHUNK_MAP_DIRTY;
 }
@@ -3245,7 +3239,7 @@ arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
 size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> pagesize_2pow;
 size_t head_npages = (oldsize - newsize) >> pagesize_2pow;
 
-assert(oldsize > newsize);
+MOZ_ASSERT(oldsize > newsize);
 
 /*
 * Update the chunk map so that arena_run_dalloc() can treat the
@@ -3266,7 +3260,7 @@ arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
 size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> pagesize_2pow;
 size_t npages = newsize >> pagesize_2pow;
 
-assert(oldsize > newsize);
+MOZ_ASSERT(oldsize > newsize);
 
 /*
 * Update the chunk map so that arena_run_dalloc() can treat the
@@ -3386,8 +3380,8 @@ arena_bin_run_size_calc(arena_bin_t *bin, size_t min_run_size)
 unsigned good_nregs, good_mask_nelms, good_reg0_offset;
 unsigned try_nregs, try_mask_nelms, try_reg0_offset;
 
-assert(min_run_size >= pagesize);
-assert(min_run_size <= arena_maxclass);
+MOZ_ASSERT(min_run_size >= pagesize);
+MOZ_ASSERT(min_run_size <= arena_maxclass);
 
 /*
 * Calculate known-valid settings before entering the run_size
@@ -3437,9 +3431,9 @@ arena_bin_run_size_calc(arena_bin_t *bin, size_t min_run_size)
 && RUN_MAX_OVRHD * (bin->reg_size << 3) > RUN_MAX_OVRHD_RELAX
 && (try_reg0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size);
 
-assert(sizeof(arena_run_t) + (sizeof(unsigned) * (good_mask_nelms - 1))
+MOZ_ASSERT(sizeof(arena_run_t) + (sizeof(unsigned) * (good_mask_nelms - 1))
 <= good_reg0_offset);
-assert((good_mask_nelms << (SIZEOF_INT_2POW + 3)) >= good_nregs);
+MOZ_ASSERT((good_mask_nelms << (SIZEOF_INT_2POW + 3)) >= good_nregs);
 
 /* Copy final settings. */
 bin->run_size = good_run_size;
@@ -3540,10 +3534,10 @@ static inline void *
 arena_malloc(arena_t *arena, size_t size, bool zero)
 {
 
-assert(arena != NULL);
+MOZ_ASSERT(arena != NULL);
 RELEASE_ASSERT(arena->magic == ARENA_MAGIC);
-assert(size != 0);
-assert(QUANTUM_CEILING(size) <= arena_maxclass);
+MOZ_ASSERT(size != 0);
+MOZ_ASSERT(QUANTUM_CEILING(size) <= arena_maxclass);
 
 if (size <= bin_maxclass) {
 return (arena_malloc_small(arena, size, zero));
@@ -3555,7 +3549,7 @@ static inline void *
 imalloc(size_t size)
 {
 
-assert(size != 0);
+MOZ_ASSERT(size != 0);
 
 if (size <= arena_maxclass)
 return (arena_malloc(choose_arena(), size, false));
@@ -3581,8 +3575,8 @@ arena_palloc(arena_t *arena, size_t alignment, size_t size, size_t alloc_size)
 size_t offset;
 arena_chunk_t *chunk;
 
-assert((size & pagesize_mask) == 0);
-assert((alignment & pagesize_mask) == 0);
+MOZ_ASSERT((size & pagesize_mask) == 0);
+MOZ_ASSERT((alignment & pagesize_mask) == 0);
 
 malloc_spin_lock(&arena->lock);
 ret = (void *)arena_run_alloc(arena, NULL, alloc_size, true, false);
@@ -3594,8 +3588,8 @@ arena_palloc(arena_t *arena, size_t alignment, size_t size, size_t alloc_size)
 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
 
 offset = (uintptr_t)ret & (alignment - 1);
-assert((offset & pagesize_mask) == 0);
-assert(offset < alloc_size);
+MOZ_ASSERT((offset & pagesize_mask) == 0);
+MOZ_ASSERT(offset < alloc_size);
 if (offset == 0)
 arena_run_trim_tail(arena, chunk, (arena_run_t*)ret, alloc_size, size, false);
 else {
@@ -3611,7 +3605,7 @@ arena_palloc(arena_t *arena, size_t alignment, size_t size, size_t alloc_size)
 trailsize = alloc_size - leadsize - size;
 if (trailsize != 0) {
 /* Trim trailing space. */
-assert(trailsize < alloc_size);
+MOZ_ASSERT(trailsize < alloc_size);
 arena_run_trim_tail(arena, chunk, (arena_run_t*)ret, size + trailsize,
 size, false);
 }
@@ -3719,7 +3713,7 @@ ipalloc(size_t alignment, size_t size)
 ret = huge_palloc(ceil_size, alignment, false);
 }
 
-assert(((uintptr_t)ret & (alignment - 1)) == 0);
+MOZ_ASSERT(((uintptr_t)ret & (alignment - 1)) == 0);
 return (ret);
 }
 
@@ -3731,8 +3725,8 @@ arena_salloc(const void *ptr)
 arena_chunk_t *chunk;
 size_t pageind, mapbits;
 
-assert(ptr != NULL);
-assert(CHUNK_ADDR2BASE(ptr) != ptr);
+MOZ_ASSERT(ptr != NULL);
+MOZ_ASSERT(CHUNK_ADDR2BASE(ptr) != ptr);
 
 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
 pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> pagesize_2pow);
@@ -3797,12 +3791,12 @@ isalloc(const void *ptr)
 size_t ret;
 arena_chunk_t *chunk;
 
-assert(ptr != NULL);
+MOZ_ASSERT(ptr != NULL);
 
 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
 if (chunk != ptr) {
 /* Region. */
-assert(chunk->arena->magic == ARENA_MAGIC);
+MOZ_ASSERT(chunk->arena->magic == ARENA_MAGIC);
 
 ret = arena_salloc(ptr);
 } else {
@@ -3928,13 +3922,13 @@ arena_dalloc(void *ptr, size_t offset)
 size_t pageind;
 arena_chunk_map_t *mapelm;
 
-assert(ptr != NULL);
-assert(offset != 0);
-assert(CHUNK_ADDR2OFFSET(ptr) == offset);
+MOZ_ASSERT(ptr != NULL);
+MOZ_ASSERT(offset != 0);
+MOZ_ASSERT(CHUNK_ADDR2OFFSET(ptr) == offset);
 
 chunk = (arena_chunk_t *) ((uintptr_t)ptr - offset);
 arena = chunk->arena;
-assert(arena != NULL);
+MOZ_ASSERT(arena != NULL);
 RELEASE_ASSERT(arena->magic == ARENA_MAGIC);
 
 malloc_spin_lock(&arena->lock);
@@ -3956,7 +3950,7 @@ idalloc(void *ptr)
 {
 size_t offset;
 
-assert(ptr != NULL);
+MOZ_ASSERT(ptr != NULL);
 
 offset = CHUNK_ADDR2OFFSET(ptr);
 if (offset != 0)
@@ -3970,7 +3964,7 @@ arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
 size_t size, size_t oldsize)
 {
 
-assert(size < oldsize);
+MOZ_ASSERT(size < oldsize);
 
 /*
 * Shrink the run, and make trailing pages available for other
@@ -3994,7 +3988,7 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
 RELEASE_ASSERT(oldsize == (chunk->map[pageind].bits & ~pagesize_mask));
 
 /* Try to extend the run. */
-assert(size > oldsize);
+MOZ_ASSERT(size > oldsize);
 if (pageind + npages < chunk_npages && (chunk->map[pageind+npages].bits
 & CHUNK_MAP_ALLOCATED) == 0 && (chunk->map[pageind+npages].bits &
 ~pagesize_mask) >= size - oldsize) {
@@ -4087,7 +4081,7 @@ arena_ralloc(void *ptr, size_t size, size_t oldsize)
 pow2_ceil(size) == pow2_ceil(oldsize))
 goto IN_PLACE; /* Same size class. */
 } else if (oldsize > bin_maxclass && oldsize <= arena_maxclass) {
-assert(size > bin_maxclass);
+MOZ_ASSERT(size > bin_maxclass);
 if (arena_ralloc_large(ptr, size, oldsize) == false)
 return (ptr);
 }
@@ -4124,8 +4118,8 @@ iralloc(void *ptr, size_t size)
 {
 size_t oldsize;
 
-assert(ptr != NULL);
-assert(size != 0);
+MOZ_ASSERT(ptr != NULL);
+MOZ_ASSERT(size != 0);
 
 oldsize = isalloc(ptr);
 
@@ -4397,8 +4391,8 @@ huge_ralloc(void *ptr, size_t size, size_t oldsize)
 malloc_mutex_lock(&huge_mtx);
 key.addr = const_cast<void*>(ptr);
 node = extent_tree_ad_search(&huge, &key);
-assert(node != NULL);
-assert(node->size == oldsize);
+MOZ_ASSERT(node != NULL);
+MOZ_ASSERT(node->size == oldsize);
 huge_allocated -= oldsize - psize;
 /* No need to change huge_mapped, because we didn't
 * (un)map anything. */
@@ -4422,8 +4416,8 @@ huge_ralloc(void *ptr, size_t size, size_t oldsize)
 malloc_mutex_lock(&huge_mtx);
 key.addr = const_cast<void*>(ptr);
 node = extent_tree_ad_search(&huge, &key);
-assert(node != NULL);
-assert(node->size == oldsize);
+MOZ_ASSERT(node != NULL);
+MOZ_ASSERT(node->size == oldsize);
 huge_allocated += psize - oldsize;
 /* No need to change huge_mapped, because we didn't
 * (un)map anything. */
@@ -4468,8 +4462,8 @@ huge_dalloc(void *ptr)
 /* Extract from tree of huge allocations. */
 key.addr = ptr;
 node = extent_tree_ad_search(&huge, &key);
-assert(node != NULL);
-assert(node->addr == ptr);
+MOZ_ASSERT(node != NULL);
+MOZ_ASSERT(node->addr == ptr);
 extent_tree_ad_remove(&huge, node);
 
 huge_ndalloc++;
@@ -4662,11 +4656,11 @@ malloc_init_hard(void)
 }
 #else
 result = sysconf(_SC_PAGESIZE);
-assert(result != -1);
+MOZ_ASSERT(result != -1);
 #endif
 
 /* We assume that the page size is a power of 2. */
-assert(((result - 1) & result) == 0);
+MOZ_ASSERT(((result - 1) & result) == 0);
 #ifdef MALLOC_STATIC_SIZES
 if (pagesize % (size_t) result) {
 _malloc_message(_getprogname(),
@@ -4818,9 +4812,9 @@ MALLOC_OUT:
 
 /* Set bin-related variables. */
 bin_maxclass = (pagesize >> 1);
-assert(opt_quantum_2pow >= TINY_MIN_2POW);
+MOZ_ASSERT(opt_quantum_2pow >= TINY_MIN_2POW);
 ntbins = opt_quantum_2pow - TINY_MIN_2POW;
-assert(ntbins <= opt_quantum_2pow);
+MOZ_ASSERT(ntbins <= opt_quantum_2pow);
 nqbins = (small_max >> opt_quantum_2pow);
 nsbins = pagesize_2pow - opt_small_max_2pow - 1;
 
@@ -4831,7 +4825,7 @@ MALLOC_OUT:
 small_min = (quantum >> 1) + 1;
 else
 small_min = 1;
-assert(small_min <= quantum);
+MOZ_ASSERT(small_min <= quantum);
 
 /* Set variables according to the value of opt_chunk_2pow. */
 chunksize = (1LU << opt_chunk_2pow);
@@ -4851,15 +4845,15 @@ MALLOC_OUT:
 * When using MAP_ALIGN, the alignment parameter must be a power of two
 * multiple of the system pagesize, or mmap will fail.
 */
-assert((chunksize % pagesize) == 0);
-assert((1 << (ffs(chunksize / pagesize) - 1)) == (chunksize/pagesize));
+MOZ_ASSERT((chunksize % pagesize) == 0);
+MOZ_ASSERT((1 << (ffs(chunksize / pagesize) - 1)) == (chunksize/pagesize));
 #endif
 
 /* Various sanity checks that regard configuration. */
-assert(quantum >= sizeof(void *));
-assert(quantum <= pagesize);
-assert(chunksize >= pagesize);
-assert(quantum * 4 <= chunksize);
+MOZ_ASSERT(quantum >= sizeof(void *));
+MOZ_ASSERT(quantum <= pagesize);
+MOZ_ASSERT(chunksize >= pagesize);
+MOZ_ASSERT(quantum * 4 <= chunksize);
 
 /* Initialize chunks data. */
 malloc_mutex_init(&chunks_mtx);
@@ -5007,7 +5001,7 @@ MEMALIGN(size_t alignment, size_t size)
 {
 void *ret;
 
-assert(((alignment - 1) & alignment) == 0);
+MOZ_ASSERT(((alignment - 1) & alignment) == 0);
 
 if (malloc_init()) {
 ret = NULL;
@@ -5144,7 +5138,7 @@ realloc_impl(void *ptr, size_t size)
 }
 
 if (ptr != NULL) {
-assert(malloc_initialized);
+MOZ_ASSERT(malloc_initialized);
 
 ret = iralloc(ptr, size);
 
@@ -5190,7 +5184,7 @@ free_impl(void *ptr)
 * A version of idalloc that checks for NULL pointer but only for
 * huge allocations assuming that CHUNK_ADDR2OFFSET(NULL) == 0.
 */
-assert(CHUNK_ADDR2OFFSET(NULL) == 0);
+MOZ_ASSERT(CHUNK_ADDR2OFFSET(NULL) == 0);
 offset = CHUNK_ADDR2OFFSET(ptr);
 if (offset != 0)
 arena_dalloc(ptr, offset);
@@ -5257,7 +5251,7 @@ jemalloc_stats_impl(jemalloc_stats_t *stats)
 {
 size_t i, non_arena_mapped, chunk_header_size;
 
-assert(stats != NULL);
+MOZ_ASSERT(stats != NULL);
 
 /*
 * Gather runtime settings.
@@ -5293,14 +5287,14 @@ jemalloc_stats_impl(jemalloc_stats_t *stats)
 malloc_mutex_lock(&huge_mtx);
 non_arena_mapped += huge_mapped;
 stats->allocated += huge_allocated;
-assert(huge_mapped >= huge_allocated);
+MOZ_ASSERT(huge_mapped >= huge_allocated);
 malloc_mutex_unlock(&huge_mtx);
 
 /* Get base mapped/allocated. */
 malloc_mutex_lock(&base_mtx);
 non_arena_mapped += base_mapped;
 stats->bookkeeping += base_committed;
-assert(base_mapped >= base_committed);
+MOZ_ASSERT(base_mapped >= base_committed);
 malloc_mutex_unlock(&base_mtx);
 
 malloc_spin_lock(&arenas_lock);
@@ -5350,8 +5344,8 @@ jemalloc_stats_impl(jemalloc_stats_t *stats)
 
 malloc_spin_unlock(&arena->lock);
 
-assert(arena_mapped >= arena_committed);
-assert(arena_committed >= arena_allocated + arena_dirty);
+MOZ_ASSERT(arena_mapped >= arena_committed);
+MOZ_ASSERT(arena_committed >= arena_allocated + arena_dirty);
 
 /* "waste" is committed memory that is neither dirty nor
 * allocated. */
@@ -5374,7 +5368,7 @@ jemalloc_stats_impl(jemalloc_stats_t *stats)
 stats->bookkeeping += chunk_header_size;
 stats->waste -= chunk_header_size;
 
-assert(stats->mapped >= stats->allocated + stats->waste +
+MOZ_ASSERT(stats->mapped >= stats->allocated + stats->waste +
 stats->page_cache + stats->bookkeeping);
 }
 
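Editor's note between the two patched files: the hunk at `-1225,7` above also retargets the fallback branch of `RELEASE_ASSERT` from the now-deleted `assert` shim to `MOZ_ASSERT`. A rough sketch of that macro's shape follows, assuming the mfbt headers are available; the Android crash path is a hypothetical paraphrase, since the diff only shows its closing `} while (0)`:

```cpp
// Hypothetical reconstruction for illustration, not the exact source.
#include <cstdlib>
#include "mozilla/Assertions.h"

#ifdef MOZ_MEMORY_ANDROID
// On Android the check must survive release builds, so failure aborts.
#  define RELEASE_ASSERT(assertion) \
     do {                           \
       if (!(assertion)) {          \
         abort();                   \
       }                            \
     } while (0)
#else
// Elsewhere it degrades to the debug-only MOZ_ASSERT, as the hunk shows.
#  define RELEASE_ASSERT(assertion) MOZ_ASSERT(assertion)
#endif
```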
@@ -38,11 +38,6 @@
 * #define SIZEOF_PTR_2POW ...
 * #define RB_NO_C99_VARARRAYS
 *
-* (Optional, see assert(3).)
-* #define NDEBUG
-*
-* (Required.)
-* #include <assert.h>
 * #include <rb.h>
 * ...
 *
@@ -167,7 +162,7 @@ struct { \
 a_field, (a_node)), (r_node)); \
 } else { \
 a_type *rbp_n_t = (a_tree)->rbt_root; \
-assert(rbp_n_t != &(a_tree)->rbt_nil); \
+MOZ_ASSERT(rbp_n_t != &(a_tree)->rbt_nil); \
 (r_node) = &(a_tree)->rbt_nil; \
 while (true) { \
 int rbp_n_cmp = (a_cmp)((a_node), rbp_n_t); \
@@ -179,7 +174,7 @@ struct { \
 } else { \
 break; \
 } \
-assert(rbp_n_t != &(a_tree)->rbt_nil); \
+MOZ_ASSERT(rbp_n_t != &(a_tree)->rbt_nil); \
 } \
 } \
 } while (0)
@@ -190,7 +185,7 @@ struct { \
 a_field, (a_node)), (r_node)); \
 } else { \
 a_type *rbp_p_t = (a_tree)->rbt_root; \
-assert(rbp_p_t != &(a_tree)->rbt_nil); \
+MOZ_ASSERT(rbp_p_t != &(a_tree)->rbt_nil); \
 (r_node) = &(a_tree)->rbt_nil; \
 while (true) { \
 int rbp_p_cmp = (a_cmp)((a_node), rbp_p_t); \
@@ -202,7 +197,7 @@ struct { \
 } else { \
 break; \
 } \
-assert(rbp_p_t != &(a_tree)->rbt_nil); \
+MOZ_ASSERT(rbp_p_t != &(a_tree)->rbt_nil); \
 } \
 } \
 } while (0)
@@ -405,14 +400,14 @@ struct { \
 /* rbp_i_c was the right child of rbp_i_p, so rotate */\
 /* left in order to maintain the left-leaning */\
 /* invariant. */\
-assert(rbp_right_get(a_type, a_field, rbp_i_p) \
+MOZ_ASSERT(rbp_right_get(a_type, a_field, rbp_i_p) \
 == rbp_i_c); \
 rbp_right_set(a_type, a_field, rbp_i_p, rbp_i_t); \
 rbp_lean_left(a_type, a_field, rbp_i_p, rbp_i_u); \
 if (rbp_left_get(a_type, a_field, rbp_i_g) == rbp_i_p) {\
 rbp_left_set(a_type, a_field, rbp_i_g, rbp_i_u); \
 } else { \
-assert(rbp_right_get(a_type, a_field, rbp_i_g) \
+MOZ_ASSERT(rbp_right_get(a_type, a_field, rbp_i_g) \
 == rbp_i_p); \
 rbp_right_set(a_type, a_field, rbp_i_g, rbp_i_u); \
 } \
@@ -421,7 +416,7 @@ struct { \
 if (rbp_i_cmp < 0) { \
 rbp_i_c = rbp_left_get(a_type, a_field, rbp_i_p); \
 } else { \
-assert(rbp_i_cmp > 0); \
+MOZ_ASSERT(rbp_i_cmp > 0); \
 rbp_i_c = rbp_right_get(a_type, a_field, rbp_i_p); \
 } \
 continue; \
@@ -433,7 +428,7 @@ struct { \
 if (rbp_i_cmp < 0) { \
 rbp_i_c = rbp_left_get(a_type, a_field, rbp_i_c); \
 } else { \
-assert(rbp_i_cmp > 0); \
+MOZ_ASSERT(rbp_i_cmp > 0); \
 rbp_i_c = rbp_right_get(a_type, a_field, rbp_i_c); \
 } \
 } \
@@ -488,7 +483,7 @@ struct { \
 } \
 } else { \
 if (rbp_r_cmp == 0) { \
-assert((a_node) == rbp_r_c); \
+MOZ_ASSERT((a_node) == rbp_r_c); \
 if (rbp_right_get(a_type, a_field, rbp_r_c) \
 == &(a_tree)->rbt_nil) { \
 /* Delete root node (which is also a leaf node). */\
@@ -548,7 +543,7 @@ struct { \
 } \
 if (rbp_r_cmp != 0) { \
 while (true) { \
-assert(rbp_r_p != &(a_tree)->rbt_nil); \
+MOZ_ASSERT(rbp_r_p != &(a_tree)->rbt_nil); \
 rbp_r_cmp = (a_cmp)((a_node), rbp_r_c); \
 if (rbp_r_cmp < 0) { \
 rbp_r_t = rbp_left_get(a_type, a_field, rbp_r_c); \
@@ -561,7 +556,7 @@ struct { \
 rbp_left_set(a_type, a_field, rbp_r_xp, \
 rbp_r_c); \
 } else { \
-assert(rbp_right_get(a_type, a_field, \
+MOZ_ASSERT(rbp_right_get(a_type, a_field, \
 rbp_r_xp) == (a_node)); \
 rbp_right_set(a_type, a_field, rbp_r_xp, \
 rbp_r_c); \
@@ -577,7 +572,7 @@ struct { \
 rbp_left_set(a_type, a_field, rbp_r_p, \
 &(a_tree)->rbt_nil); \
 } else { \
-assert(rbp_right_get(a_type, a_field, rbp_r_p) \
+MOZ_ASSERT(rbp_right_get(a_type, a_field, rbp_r_p)\
 == rbp_r_c); \
 rbp_right_set(a_type, a_field, rbp_r_p, \
 &(a_tree)->rbt_nil); \
@@ -605,7 +600,7 @@ struct { \
 /* Check whether to delete this node (it has to be */\
 /* the correct node and a leaf node). */\
 if (rbp_r_cmp == 0) { \
-assert((a_node) == rbp_r_c); \
+MOZ_ASSERT((a_node) == rbp_r_c); \
 if (rbp_right_get(a_type, a_field, rbp_r_c) \
 == &(a_tree)->rbt_nil) { \
 /* Delete leaf node. */\