Back out bug 694896, rev 62ba86c7; wrong patch was checked in.

Justin Lebar 2011-11-03 12:57:40 -04:00
parent 3648a9fb42
commit 515237a036


@@ -197,12 +197,14 @@
*/
/* #define MALLOC_BALANCE */
/*
* MALLOC_PAGEFILE causes all mmap()ed memory to be backed by temporary
* files, so that if a chunk is mapped, it is guaranteed to be swappable.
* This avoids asynchronous OOM failures that are due to VM over-commit.
*/
/* #define MALLOC_PAGEFILE */
#if (!defined(MOZ_MEMORY_WINDOWS) && !defined(MOZ_MEMORY_DARWIN))
/*
* MALLOC_PAGEFILE causes all mmap()ed memory to be backed by temporary
* files, so that if a chunk is mapped, it is guaranteed to be swappable.
* This avoids asynchronous OOM failures that are due to VM over-commit.
*/
#define MALLOC_PAGEFILE
#endif
#ifdef MALLOC_PAGEFILE
/* Write size when initializing a page file. */
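The comment above describes mapping chunks from a temporary file rather than from anonymous memory. A minimal sketch of that idea follows, assuming plain POSIX calls; the helper name is hypothetical and this is not the pagefile_init()/pages_map() pair the allocator actually uses, which also zero-fills the file in fixed-size writes (the write size mentioned just above) so the backing blocks are really reserved rather than left sparse.

#include <stdlib.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>

/* Hypothetical helper: map `size` bytes backed by an unlinked temporary
 * file, so the pages are swappable instead of anonymous. */
static void *
map_file_backed(size_t size)
{
	char path[] = "/tmp/jemalloc.XXXXXX";
	int fd = mkstemp(path);
	void *ret;

	if (fd == -1)
		return (NULL);
	unlink(path);		/* File disappears once fd and mapping are gone. */
	if (ftruncate(fd, (off_t)size) != 0) {
		close(fd);
		return (NULL);
	}
	ret = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	close(fd);		/* The mapping keeps the backing store alive. */
	return (ret == MAP_FAILED ? NULL : ret);
}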
@@ -2309,9 +2311,8 @@ pages_map(void *addr, size_t size, int pfd)
}
assert(ret != NULL);
if (ret == MAP_FAILED) {
if (ret == MAP_FAILED)
ret = NULL;
}
#if defined(__ia64__)
/*
* If the allocated memory doesn't have its upper 17 bits clear, consider it
@@ -2525,10 +2526,6 @@ malloc_rtree_set(malloc_rtree_t *rtree, uintptr_t key, void *val)
}
#endif
#if defined(MOZ_MEMORY_WINDOWS) || defined(JEMALLOC_USES_MAP_ALIGN) || defined(MALLOC_PAGEFILE)
/* Allocate an aligned chunk while maintaining a 1:1 correspondence between
* mmap and unmap calls. This is important on Windows, but not elsewhere. */
static void *
chunk_alloc_mmap(size_t size, bool pagefile)
{
@@ -2547,6 +2544,16 @@ chunk_alloc_mmap(size_t size, bool pagefile)
#endif
pfd = -1;
/*
* Windows requires that there be a 1:1 mapping between VM
* allocation/deallocation operations. Therefore, take care here to
* acquire the final result via one mapping operation. This means
* unmapping any preliminary result that is not correctly aligned.
*
* The MALLOC_PAGEFILE code also benefits from this mapping algorithm,
* since it reduces the number of page files.
*/
#ifdef JEMALLOC_USES_MAP_ALIGN
ret = pages_map_align(size, pfd, chunksize);
#else
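A sketch of the pattern the comment above describes, for the Windows case where a region reserved by one VirtualAlloc() call must be released by exactly one VirtualFree() call: over-reserve, release the probe, then re-reserve precisely at the aligned address. The helper is illustrative only, not the pages_map()/pages_unmap() wrappers used in this file.

#include <stdint.h>
#include <windows.h>

/* Hypothetical helper: return `size` bytes aligned to `alignment` from a
 * single reservation, so the region can later be released in one call. */
static void *
win_alloc_aligned(size_t size, size_t alignment)
{
	for (;;) {
		/* Over-reserve so an aligned address must exist inside. */
		void *probe = VirtualAlloc(NULL, size + alignment, MEM_RESERVE,
		    PAGE_NOACCESS);
		uintptr_t aligned;

		if (probe == NULL)
			return (NULL);
		aligned = ((uintptr_t)probe + alignment - 1) & ~(alignment - 1);
		/* Partial release is not allowed, so drop the whole probe and
		 * re-reserve exactly `size` bytes at the aligned address. */
		VirtualFree(probe, 0, MEM_RELEASE);
		probe = VirtualAlloc((void *)aligned, size,
		    MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
		if (probe != NULL)
			return (probe);
		/* Another thread claimed the address in between; retry. */
	}
}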
@@ -2599,143 +2606,6 @@ RETURN:
return (ret);
}
#else /* ! (defined(MOZ_MEMORY_WINDOWS) || defined(JEMALLOC_USES_MAP_ALIGN) || defined(MALLOC_PAGEFILE)) */
/*
* Used by chunk_alloc_mmap() to decide whether to attempt the fast path and
* potentially avoid some system calls.
*/
#ifndef NO_TLS
static __thread bool mmap_unaligned_tls __attribute__((tls_model("initial-exec")));
#define MMAP_UNALIGNED_GET() mmap_unaligned_tls
#define MMAP_UNALIGNED_SET(v) do { \
mmap_unaligned_tls = (v); \
} while (0)
#else
#define NEEDS_PTHREAD_MMAP_UNALIGNED_TSD
static pthread_key_t mmap_unaligned_tsd;
#define MMAP_UNALIGNED_GET() ((bool)pthread_getspecific(mmap_unaligned_tsd))
#define MMAP_UNALIGNED_SET(v) do { \
pthread_setspecific(mmap_unaligned_tsd, (void *)(v)); \
} while (0)
#endif
/* chunk_alloc_mmap_slow and chunk_alloc_mmap were cherry-picked from upstream
* jemalloc 2.2.3 to fix Mozilla bug 694896, enable jemalloc on Mac 10.7. */
static void *
chunk_alloc_mmap_slow(size_t size, bool unaligned)
{
void *ret;
size_t offset;
/* Beware size_t wrap-around. */
if (size + chunksize <= size)
return (NULL);
ret = pages_map(NULL, size + chunksize, -1);
if (ret == NULL)
return (NULL);
/* Clean up unneeded leading/trailing space. */
offset = CHUNK_ADDR2OFFSET(ret);
if (offset != 0) {
/* Note that mmap() returned an unaligned mapping. */
unaligned = true;
/* Leading space. */
pages_unmap(ret, chunksize - offset);
ret = (void *)((uintptr_t)ret +
(chunksize - offset));
/* Trailing space. */
pages_unmap((void *)((uintptr_t)ret + size),
offset);
} else {
/* Trailing space only. */
pages_unmap((void *)((uintptr_t)ret + size),
chunksize);
}
/*
* If mmap() returned an aligned mapping, reset mmap_unaligned so that
* the next chunk_alloc_mmap() execution tries the fast allocation
* method.
*/
if (unaligned == false)
MMAP_UNALIGNED_SET(false);
return (ret);
}
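To make the trimming above concrete (addresses chosen purely for illustration): with chunksize = 0x100000 (1 MiB) and size equal to one chunk, an oversized 2 MiB mapping returned at 0x7f0000040000 has offset = 0x40000. The leading chunksize - offset = 0xc0000 bytes are unmapped, ret advances to the aligned address 0x7f0000100000, and the trailing 0x40000 bytes starting at 0x7f0000200000 are unmapped, leaving exactly one chunk-aligned chunk mapped.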
static void *
chunk_alloc_mmap(size_t size, bool pagefile)
{
void *ret;
/*
* Ideally, there would be a way to specify alignment to mmap() (like
* NetBSD has), but in the absence of such a feature, we have to work
* hard to efficiently create aligned mappings. The reliable, but
* slow method is to create a mapping that is over-sized, then trim the
* excess. However, that always results in at least one call to
* pages_unmap().
*
* A more optimistic approach is to try mapping precisely the right
* amount, then try to append another mapping if alignment is off. In
* practice, this works out well as long as the application is not
* interleaving mappings via direct mmap() calls. If we do run into a
* situation where there is an interleaved mapping and we are unable to
* extend an unaligned mapping, our best option is to switch to the
* slow method until mmap() returns another aligned mapping. This will
* tend to leave a gap in the memory map that is too small to cause
* later problems for the optimistic method.
*
* Another possible confounding factor is address space layout
* randomization (ASLR), which causes mmap(2) to disregard the
* requested address. mmap_unaligned tracks whether the previous
* chunk_alloc_mmap() execution received any unaligned or relocated
* mappings, and if so, the current execution will immediately fall
* back to the slow method. However, we keep track of whether the fast
* method would have succeeded, and if so, we make a note to try the
* fast method next time.
*/
if (MMAP_UNALIGNED_GET() == false) {
size_t offset;
ret = pages_map(NULL, size, -1);
if (ret == NULL)
return (NULL);
offset = CHUNK_ADDR2OFFSET(ret);
if (offset != 0) {
MMAP_UNALIGNED_SET(true);
/* Try to extend chunk boundary. */
if (pages_map((void *)((uintptr_t)ret + size),
chunksize - offset, -1) == NULL) {
/*
* Extension failed. Clean up, then revert to
* the reliable-but-expensive method.
*/
pages_unmap(ret, size);
ret = chunk_alloc_mmap_slow(size, true);
} else {
/* Clean up unneeded leading space. */
pages_unmap(ret, chunksize - offset);
ret = (void *)((uintptr_t)ret + (chunksize -
offset));
}
}
} else
ret = chunk_alloc_mmap_slow(size, false);
return (ret);
}
#endif /* defined(MOZ_MEMORY_WINDOWS) || defined(JEMALLOC_USES_MAP_ALIGN) || defined(MALLOC_PAGEFILE) */
#ifdef MALLOC_PAGEFILE
static int
pagefile_init(size_t size)
@@ -6057,51 +5927,45 @@ MALLOC_OUT:
return (true);
#endif
#if defined(NEEDS_PTHREAD_MMAP_UNALIGNED_TSD)
if (pthread_key_create(&mmap_unaligned_tsd, NULL) != 0) {
malloc_printf("<jemalloc>: Error in pthread_key_create()\n");
}
#endif
malloc_initialized = true;
#ifdef MOZ_MEMORY_DARWIN
/*
* Overwrite the default memory allocator to use jemalloc everywhere.
*/
default_zone = malloc_default_zone();
/*
* Overwrite the default memory allocator to use jemalloc everywhere.
*/
default_zone = malloc_default_zone();
/*
* We only use jemalloc with versions of MacOS we've seen (10.5, 10.6, and
* 10.7). We'll have to update our code to work with newer versions,
* because the malloc zone layout is likely to change.
*/
/*
* We only use jemalloc with the 10.6 SDK:
* - With the 10.5 SDK, madvise doesn't work, leading to a 20% memory
* usage regression (bug 670492).
* - With the 10.7 SDK, jemalloc causes the browser to hang (bug 670175).
*/
osx_use_jemalloc = default_zone->version == LEOPARD_MALLOC_ZONE_T_VERSION ||
default_zone->version == SNOW_LEOPARD_MALLOC_ZONE_T_VERSION ||
default_zone->version == LION_MALLOC_ZONE_T_VERSION;
osx_use_jemalloc = (default_zone->version == LEOPARD_MALLOC_ZONE_T_VERSION ||
default_zone->version == SNOW_LEOPARD_MALLOC_ZONE_T_VERSION);
/* Allow us to dynamically turn off jemalloc for testing. */
/* Allow us to dynamically turn off jemalloc for testing. */
if (getenv("NO_MAC_JEMALLOC"))
osx_use_jemalloc = false;
osx_use_jemalloc = false;
if (osx_use_jemalloc) {
size_t size;
if (osx_use_jemalloc) {
size_t size;
/* Register the custom zone. */
malloc_zone_register(create_zone(default_zone->version));
/* Register the custom zone. */
malloc_zone_register(create_zone(default_zone->version));
/*
* Convert the default szone to an "overlay zone" that is capable
* of deallocating szone-allocated objects, but allocating new
* objects from jemalloc.
*/
size = zone_version_size(default_zone->version);
szone2ozone(default_zone, size);
}
else {
szone = default_zone;
}
/*
* Convert the default szone to an "overlay zone" that is capable
* of deallocating szone-allocated objects, but allocating new
* objects from jemalloc.
*/
size = zone_version_size(default_zone->version);
szone2ozone(default_zone, size);
}
else {
szone = default_zone;
}
#endif
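As background for the zone-version checks in this hunk, the runtime decision amounts to reading the version field of the default malloc zone; a minimal sketch follows, where the numeric version values for the 10.5 and 10.6 layouts are assumptions rather than constants taken from this file.

#include <malloc/malloc.h>
#include <stdbool.h>

/* Hypothetical helper: only overlay zones whose layout we recognize. */
static bool
zone_layout_supported(void)
{
	malloc_zone_t *zone = malloc_default_zone();

	/* Assumed values: 3 for the 10.5 layout, 6 for the 10.6 layout. */
	return (zone->version == 3 || zone->version == 6);
}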
#ifndef MOZ_MEMORY_WINDOWS