diff --git a/configure.in b/configure.in index 1c9a6ebed87a..bcdbf7e6a124 100644 --- a/configure.in +++ b/configure.in @@ -94,7 +94,6 @@ dnl ============================================================== _topsrcdir=`cd \`dirname $0\`; pwd` _objdir=`pwd` - dnl TODO Don't exempt L10N builds once bug 842760 is resolved. if test "$_topsrcdir" = "$_objdir" -a "${with_l10n_base+set}" != set; then echo " ***" diff --git a/memory/jemalloc/src/ChangeLog b/memory/jemalloc/src/ChangeLog index 0cf887c2344a..dba05ebc834e 100644 --- a/memory/jemalloc/src/ChangeLog +++ b/memory/jemalloc/src/ChangeLog @@ -4,6 +4,25 @@ brevity. Much more detail can be found in the git revision history: https://github.com/jemalloc/jemalloc +* 4.0.1 (XXX) + + Bug fixes: + - Fix arenas_cache_cleanup() and arena_get_hard() to handle + allocation/deallocation within the application's thread-specific data + cleanup functions even after arenas_cache is torn down. + - Don't bitshift by negative amounts when encoding/decoding run sizes in chunk + header maps. This affected systems with page sizes greater than 8 KiB. + - Rename index_t to szind_t to avoid an existing type on Solaris. + - Add JEMALLOC_CXX_THROW to the memalign() function prototype, in order to + match glibc and avoid compilation errors when including both + jemalloc/jemalloc.h and malloc.h in C++ code. + - Fix chunk purge hook calls for in-place huge shrinking reallocation to + specify the old chunk size rather than the new chunk size. This bug caused + no correctness issues for the default chunk purge function, but was + visible to custom functions set via the "arena.<i>.chunk_hooks" mallctl. + - Fix TLS configuration such that it is enabled by default for platforms on + which it works correctly. + * 4.0.0 (August 17, 2015) This version contains many speed and space optimizations, both minor and diff --git a/memory/jemalloc/src/VERSION b/memory/jemalloc/src/VERSION index 6802c56546d6..a341366c1121 100644 --- a/memory/jemalloc/src/VERSION +++ b/memory/jemalloc/src/VERSION @@ -1 +1 @@ -4.0.0-0-g6e98caf8f064482b9ab292ef3638dea67420bbc2 +4.0.0-12-ged4883285e111b426e5769b24dad164ebacaa5b9 diff --git a/memory/jemalloc/src/configure b/memory/jemalloc/src/configure index f2ce3d4d4c79..baa88041d0cf 100755 --- a/memory/jemalloc/src/configure +++ b/memory/jemalloc/src/configure @@ -728,6 +728,7 @@ infodir docdir oldincludedir includedir +runstatedir localstatedir sharedstatedir sysconfdir @@ -831,6 +832,7 @@ datadir='${datarootdir}' sysconfdir='${prefix}/etc' sharedstatedir='${prefix}/com' localstatedir='${prefix}/var' +runstatedir='${localstatedir}/run' includedir='${prefix}/include' oldincludedir='/usr/include' docdir='${datarootdir}/doc/${PACKAGE}' @@ -1083,6 +1085,15 @@ do | -silent | --silent | --silen | --sile | --sil) silent=yes ;; + -runstatedir | --runstatedir | --runstatedi | --runstated \ + | --runstate | --runstat | --runsta | --runst | --runs \ + | --run | --ru | --r) + ac_prev=runstatedir ;; + -runstatedir=* | --runstatedir=* | --runstatedi=* | --runstated=* \ + | --runstate=* | --runstat=* | --runsta=* | --runst=* | --runs=* \ + | --run=* | --ru=* | --r=*) + runstatedir=$ac_optarg ;; + -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) ac_prev=sbindir ;; -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ @@ -1220,7 +1231,7 @@ fi for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ datadir sysconfdir sharedstatedir localstatedir includedir \ oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ - libdir localedir 
mandir + libdir localedir mandir runstatedir do eval ac_val=\$$ac_var # Remove trailing slashes. @@ -1373,6 +1384,7 @@ Fine tuning of the installation directories: --sysconfdir=DIR read-only single-machine data [PREFIX/etc] --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] --localstatedir=DIR modifiable single-machine data [PREFIX/var] + --runstatedir=DIR modifiable per-process data [LOCALSTATEDIR/run] --libdir=DIR object code libraries [EPREFIX/lib] --includedir=DIR C header files [PREFIX/include] --oldincludedir=DIR C header files for non-gcc [/usr/include] @@ -6964,7 +6976,6 @@ else fi -set -x if test "x`test ! \"${srcroot}\" && cd \"${srcroot}\"; git rev-parse --is-inside-work-tree 2>/dev/null`" = "xtrue" ; then rm -f "${objroot}VERSION" @@ -6992,8 +7003,6 @@ $as_echo "Missing VERSION file, and unable to generate it; creating bogus VERSIO cp ${srcroot}VERSION ${objroot}VERSION fi fi - -set +x jemalloc_version=`cat "${objroot}VERSION"` jemalloc_version_major=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $1}'` jemalloc_version_minor=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $2}'` @@ -7283,15 +7292,18 @@ else fi -if test "x${enable_tls}" = "x" -a "x${force_tls}" = "x1" ; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing TLS to avoid allocator/threading bootstrap issues" >&5 +if test "x${enable_tls}" = "x" ; then + if test "x${force_tls}" = "x1" ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing TLS to avoid allocator/threading bootstrap issues" >&5 $as_echo "Forcing TLS to avoid allocator/threading bootstrap issues" >&6; } - enable_tls="1" -fi -if test "x${enable_tls}" = "x" -a "x${force_tls}" = "x0" ; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing no TLS to avoid allocator/threading bootstrap issues" >&5 + enable_tls="1" + elif test "x${force_tls}" = "x0" ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing no TLS to avoid allocator/threading bootstrap issues" >&5 $as_echo "Forcing no TLS to avoid allocator/threading bootstrap issues" >&6; } - enable_tls="0" + enable_tls="0" + else + enable_tls="1" + fi fi if test "x${enable_tls}" = "x1" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for TLS" >&5 @@ -7327,12 +7339,17 @@ else fi if test "x${enable_tls}" = "x1" ; then + if test "x${force_tls}" = "x0" ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: TLS enabled despite being marked unusable on this platform" >&5 +$as_echo "$as_me: WARNING: TLS enabled despite being marked unusable on this platform" >&2;} + fi cat >>confdefs.h <<_ACEOF #define JEMALLOC_TLS _ACEOF elif test "x${force_tls}" = "x1" ; then - as_fn_error $? 
"Failed to configure TLS, which is mandatory for correct function" "$LINENO" 5 + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: TLS disabled despite being marked critical on this platform" >&5 +$as_echo "$as_me: WARNING: TLS disabled despite being marked critical on this platform" >&2;} fi diff --git a/memory/jemalloc/src/configure.ac b/memory/jemalloc/src/configure.ac index f7c7f3ceec5d..5e77b6800d65 100644 --- a/memory/jemalloc/src/configure.ac +++ b/memory/jemalloc/src/configure.ac @@ -1272,13 +1272,16 @@ fi , enable_tls="" ) -if test "x${enable_tls}" = "x" -a "x${force_tls}" = "x1" ; then - AC_MSG_RESULT([Forcing TLS to avoid allocator/threading bootstrap issues]) - enable_tls="1" -fi -if test "x${enable_tls}" = "x" -a "x${force_tls}" = "x0" ; then - AC_MSG_RESULT([Forcing no TLS to avoid allocator/threading bootstrap issues]) - enable_tls="0" +if test "x${enable_tls}" = "x" ; then + if test "x${force_tls}" = "x1" ; then + AC_MSG_RESULT([Forcing TLS to avoid allocator/threading bootstrap issues]) + enable_tls="1" + elif test "x${force_tls}" = "x0" ; then + AC_MSG_RESULT([Forcing no TLS to avoid allocator/threading bootstrap issues]) + enable_tls="0" + else + enable_tls="1" + fi fi if test "x${enable_tls}" = "x1" ; then AC_MSG_CHECKING([for TLS]) @@ -1298,9 +1301,12 @@ else fi AC_SUBST([enable_tls]) if test "x${enable_tls}" = "x1" ; then + if test "x${force_tls}" = "x0" ; then + AC_MSG_WARN([TLS enabled despite being marked unusable on this platform]) + fi AC_DEFINE_UNQUOTED([JEMALLOC_TLS], [ ]) elif test "x${force_tls}" = "x1" ; then - AC_MSG_ERROR([Failed to configure TLS, which is mandatory for correct function]) + AC_MSG_WARN([TLS disabled despite being marked critical on this platform]) fi dnl ============================================================================ diff --git a/memory/jemalloc/src/include/jemalloc/internal/arena.h b/memory/jemalloc/src/include/jemalloc/internal/arena.h index cb015eedc601..76c5b9362bc6 100644 --- a/memory/jemalloc/src/include/jemalloc/internal/arena.h +++ b/memory/jemalloc/src/include/jemalloc/internal/arena.h @@ -39,7 +39,7 @@ typedef struct arena_s arena_t; #ifdef JEMALLOC_ARENA_STRUCTS_A struct arena_run_s { /* Index of bin this run is associated with. */ - index_t binind; + szind_t binind; /* Number of free regions in run. 
*/ unsigned nfree; @@ -448,7 +448,7 @@ bool arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult); void arena_maybe_purge(arena_t *arena); void arena_purge_all(arena_t *arena); void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, - index_t binind, uint64_t prof_accumbytes); + szind_t binind, uint64_t prof_accumbytes); void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero); #ifdef JEMALLOC_JET @@ -519,17 +519,19 @@ arena_chunk_map_misc_t *arena_run_to_miscelm(arena_run_t *run); size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind); size_t arena_mapbitsp_read(size_t *mapbitsp); size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind); +size_t arena_mapbits_size_decode(size_t mapbits); size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind); size_t arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind); size_t arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind); -index_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind); +szind_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind); size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind); size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind); size_t arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind); size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind); size_t arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind); void arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits); +size_t arena_mapbits_size_encode(size_t size); void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size, size_t flags); void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind, @@ -539,21 +541,21 @@ void arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind, void arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size, size_t flags); void arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind, - index_t binind); + szind_t binind); void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, - size_t runind, index_t binind, size_t flags); + size_t runind, szind_t binind, size_t flags); void arena_metadata_allocated_add(arena_t *arena, size_t size); void arena_metadata_allocated_sub(arena_t *arena, size_t size); size_t arena_metadata_allocated_get(arena_t *arena); bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes); bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes); bool arena_prof_accum(arena_t *arena, uint64_t accumbytes); -index_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits); -index_t arena_bin_index(arena_t *arena, arena_bin_t *bin); +szind_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits); +szind_t arena_bin_index(arena_t *arena, arena_bin_t *bin); unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr); prof_tctx_t *arena_prof_tctx_get(const void *ptr); -void arena_prof_tctx_set(const void *ptr, prof_tctx_t *tctx); +void arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx); void *arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero, tcache_t *tcache); arena_t *arena_aalloc(const void *ptr); @@ -652,6 +654,22 @@ arena_mapbits_get(arena_chunk_t *chunk, size_t pageind) return (arena_mapbitsp_read(arena_mapbitsp_get(chunk, pageind))); } +JEMALLOC_ALWAYS_INLINE size_t +arena_mapbits_size_decode(size_t mapbits) +{ + size_t size; + +#if CHUNK_MAP_SIZE_SHIFT 
> 0 + size = (mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT; +#elif CHUNK_MAP_SIZE_SHIFT == 0 + size = mapbits & CHUNK_MAP_SIZE_MASK; +#else + size = (mapbits & CHUNK_MAP_SIZE_MASK) << -CHUNK_MAP_SIZE_SHIFT; +#endif + + return (size); +} + JEMALLOC_ALWAYS_INLINE size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind) { @@ -659,7 +677,7 @@ arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind) mapbits = arena_mapbits_get(chunk, pageind); assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0); - return ((mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT); + return (arena_mapbits_size_decode(mapbits)); } JEMALLOC_ALWAYS_INLINE size_t @@ -670,7 +688,7 @@ arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind) mapbits = arena_mapbits_get(chunk, pageind); assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)); - return ((mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT); + return (arena_mapbits_size_decode(mapbits)); } JEMALLOC_ALWAYS_INLINE size_t @@ -684,11 +702,11 @@ arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind) return (mapbits >> CHUNK_MAP_RUNIND_SHIFT); } -JEMALLOC_ALWAYS_INLINE index_t +JEMALLOC_ALWAYS_INLINE szind_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind) { size_t mapbits; - index_t binind; + szind_t binind; mapbits = arena_mapbits_get(chunk, pageind); binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT; @@ -754,6 +772,23 @@ arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits) *mapbitsp = mapbits; } +JEMALLOC_ALWAYS_INLINE size_t +arena_mapbits_size_encode(size_t size) +{ + size_t mapbits; + +#if CHUNK_MAP_SIZE_SHIFT > 0 + mapbits = size << CHUNK_MAP_SIZE_SHIFT; +#elif CHUNK_MAP_SIZE_SHIFT == 0 + mapbits = size; +#else + mapbits = size >> -CHUNK_MAP_SIZE_SHIFT; +#endif + + assert((mapbits & ~CHUNK_MAP_SIZE_MASK) == 0); + return (mapbits); +} + JEMALLOC_ALWAYS_INLINE void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size, size_t flags) @@ -761,11 +796,10 @@ arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size, size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); assert((size & PAGE_MASK) == 0); - assert(((size << CHUNK_MAP_SIZE_SHIFT) & ~CHUNK_MAP_SIZE_MASK) == 0); assert((flags & CHUNK_MAP_FLAGS_MASK) == flags); assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags & (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0); - arena_mapbitsp_write(mapbitsp, (size << CHUNK_MAP_SIZE_SHIFT) | + arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) | CHUNK_MAP_BININD_INVALID | flags); } @@ -777,10 +811,9 @@ arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind, size_t mapbits = arena_mapbitsp_read(mapbitsp); assert((size & PAGE_MASK) == 0); - assert(((size << CHUNK_MAP_SIZE_SHIFT) & ~CHUNK_MAP_SIZE_MASK) == 0); assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0); - arena_mapbitsp_write(mapbitsp, (size << CHUNK_MAP_SIZE_SHIFT) | (mapbits - & ~CHUNK_MAP_SIZE_MASK)); + arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) | + (mapbits & ~CHUNK_MAP_SIZE_MASK)); } JEMALLOC_ALWAYS_INLINE void @@ -799,18 +832,17 @@ arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size, size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); assert((size & PAGE_MASK) == 0); - assert(((size << CHUNK_MAP_SIZE_SHIFT) & ~CHUNK_MAP_SIZE_MASK) == 0); assert((flags & CHUNK_MAP_FLAGS_MASK) == flags); assert((flags & 
CHUNK_MAP_DECOMMITTED) == 0 || (flags & (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0); - arena_mapbitsp_write(mapbitsp, (size << CHUNK_MAP_SIZE_SHIFT) | + arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) | CHUNK_MAP_BININD_INVALID | flags | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED); } JEMALLOC_ALWAYS_INLINE void arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind, - index_t binind) + szind_t binind) { size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); size_t mapbits = arena_mapbitsp_read(mapbitsp); @@ -824,7 +856,7 @@ arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind, JEMALLOC_ALWAYS_INLINE void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind, - index_t binind, size_t flags) + szind_t binind, size_t flags) { size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); @@ -901,10 +933,10 @@ arena_prof_accum(arena_t *arena, uint64_t accumbytes) } } -JEMALLOC_ALWAYS_INLINE index_t +JEMALLOC_ALWAYS_INLINE szind_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits) { - index_t binind; + szind_t binind; binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT; @@ -916,7 +948,7 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits) size_t rpages_ind; arena_run_t *run; arena_bin_t *bin; - index_t run_binind, actual_binind; + szind_t run_binind, actual_binind; arena_bin_info_t *bin_info; arena_chunk_map_misc_t *miscelm; void *rpages; @@ -950,10 +982,10 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits) # endif /* JEMALLOC_ARENA_INLINE_A */ # ifdef JEMALLOC_ARENA_INLINE_B -JEMALLOC_INLINE index_t +JEMALLOC_INLINE szind_t arena_bin_index(arena_t *arena, arena_bin_t *bin) { - index_t binind = bin - arena->bins; + szind_t binind = bin - arena->bins; assert(binind < NBINS); return (binind); } @@ -1060,7 +1092,7 @@ arena_prof_tctx_get(const void *ptr) } JEMALLOC_INLINE void -arena_prof_tctx_set(const void *ptr, prof_tctx_t *tctx) +arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx) { arena_chunk_t *chunk; @@ -1070,12 +1102,25 @@ arena_prof_tctx_set(const void *ptr, prof_tctx_t *tctx) chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); if (likely(chunk != ptr)) { size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; + assert(arena_mapbits_allocated_get(chunk, pageind) != 0); - if (unlikely(arena_mapbits_large_get(chunk, pageind) != 0)) { - arena_chunk_map_misc_t *elm = arena_miscelm_get(chunk, - pageind); + if (unlikely(usize > SMALL_MAXCLASS || tctx > + (prof_tctx_t *)(uintptr_t)1U)) { + arena_chunk_map_misc_t *elm; + + assert(arena_mapbits_large_get(chunk, pageind) != 0); + + elm = arena_miscelm_get(chunk, pageind); atomic_write_p(&elm->prof_tctx_pun, tctx); + } else { + /* + * tctx must always be initialized for large runs. + * Assert that the surrounding conditional logic is + * equivalent to checking whether ptr refers to a large + * run. + */ + assert(arena_mapbits_large_get(chunk, pageind) == 0); } } else huge_prof_tctx_set(ptr, tctx); @@ -1131,7 +1176,7 @@ arena_salloc(const void *ptr, bool demote) size_t ret; arena_chunk_t *chunk; size_t pageind; - index_t binind; + szind_t binind; assert(ptr != NULL); @@ -1190,7 +1235,7 @@ arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache) if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) { /* Small allocation. 
*/ if (likely(tcache != NULL)) { - index_t binind = arena_ptr_small_binind_get(ptr, + szind_t binind = arena_ptr_small_binind_get(ptr, mapbits); tcache_dalloc_small(tsd, tcache, ptr, binind); } else { @@ -1242,7 +1287,7 @@ arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache) if (likely(size <= SMALL_MAXCLASS)) { /* Small allocation. */ if (likely(tcache != NULL)) { - index_t binind = size2index(size); + szind_t binind = size2index(size); tcache_dalloc_small(tsd, tcache, ptr, binind); } else { size_t pageind = ((uintptr_t)ptr - diff --git a/memory/jemalloc/src/include/jemalloc/internal/jemalloc_internal.h.in b/memory/jemalloc/src/include/jemalloc/internal/jemalloc_internal.h.in index 7a137b6293ba..f6e464e9c139 100644 --- a/memory/jemalloc/src/include/jemalloc/internal/jemalloc_internal.h.in +++ b/memory/jemalloc/src/include/jemalloc/internal/jemalloc_internal.h.in @@ -184,7 +184,7 @@ static const bool config_cache_oblivious = #include "jemalloc/internal/jemalloc_internal_macros.h" /* Size class index type. */ -typedef unsigned index_t; +typedef unsigned szind_t; /* * Flags bits: @@ -511,12 +511,12 @@ void jemalloc_postfork_child(void); #include "jemalloc/internal/huge.h" #ifndef JEMALLOC_ENABLE_INLINE -index_t size2index_compute(size_t size); -index_t size2index_lookup(size_t size); -index_t size2index(size_t size); -size_t index2size_compute(index_t index); -size_t index2size_lookup(index_t index); -size_t index2size(index_t index); +szind_t size2index_compute(size_t size); +szind_t size2index_lookup(size_t size); +szind_t size2index(size_t size); +size_t index2size_compute(szind_t index); +size_t index2size_lookup(szind_t index); +size_t index2size(szind_t index); size_t s2u_compute(size_t size); size_t s2u_lookup(size_t size); size_t s2u(size_t size); @@ -527,7 +527,7 @@ arena_t *arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing, #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_)) -JEMALLOC_INLINE index_t +JEMALLOC_INLINE szind_t size2index_compute(size_t size) { @@ -558,7 +558,7 @@ size2index_compute(size_t size) } } -JEMALLOC_ALWAYS_INLINE index_t +JEMALLOC_ALWAYS_INLINE szind_t size2index_lookup(size_t size) { @@ -571,7 +571,7 @@ size2index_lookup(size_t size) } } -JEMALLOC_ALWAYS_INLINE index_t +JEMALLOC_ALWAYS_INLINE szind_t size2index(size_t size) { @@ -582,7 +582,7 @@ size2index(size_t size) } JEMALLOC_INLINE size_t -index2size_compute(index_t index) +index2size_compute(szind_t index) { #if (NTBINS > 0) @@ -609,7 +609,7 @@ index2size_compute(index_t index) } JEMALLOC_ALWAYS_INLINE size_t -index2size_lookup(index_t index) +index2size_lookup(szind_t index) { size_t ret = (size_t)index2size_tab[index]; assert(ret == index2size_compute(index)); @@ -617,7 +617,7 @@ index2size_lookup(index_t index) } JEMALLOC_ALWAYS_INLINE size_t -index2size(index_t index) +index2size(szind_t index) { assert(index < NSIZES); @@ -976,7 +976,7 @@ u2rz(size_t usize) size_t ret; if (usize <= SMALL_MAXCLASS) { - index_t binind = size2index(usize); + szind_t binind = size2index(usize); ret = arena_bin_info[binind].redzone_size; } else ret = 0; diff --git a/memory/jemalloc/src/include/jemalloc/internal/private_symbols.txt b/memory/jemalloc/src/include/jemalloc/internal/private_symbols.txt index dbf6aa7cdd2f..ed1f6c298dc6 100644 --- a/memory/jemalloc/src/include/jemalloc/internal/private_symbols.txt +++ b/memory/jemalloc/src/include/jemalloc/internal/private_symbols.txt @@ -50,6 +50,8 @@ arena_mapbits_large_size_get arena_mapbitsp_get arena_mapbitsp_read 
arena_mapbitsp_write +arena_mapbits_size_decode +arena_mapbits_size_encode arena_mapbits_small_runind_get arena_mapbits_small_set arena_mapbits_unallocated_set diff --git a/memory/jemalloc/src/include/jemalloc/internal/prof.h b/memory/jemalloc/src/include/jemalloc/internal/prof.h index 2e2271168d45..fe89828b4fb2 100644 --- a/memory/jemalloc/src/include/jemalloc/internal/prof.h +++ b/memory/jemalloc/src/include/jemalloc/internal/prof.h @@ -332,7 +332,7 @@ bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool commit, prof_tdata_t **tdata_out); prof_tctx_t *prof_alloc_prep(tsd_t *tsd, size_t usize, bool update); prof_tctx_t *prof_tctx_get(const void *ptr); -void prof_tctx_set(const void *ptr, prof_tctx_t *tctx); +void prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx); void prof_malloc_sample_object(const void *ptr, size_t usize, prof_tctx_t *tctx); void prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx); @@ -402,13 +402,13 @@ prof_tctx_get(const void *ptr) } JEMALLOC_ALWAYS_INLINE void -prof_tctx_set(const void *ptr, prof_tctx_t *tctx) +prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx) { cassert(config_prof); assert(ptr != NULL); - arena_prof_tctx_set(ptr, tctx); + arena_prof_tctx_set(ptr, usize, tctx); } JEMALLOC_ALWAYS_INLINE bool @@ -473,7 +473,7 @@ prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx) if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) prof_malloc_sample_object(ptr, usize, tctx); else - prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U); + prof_tctx_set(ptr, usize, (prof_tctx_t *)(uintptr_t)1U); } JEMALLOC_ALWAYS_INLINE void @@ -503,7 +503,7 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx, if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) prof_malloc_sample_object(ptr, usize, tctx); else - prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U); + prof_tctx_set(ptr, usize, (prof_tctx_t *)(uintptr_t)1U); } JEMALLOC_ALWAYS_INLINE void diff --git a/memory/jemalloc/src/include/jemalloc/internal/tcache.h b/memory/jemalloc/src/include/jemalloc/internal/tcache.h index 493f4575b61f..5079cd266888 100644 --- a/memory/jemalloc/src/include/jemalloc/internal/tcache.h +++ b/memory/jemalloc/src/include/jemalloc/internal/tcache.h @@ -77,7 +77,7 @@ struct tcache_s { ql_elm(tcache_t) link; /* Used for aggregating stats. */ uint64_t prof_accumbytes;/* Cleared after arena_prof_accum(). */ unsigned ev_cnt; /* Event count since incremental GC. */ - index_t next_gc_bin; /* Next bin to GC. */ + szind_t next_gc_bin; /* Next bin to GC. */ tcache_bin_t tbins[1]; /* Dynamically sized. 
*/ /* * The pointer stacks associated with tbins follow as a contiguous @@ -126,10 +126,10 @@ extern tcaches_t *tcaches; size_t tcache_salloc(const void *ptr); void tcache_event_hard(tsd_t *tsd, tcache_t *tcache); void *tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache, - tcache_bin_t *tbin, index_t binind); + tcache_bin_t *tbin, szind_t binind); void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin, - index_t binind, unsigned rem); -void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, index_t binind, + szind_t binind, unsigned rem); +void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind, unsigned rem, tcache_t *tcache); void tcache_arena_associate(tcache_t *tcache, arena_t *arena); void tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena, @@ -161,7 +161,7 @@ void *tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, void *tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, bool zero); void tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, - index_t binind); + szind_t binind); void tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size); tcache_t *tcaches_get(tsd_t *tsd, unsigned ind); @@ -267,7 +267,7 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, bool zero) { void *ret; - index_t binind; + szind_t binind; size_t usize; tcache_bin_t *tbin; @@ -312,7 +312,7 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, bool zero) { void *ret; - index_t binind; + szind_t binind; size_t usize; tcache_bin_t *tbin; @@ -360,7 +360,7 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, } JEMALLOC_ALWAYS_INLINE void -tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, index_t binind) +tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind) { tcache_bin_t *tbin; tcache_bin_info_t *tbin_info; @@ -386,7 +386,7 @@ tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, index_t binind) JEMALLOC_ALWAYS_INLINE void tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size) { - index_t binind; + szind_t binind; tcache_bin_t *tbin; tcache_bin_info_t *tbin_info; diff --git a/memory/jemalloc/src/include/jemalloc/jemalloc_protos.h.in b/memory/jemalloc/src/include/jemalloc/jemalloc_protos.h.in index 317ffdb9f2a3..a78414b196fc 100644 --- a/memory/jemalloc/src/include/jemalloc/jemalloc_protos.h.in +++ b/memory/jemalloc/src/include/jemalloc/jemalloc_protos.h.in @@ -56,7 +56,7 @@ JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW @je_@malloc_usable_size( #ifdef JEMALLOC_OVERRIDE_MEMALIGN JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW *@je_@memalign(size_t alignment, size_t size) - JEMALLOC_ATTR(malloc); + JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc); #endif #ifdef JEMALLOC_OVERRIDE_VALLOC diff --git a/memory/jemalloc/src/src/arena.c b/memory/jemalloc/src/src/arena.c index af48b39d5955..f1216440bba4 100644 --- a/memory/jemalloc/src/src/arena.c +++ b/memory/jemalloc/src/src/arena.c @@ -39,7 +39,7 @@ JEMALLOC_INLINE_C arena_chunk_map_misc_t * arena_miscelm_key_create(size_t size) { - return ((arena_chunk_map_misc_t *)((size << CHUNK_MAP_SIZE_SHIFT) | + return ((arena_chunk_map_misc_t *)(arena_mapbits_size_encode(size) | CHUNK_MAP_KEY)); } @@ -58,8 +58,7 @@ arena_miscelm_key_size_get(const arena_chunk_map_misc_t *miscelm) assert(arena_miscelm_is_key(miscelm)); - return (((uintptr_t)miscelm & CHUNK_MAP_SIZE_MASK) >> - 
CHUNK_MAP_SIZE_SHIFT); + return (arena_mapbits_size_decode((uintptr_t)miscelm)); } JEMALLOC_INLINE_C size_t @@ -73,7 +72,7 @@ arena_miscelm_size_get(arena_chunk_map_misc_t *miscelm) chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm); pageind = arena_miscelm_to_pageind(miscelm); mapbits = arena_mapbits_get(chunk, pageind); - return ((mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT); + return (arena_mapbits_size_decode(mapbits)); } JEMALLOC_INLINE_C int @@ -315,7 +314,7 @@ arena_run_reg_dalloc(arena_run_t *run, void *ptr) arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; size_t mapbits = arena_mapbits_get(chunk, pageind); - index_t binind = arena_ptr_small_binind_get(ptr, mapbits); + szind_t binind = arena_ptr_small_binind_get(ptr, mapbits); arena_bin_info_t *bin_info = &arena_bin_info[binind]; unsigned regind = arena_run_regind(run, bin_info, ptr); @@ -508,7 +507,7 @@ arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero) static bool arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size, - index_t binind) + szind_t binind) { arena_chunk_t *chunk; arena_chunk_map_misc_t *miscelm; @@ -780,7 +779,7 @@ arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk) static void arena_huge_malloc_stats_update(arena_t *arena, size_t usize) { - index_t index = size2index(usize) - nlclasses - NBINS; + szind_t index = size2index(usize) - nlclasses - NBINS; cassert(config_stats); @@ -793,7 +792,7 @@ arena_huge_malloc_stats_update(arena_t *arena, size_t usize) static void arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize) { - index_t index = size2index(usize) - nlclasses - NBINS; + szind_t index = size2index(usize) - nlclasses - NBINS; cassert(config_stats); @@ -806,7 +805,7 @@ arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize) static void arena_huge_dalloc_stats_update(arena_t *arena, size_t usize) { - index_t index = size2index(usize) - nlclasses - NBINS; + szind_t index = size2index(usize) - nlclasses - NBINS; cassert(config_stats); @@ -819,7 +818,7 @@ arena_huge_dalloc_stats_update(arena_t *arena, size_t usize) static void arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize) { - index_t index = size2index(usize) - nlclasses - NBINS; + szind_t index = size2index(usize) - nlclasses - NBINS; cassert(config_stats); @@ -1125,7 +1124,7 @@ arena_run_alloc_large(arena_t *arena, size_t size, bool zero) } static arena_run_t * -arena_run_alloc_small_helper(arena_t *arena, size_t size, index_t binind) +arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind) { arena_run_t *run = arena_run_first_best_fit(arena, size); if (run != NULL) { @@ -1136,7 +1135,7 @@ arena_run_alloc_small_helper(arena_t *arena, size_t size, index_t binind) } static arena_run_t * -arena_run_alloc_small(arena_t *arena, size_t size, index_t binind) +arena_run_alloc_small(arena_t *arena, size_t size, szind_t binind) { arena_chunk_t *chunk; arena_run_t *run; @@ -1749,15 +1748,6 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned, arena_maybe_purge(arena); } -static void -arena_run_dalloc_decommit(arena_t *arena, arena_chunk_t *chunk, - arena_run_t *run) -{ - bool committed = arena_run_decommit(arena, chunk, run); - - arena_run_dalloc(arena, run, committed, false, !committed); -} - static void arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, size_t oldsize, size_t newsize) @@ -1889,7 +1879,7 @@ static arena_run_t * 
arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin) { arena_run_t *run; - index_t binind; + szind_t binind; arena_bin_info_t *bin_info; /* Look for a usable run. */ @@ -1940,7 +1930,7 @@ static void * arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin) { void *ret; - index_t binind; + szind_t binind; arena_bin_info_t *bin_info; arena_run_t *run; @@ -1986,7 +1976,7 @@ arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin) } void -arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, index_t binind, +arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) { unsigned i, nfill; @@ -2131,7 +2121,7 @@ arena_dalloc_junk_small_t *arena_dalloc_junk_small = void arena_quarantine_junk_small(void *ptr, size_t usize) { - index_t binind; + szind_t binind; arena_bin_info_t *bin_info; cassert(config_fill); assert(opt_junk_free); @@ -2149,7 +2139,7 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero) void *ret; arena_bin_t *bin; arena_run_t *run; - index_t binind; + szind_t binind; binind = size2index(size); assert(binind < NBINS); @@ -2233,7 +2223,7 @@ arena_malloc_large(arena_t *arena, size_t size, bool zero) ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) + random_offset); if (config_stats) { - index_t index = size2index(usize) - NBINS; + szind_t index = size2index(usize) - NBINS; arena->stats.nmalloc_large++; arena->stats.nrequests_large++; @@ -2326,7 +2316,7 @@ arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, ret = arena_miscelm_to_rpages(miscelm); if (config_stats) { - index_t index = size2index(usize) - NBINS; + szind_t index = size2index(usize) - NBINS; arena->stats.nmalloc_large++; arena->stats.nrequests_large++; @@ -2385,7 +2375,7 @@ arena_prof_promoted(const void *ptr, size_t size) { arena_chunk_t *chunk; size_t pageind; - index_t binind; + szind_t binind; cassert(config_prof); assert(ptr != NULL); @@ -2413,7 +2403,7 @@ arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run, if (run == bin->runcur) bin->runcur = NULL; else { - index_t binind = arena_bin_index(extent_node_arena_get( + szind_t binind = arena_bin_index(extent_node_arena_get( &chunk->node), bin); arena_bin_info_t *bin_info = &arena_bin_info[binind]; @@ -2440,7 +2430,7 @@ arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, malloc_mutex_unlock(&bin->lock); /******************************/ malloc_mutex_lock(&arena->lock); - arena_run_dalloc_decommit(arena, chunk, run); + arena_run_dalloc(arena, run, true, false, false); malloc_mutex_unlock(&arena->lock); /****************************/ malloc_mutex_lock(&bin->lock); @@ -2477,7 +2467,7 @@ arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr, arena_run_t *run; arena_bin_t *bin; arena_bin_info_t *bin_info; - index_t binind; + szind_t binind; pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); @@ -2574,7 +2564,7 @@ arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk, if (!junked) arena_dalloc_junk_large(ptr, usize); if (config_stats) { - index_t index = size2index(usize) - NBINS; + szind_t index = size2index(usize) - NBINS; arena->stats.ndalloc_large++; arena->stats.allocated_large -= usize; @@ -2583,7 +2573,7 @@ arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk, } } - arena_run_dalloc_decommit(arena, chunk, run); + arena_run_dalloc(arena, run, true, false, false); } void @@ -2621,8 +2611,8 @@ 
arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr, arena_run_trim_tail(arena, chunk, run, oldsize + large_pad, size + large_pad, true); if (config_stats) { - index_t oldindex = size2index(oldsize) - NBINS; - index_t index = size2index(size) - NBINS; + szind_t oldindex = size2index(oldsize) - NBINS; + szind_t index = size2index(size) - NBINS; arena->stats.ndalloc_large++; arena->stats.allocated_large -= oldsize; @@ -2700,8 +2690,8 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr, pageind+npages-1))); if (config_stats) { - index_t oldindex = size2index(oldsize) - NBINS; - index_t index = size2index(size) - NBINS; + szind_t oldindex = size2index(oldsize) - NBINS; + szind_t index = size2index(size) - NBINS; arena->stats.ndalloc_large++; arena->stats.allocated_large -= oldsize; diff --git a/memory/jemalloc/src/src/huge.c b/memory/jemalloc/src/src/huge.c index 54c2114ce2ba..4d5887c4f618 100644 --- a/memory/jemalloc/src/src/huge.c +++ b/memory/jemalloc/src/src/huge.c @@ -148,11 +148,12 @@ huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize, /* Fill if necessary (shrinking). */ if (oldsize > usize) { size_t sdiff = oldsize - usize; - zeroed = !chunk_purge_wrapper(arena, &chunk_hooks, ptr, - CHUNK_CEILING(usize), usize, sdiff); if (config_fill && unlikely(opt_junk_free)) { memset((void *)((uintptr_t)ptr + usize), 0x5a, sdiff); zeroed = false; + } else { + zeroed = !chunk_purge_wrapper(arena, &chunk_hooks, ptr, + CHUNK_CEILING(oldsize), usize, sdiff); } } else zeroed = true; @@ -202,14 +203,15 @@ huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize) if (oldsize > usize) { size_t sdiff = oldsize - usize; - zeroed = !chunk_purge_wrapper(arena, &chunk_hooks, - CHUNK_ADDR2BASE((uintptr_t)ptr + usize), - CHUNK_CEILING(usize), CHUNK_ADDR2OFFSET((uintptr_t)ptr + - usize), sdiff); if (config_fill && unlikely(opt_junk_free)) { huge_dalloc_junk((void *)((uintptr_t)ptr + usize), sdiff); zeroed = false; + } else { + zeroed = !chunk_purge_wrapper(arena, &chunk_hooks, + CHUNK_ADDR2BASE((uintptr_t)ptr + usize), + CHUNK_CEILING(oldsize), + CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff); } } else zeroed = true; diff --git a/memory/jemalloc/src/src/jemalloc.c b/memory/jemalloc/src/src/jemalloc.c index ed7863b927bf..7cf1487a1f79 100644 --- a/memory/jemalloc/src/src/jemalloc.c +++ b/memory/jemalloc/src/src/jemalloc.c @@ -179,13 +179,24 @@ static bool malloc_initializer = NO_INITIALIZER; static malloc_mutex_t init_lock = SRWLOCK_INIT; #else static malloc_mutex_t init_lock; +static bool init_lock_initialized = false; JEMALLOC_ATTR(constructor) static void WINAPI _init_init_lock(void) { - malloc_mutex_init(&init_lock); + /* If another constructor in the same binary is using mallctl to + * e.g. setup chunk hooks, it may end up running before this one, + * and malloc_init_hard will crash trying to lock the uninitialized + * lock. So we force an initialization of the lock in + * malloc_init_hard as well. We don't try to care about atomicity + * of the accessed to the init_lock_initialized boolean, since it + * really only matters early in the process creation, before any + * separate thread normally starts doing anything. */ + if (!init_lock_initialized) + malloc_mutex_init(&init_lock); + init_lock_initialized = true; } #ifdef _MSC_VER @@ -510,17 +521,17 @@ arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing) assert(ind < narenas_actual || !init_if_missing); narenas_cache = (ind < narenas_actual) ? 
narenas_actual : ind+1; - if (!*arenas_cache_bypassp) { + if (tsd_nominal(tsd) && !*arenas_cache_bypassp) { *arenas_cache_bypassp = true; arenas_cache = (arena_t **)a0malloc(sizeof(arena_t *) * narenas_cache); *arenas_cache_bypassp = false; - } else - arenas_cache = NULL; + } if (arenas_cache == NULL) { /* * This function must always tell the truth, even if - * it's slow, so don't let OOM or recursive allocation + * it's slow, so don't let OOM, thread cleanup (note + * tsd_nominal check), nor recursive allocation * avoidance (note arenas_cache_bypass check) get in the * way. */ @@ -531,6 +542,7 @@ arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing) malloc_mutex_unlock(&arenas_lock); return (arena); } + assert(tsd_nominal(tsd) && !*arenas_cache_bypassp); tsd_arenas_cache_set(tsd, arenas_cache); tsd_narenas_cache_set(tsd, narenas_cache); } @@ -649,8 +661,10 @@ arenas_cache_cleanup(tsd_t *tsd) arena_t **arenas_cache; arenas_cache = tsd_arenas_cache_get(tsd); - if (arenas_cache != NULL) + if (arenas_cache != NULL) { + tsd_arenas_cache_set(tsd, NULL); a0dalloc(arenas_cache); + } } void @@ -1297,6 +1311,9 @@ static bool malloc_init_hard(void) { +#if defined(_WIN32) && _WIN32_WINNT < 0x0600 + _init_init_lock(); +#endif malloc_mutex_lock(&init_lock); if (!malloc_init_hard_needed()) { malloc_mutex_unlock(&init_lock); diff --git a/memory/jemalloc/src/src/prof.c b/memory/jemalloc/src/src/prof.c index a05792fd6b5b..b79eba64b39b 100644 --- a/memory/jemalloc/src/src/prof.c +++ b/memory/jemalloc/src/src/prof.c @@ -219,7 +219,7 @@ void prof_malloc_sample_object(const void *ptr, size_t usize, prof_tctx_t *tctx) { - prof_tctx_set(ptr, tctx); + prof_tctx_set(ptr, usize, tctx); malloc_mutex_lock(tctx->tdata->lock); tctx->cnts.curobjs++; diff --git a/memory/jemalloc/src/src/tcache.c b/memory/jemalloc/src/src/tcache.c index 3814365ce4f9..f1a30d5056f7 100644 --- a/memory/jemalloc/src/src/tcache.c +++ b/memory/jemalloc/src/src/tcache.c @@ -32,7 +32,7 @@ size_t tcache_salloc(const void *ptr) void tcache_event_hard(tsd_t *tsd, tcache_t *tcache) { - index_t binind = tcache->next_gc_bin; + szind_t binind = tcache->next_gc_bin; tcache_bin_t *tbin = &tcache->tbins[binind]; tcache_bin_info_t *tbin_info = &tcache_bin_info[binind]; @@ -72,7 +72,7 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache) void * tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache, - tcache_bin_t *tbin, index_t binind) + tcache_bin_t *tbin, szind_t binind) { void *ret; @@ -87,7 +87,7 @@ tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache, void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin, - index_t binind, unsigned rem) + szind_t binind, unsigned rem) { arena_t *arena; void *ptr; @@ -166,7 +166,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin, } void -tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, index_t binind, +tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind, unsigned rem, tcache_t *tcache) { arena_t *arena; diff --git a/memory/jemalloc/src/test/integration/chunk.c b/memory/jemalloc/src/test/integration/chunk.c index 7eb1b6d2a454..af1c9a53e44d 100644 --- a/memory/jemalloc/src/test/integration/chunk.c +++ b/memory/jemalloc/src/test/integration/chunk.c @@ -1,5 +1,9 @@ #include "test/jemalloc_test.h" +#ifdef JEMALLOC_FILL +const char *malloc_conf = "junk:false"; +#endif + static chunk_hooks_t orig_hooks; static chunk_hooks_t old_hooks; diff --git a/memory/jemalloc/src/test/unit/prof_reset.c 
b/memory/jemalloc/src/test/unit/prof_reset.c index 3af1964294a9..da34d7026e44 100644 --- a/memory/jemalloc/src/test/unit/prof_reset.c +++ b/memory/jemalloc/src/test/unit/prof_reset.c @@ -16,6 +16,27 @@ prof_dump_open_intercept(bool propagate_err, const char *filename) return (fd); } +static size_t +get_lg_prof_sample(void) +{ + size_t lg_prof_sample; + size_t sz = sizeof(size_t); + + assert_d_eq(mallctl("prof.lg_sample", &lg_prof_sample, &sz, NULL, 0), 0, + "Unexpected mallctl failure while reading profiling sample rate"); + return (lg_prof_sample); +} + +static void +do_prof_reset(size_t lg_prof_sample) +{ + assert_d_eq(mallctl("prof.reset", NULL, NULL, + &lg_prof_sample, sizeof(size_t)), 0, + "Unexpected mallctl failure while resetting profile data"); + assert_zu_eq(lg_prof_sample, get_lg_prof_sample(), + "Expected profile sample rate change"); +} + TEST_BEGIN(test_prof_reset_basic) { size_t lg_prof_sample_orig, lg_prof_sample, lg_prof_sample_next; @@ -30,9 +51,7 @@ TEST_BEGIN(test_prof_reset_basic) "Unexpected mallctl failure while reading profiling sample rate"); assert_zu_eq(lg_prof_sample_orig, 0, "Unexpected profiling sample rate"); - sz = sizeof(size_t); - assert_d_eq(mallctl("prof.lg_sample", &lg_prof_sample, &sz, NULL, 0), 0, - "Unexpected mallctl failure while reading profiling sample rate"); + lg_prof_sample = get_lg_prof_sample(); assert_zu_eq(lg_prof_sample_orig, lg_prof_sample, "Unexpected disagreement between \"opt.lg_prof_sample\" and " "\"prof.lg_sample\""); @@ -41,10 +60,7 @@ TEST_BEGIN(test_prof_reset_basic) for (i = 0; i < 2; i++) { assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0, "Unexpected mallctl failure while resetting profile data"); - sz = sizeof(size_t); - assert_d_eq(mallctl("prof.lg_sample", &lg_prof_sample, &sz, - NULL, 0), 0, "Unexpected mallctl failure while reading " - "profiling sample rate"); + lg_prof_sample = get_lg_prof_sample(); assert_zu_eq(lg_prof_sample_orig, lg_prof_sample, "Unexpected profile sample rate change"); } @@ -52,22 +68,15 @@ TEST_BEGIN(test_prof_reset_basic) /* Test resets with prof.lg_sample changes. */ lg_prof_sample_next = 1; for (i = 0; i < 2; i++) { - assert_d_eq(mallctl("prof.reset", NULL, NULL, - &lg_prof_sample_next, sizeof(size_t)), 0, - "Unexpected mallctl failure while resetting profile data"); - sz = sizeof(size_t); - assert_d_eq(mallctl("prof.lg_sample", &lg_prof_sample, &sz, - NULL, 0), 0, "Unexpected mallctl failure while reading " - "profiling sample rate"); + do_prof_reset(lg_prof_sample_next); + lg_prof_sample = get_lg_prof_sample(); assert_zu_eq(lg_prof_sample, lg_prof_sample_next, "Expected profile sample rate change"); lg_prof_sample_next = lg_prof_sample_orig; } /* Make sure the test code restored prof.lg_sample. 
*/ - sz = sizeof(size_t); - assert_d_eq(mallctl("prof.lg_sample", &lg_prof_sample, &sz, NULL, 0), 0, - "Unexpected mallctl failure while reading profiling sample rate"); + lg_prof_sample = get_lg_prof_sample(); assert_zu_eq(lg_prof_sample_orig, lg_prof_sample, "Unexpected disagreement between \"opt.lg_prof_sample\" and " "\"prof.lg_sample\""); @@ -182,6 +191,7 @@ thd_start(void *varg) TEST_BEGIN(test_prof_reset) { + size_t lg_prof_sample_orig; bool active; thd_t thds[NTHREADS]; unsigned thd_args[NTHREADS]; @@ -195,6 +205,9 @@ TEST_BEGIN(test_prof_reset) "Unexpected pre-existing tdata structures"); tdata_count = prof_tdata_count(); + lg_prof_sample_orig = get_lg_prof_sample(); + do_prof_reset(5); + active = true; assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)), 0, "Unexpected mallctl failure while activating profiling"); @@ -214,6 +227,8 @@ TEST_BEGIN(test_prof_reset) active = false; assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)), 0, "Unexpected mallctl failure while deactivating profiling"); + + do_prof_reset(lg_prof_sample_orig); } TEST_END #undef NTHREADS diff --git a/memory/jemalloc/src/test/unit/size_classes.c b/memory/jemalloc/src/test/unit/size_classes.c index d79183469cfd..d3aaebd77665 100644 --- a/memory/jemalloc/src/test/unit/size_classes.c +++ b/memory/jemalloc/src/test/unit/size_classes.c @@ -26,7 +26,7 @@ get_max_size_class(void) TEST_BEGIN(test_size_classes) { size_t size_class, max_size_class; - index_t index, max_index; + szind_t index, max_index; max_size_class = get_max_size_class(); max_index = size2index(max_size_class); diff --git a/memory/jemalloc/src/test/unit/tsd.c b/memory/jemalloc/src/test/unit/tsd.c index b031c484e090..8be787fda935 100644 --- a/memory/jemalloc/src/test/unit/tsd.c +++ b/memory/jemalloc/src/test/unit/tsd.c @@ -56,9 +56,14 @@ static void * thd_start(void *arg) { data_t d = (data_t)(uintptr_t)arg; + void *p; + assert_x_eq(*data_tsd_get(), DATA_INIT, "Initial tsd get should return initialization value"); + p = malloc(1); + assert_ptr_not_null(p, "Unexpected malloc() failure"); + data_tsd_set(&d); assert_x_eq(*data_tsd_get(), d, "After tsd set, tsd get should return value that was set"); @@ -67,6 +72,7 @@ thd_start(void *arg) assert_x_eq(*data_tsd_get(), (data_t)(uintptr_t)arg, "Resetting local data should have no effect on tsd"); + free(p); return (NULL); } diff --git a/memory/jemalloc/upstream.info b/memory/jemalloc/upstream.info index e5482e1df88c..c58020a2217a 100644 --- a/memory/jemalloc/upstream.info +++ b/memory/jemalloc/upstream.info @@ -1,2 +1,2 @@ -UPSTREAM_REPO=https://github.com/jemalloc/jemalloc -UPSTREAM_COMMIT=4.0.0 +UPSTREAM_REPO=https://github.com/glandium/jemalloc +UPSTREAM_COMMIT=ed4883285e111b426e5769b24dad164ebacaa5b9