diff --git a/memory/build/Utils.h b/memory/build/Utils.h
index c8a96f8b0c48..d4bbe4ec645c 100644
--- a/memory/build/Utils.h
+++ b/memory/build/Utils.h
@@ -32,14 +32,24 @@ CompareAddr(T* aAddr1, T* aAddr2)
 }
 
 // User-defined literals to make constants more legible
-constexpr unsigned long long int operator"" _KiB(unsigned long long int aNum)
+constexpr size_t operator"" _KiB(unsigned long long int aNum)
 {
-  return aNum * 1024;
+  return size_t(aNum) * 1024;
 }
 
-constexpr unsigned long long int operator"" _MiB(unsigned long long int aNum)
+constexpr size_t operator"" _KiB(long double aNum)
 {
-  return aNum * 1024_KiB;
+  return size_t(aNum * 1024);
+}
+
+constexpr size_t operator"" _MiB(unsigned long long int aNum)
+{
+  return size_t(aNum) * 1024_KiB;
+}
+
+constexpr size_t operator"" _MiB(long double aNum)
+{
+  return size_t(aNum * 1024_KiB);
 }
 
 constexpr long double operator""_percent(long double aPercent)
diff --git a/memory/build/malloc_decls.h b/memory/build/malloc_decls.h
index 540082d349dd..0796f9344d4d 100644
--- a/memory/build/malloc_decls.h
+++ b/memory/build/malloc_decls.h
@@ -105,7 +105,9 @@ MALLOC_DECL(jemalloc_ptr_info, void, const void*, jemalloc_ptr_info_t*)
 // functions.
 MALLOC_DECL(moz_create_arena, arena_id_t)
 
-// Dispose of the given arena. Subsequent uses of the arena will fail.
+// Dispose of the given arena. Subsequent uses of the arena will crash.
+// Passing an invalid id (inexistent or already disposed) to this function
+// will crash.
 MALLOC_DECL(moz_dispose_arena, void, arena_id_t)
 #endif
 
@@ -113,8 +115,9 @@ MALLOC_DECL(moz_dispose_arena, void, arena_id_t)
 // Same as the functions without the moz_arena_ prefix, but using arenas
 // created with moz_create_arena.
 // The contract, even if not enforced at runtime in some configurations,
-// is that moz_arena_realloc and moz_arena_free will crash if the wrong
-// arena id is given. All functions will crash if the arena id is invalid.
+// is that moz_arena_realloc and moz_arena_free will crash if the given
+// arena doesn't own the given pointer. All functions will crash if the
+// arena id is invalid.
 // Although discouraged, plain realloc and free can still be used on
 // pointers allocated with these functions. Realloc will properly keep
 // new pointers in the same arena as the original.
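For reference, the reworked literals are constexpr, now yield size_t, and gain long double overloads so fractional constants such as 1.5_KiB can be written directly. They evaluate as follows (illustrative static_asserts only, not part of the patch):

    static_assert(1_KiB == 1024, "integral _KiB scales by 1024");
    static_assert(1.5_KiB == 1536, "fractional _KiB scales, then truncates to size_t");
    static_assert(2_MiB == 2 * 1024 * 1024, "integral _MiB scales by 1024 * 1024");
    static_assert(2.5_MiB == 2621440, "fractional _MiB behaves the same way");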
diff --git a/memory/build/mozjemalloc.cpp b/memory/build/mozjemalloc.cpp
index efe93e806d81..55e3034f5f64 100644
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -1257,7 +1257,7 @@ huge_palloc(size_t aSize, size_t aAlignment, bool aZero, arena_t* aArena);
 static void*
 huge_ralloc(void* aPtr, size_t aSize, size_t aOldSize, arena_t* aArena);
 static void
-huge_dalloc(void* aPtr);
+huge_dalloc(void* aPtr, arena_t* aArena);
 #ifdef XP_WIN
 extern "C"
 #else
@@ -3584,7 +3584,7 @@ arena_t::DallocLarge(arena_chunk_t* aChunk, void* aPtr)
 }
 
 static inline void
-arena_dalloc(void* aPtr, size_t aOffset)
+arena_dalloc(void* aPtr, size_t aOffset, arena_t* aArena)
 {
   MOZ_ASSERT(aPtr);
   MOZ_ASSERT(aOffset != 0);
@@ -3594,6 +3594,7 @@ arena_dalloc(void* aPtr, size_t aOffset)
   auto arena = chunk->arena;
   MOZ_ASSERT(arena);
   MOZ_DIAGNOSTIC_ASSERT(arena->mMagic == ARENA_MAGIC);
+  MOZ_RELEASE_ASSERT(!aArena || arena == aArena);
 
   MutexAutoLock lock(arena->mLock);
   size_t pageind = aOffset >> gPageSize2Pow;
@@ -3609,7 +3610,7 @@
 }
 
 static inline void
-idalloc(void* ptr)
+idalloc(void* ptr, arena_t* aArena)
 {
   size_t offset;
 
@@ -3617,9 +3618,9 @@ idalloc(void* ptr)
 
   offset = GetChunkOffsetForPtr(ptr);
   if (offset != 0) {
-    arena_dalloc(ptr, offset);
+    arena_dalloc(ptr, offset, aArena);
   } else {
-    huge_dalloc(ptr);
+    huge_dalloc(ptr, aArena);
   }
 }
 
@@ -3754,7 +3755,7 @@ arena_ralloc(void* aPtr, size_t aSize, size_t aOldSize, arena_t* aArena)
   {
     memcpy(ret, aPtr, copysize);
   }
-  idalloc(aPtr);
+  idalloc(aPtr, aArena);
   return ret;
 }
 
@@ -3765,9 +3766,10 @@ iralloc(void* aPtr, size_t aSize, arena_t* aArena)
   MOZ_ASSERT(aSize != 0);
 
   auto info = AllocInfo::Get(aPtr);
-  aArena = aArena ? aArena : info.Arena();
+  auto arena = info.Arena();
+  MOZ_RELEASE_ASSERT(!aArena || arena == aArena);
+  aArena = aArena ? aArena : arena;
   size_t oldsize = info.Size();
-  MOZ_RELEASE_ASSERT(aArena);
   MOZ_DIAGNOSTIC_ASSERT(aArena->mMagic == ARENA_MAGIC);
 
   return (aSize <= gMaxLargeClass) ? arena_ralloc(aPtr, aSize, oldsize, aArena)
@@ -3966,6 +3968,7 @@ huge_ralloc(void* aPtr, size_t aSize, size_t aOldSize, arena_t* aArena)
      extent_node_t* node = huge.Search(&key);
      MOZ_ASSERT(node);
      MOZ_ASSERT(node->mSize == aOldSize);
+     MOZ_RELEASE_ASSERT(!aArena || node->mArena == aArena);
      huge_allocated -= aOldSize - psize;
      // No need to change huge_mapped, because we didn't (un)map anything.
      node->mSize = psize;
@@ -3990,6 +3993,7 @@ huge_ralloc(void* aPtr, size_t aSize, size_t aOldSize, arena_t* aArena)
      extent_node_t* node = huge.Search(&key);
      MOZ_ASSERT(node);
      MOZ_ASSERT(node->mSize == aOldSize);
+     MOZ_RELEASE_ASSERT(!aArena || node->mArena == aArena);
      huge_allocated += psize - aOldSize;
      // No need to change huge_mapped, because we didn't
      // (un)map anything.
@@ -4019,12 +4023,12 @@ huge_ralloc(void* aPtr, size_t aSize, size_t aOldSize, arena_t* aArena)
   {
     memcpy(ret, aPtr, copysize);
   }
-  idalloc(aPtr);
+  idalloc(aPtr, aArena);
   return ret;
 }
 
 static void
-huge_dalloc(void* aPtr)
+huge_dalloc(void* aPtr, arena_t* aArena)
 {
   extent_node_t* node;
   {
@@ -4036,6 +4040,7 @@ huge_dalloc(void* aPtr)
     node = huge.Search(&key);
     MOZ_ASSERT(node);
     MOZ_ASSERT(node->mAddr == aPtr);
+    MOZ_RELEASE_ASSERT(!aArena || node->mArena == aArena);
    huge.Remove(node);
 
     huge_allocated -= node->mSize;
@@ -4373,10 +4378,10 @@ BaseAllocator::free(void* aPtr)
   offset = GetChunkOffsetForPtr(aPtr);
   if (offset != 0) {
     MOZ_RELEASE_ASSERT(malloc_initialized);
-    arena_dalloc(aPtr, offset);
+    arena_dalloc(aPtr, offset, mArena);
   } else if (aPtr) {
     MOZ_RELEASE_ASSERT(malloc_initialized);
-    huge_dalloc(aPtr);
+    huge_dalloc(aPtr, mArena);
   }
 }
 
@@ -4707,9 +4712,8 @@ inline void
 MozJemalloc::moz_dispose_arena(arena_id_t aArenaId)
 {
   arena_t* arena = gArenas.GetById(aArenaId, /* IsPrivate = */ true);
-  if (arena) {
-    gArenas.DisposeArena(arena);
-  }
+  MOZ_RELEASE_ASSERT(arena);
+  gArenas.DisposeArena(arena);
 }
 
 #define MALLOC_DECL(name, return_type, ...) \
diff --git a/memory/gtest/TestJemalloc.cpp b/memory/gtest/TestJemalloc.cpp
index 6da2d06b1a51..e172493cdea9 100644
--- a/memory/gtest/TestJemalloc.cpp
+++ b/memory/gtest/TestJemalloc.cpp
@@ -12,6 +12,39 @@
 
 #include "gtest/gtest.h"
 
+#ifdef MOZ_CRASHREPORTER
+#include "nsCOMPtr.h"
+#include "nsICrashReporter.h"
+#include "nsServiceManagerUtils.h"
+#endif
+
+#if defined(DEBUG) && !defined(XP_WIN) && !defined(ANDROID)
+#define HAS_GDB_SLEEP_DURATION 1
+extern unsigned int _gdb_sleep_duration;
+#endif
+
+// Death tests are too slow on OSX because of the system crash reporter.
+#ifndef XP_DARWIN
+static void DisableCrashReporter()
+{
+#ifdef MOZ_CRASHREPORTER
+  nsCOMPtr<nsICrashReporter> crashreporter =
+    do_GetService("@mozilla.org/toolkit/crash-reporter;1");
+  if (crashreporter) {
+    crashreporter->SetEnabled(false);
+  }
+#endif
+}
+
+// Wrap ASSERT_DEATH_IF_SUPPORTED to disable the crash reporter
+// when entering the subprocess, so that the expected crashes don't
+// create a minidump that the gtest harness will interpret as an error.
+#define ASSERT_DEATH_WRAP(a, b) \
+  ASSERT_DEATH_IF_SUPPORTED({ DisableCrashReporter(); a; }, b)
+#else
+#define ASSERT_DEATH_WRAP(a, b)
+#endif
+
 using namespace mozilla;
 
 static inline void
@@ -223,3 +256,58 @@ TEST(Jemalloc, PtrInfo)
 
   jemalloc_thread_local_arena(false);
 }
+
+#ifdef NIGHTLY_BUILD
+TEST(Jemalloc, Arenas)
+{
+  arena_id_t arena = moz_create_arena();
+  ASSERT_TRUE(arena != 0);
+  void* ptr = moz_arena_malloc(arena, 42);
+  ASSERT_TRUE(ptr != nullptr);
+  ptr = moz_arena_realloc(arena, ptr, 64);
+  ASSERT_TRUE(ptr != nullptr);
+  moz_arena_free(arena, ptr);
+  ptr = moz_arena_calloc(arena, 24, 2);
+  // For convenience, free can be used to free arena pointers.
+  free(ptr);
+  moz_dispose_arena(arena);
+
+#ifdef HAS_GDB_SLEEP_DURATION
+  // Avoid death tests adding some unnecessary (long) delays.
+  unsigned int old_gdb_sleep_duration = _gdb_sleep_duration;
+  _gdb_sleep_duration = 0;
+#endif
+
+  // Can't use an arena after it's disposed.
+  ASSERT_DEATH_WRAP(moz_arena_malloc(arena, 80), "");
+
+  // Arena id 0 can't be used to somehow get to the main arena.
+  ASSERT_DEATH_WRAP(moz_arena_malloc(0, 80), "");
+
+  arena = moz_create_arena();
+  arena_id_t arena2 = moz_create_arena();
+
+  // For convenience, realloc can also be used to reallocate arena pointers.
+  // The result should be in the same arena. Test various size class transitions.
+  size_t sizes[] = { 1, 42, 80, 1_KiB, 1.5_KiB, 72_KiB, 129_KiB, 2.5_MiB, 5.1_MiB };
+  for (size_t from_size : sizes) {
+    for (size_t to_size : sizes) {
+      ptr = moz_arena_malloc(arena, from_size);
+      ptr = realloc(ptr, to_size);
+      // Freeing with the wrong arena should crash.
+      ASSERT_DEATH_WRAP(moz_arena_free(arena2, ptr), "");
+      // Likewise for moz_arena_realloc.
+      ASSERT_DEATH_WRAP(moz_arena_realloc(arena2, ptr, from_size), "");
+      // The following will crash if it's not in the right arena.
+      moz_arena_free(arena, ptr);
+    }
+  }
+
+  moz_dispose_arena(arena2);
+  moz_dispose_arena(arena);
+
+#ifdef HAS_GDB_SLEEP_DURATION
+  _gdb_sleep_duration = old_gdb_sleep_duration;
+#endif
+}
+#endif
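For reference, the arena ownership check threaded through arena_dalloc, idalloc, iralloc, huge_ralloc and huge_dalloc above boils down to a single pattern: when the caller claims an arena, that claim must match the arena recorded for the allocation (chunk->arena for arena-managed pointers, node->mArena for huge ones), and a mismatch is a release-mode crash. A minimal standalone sketch, using hypothetical names (Arena, Allocation, CheckArenaMatches) rather than the real mozjemalloc types:

    #include <cstdlib>

    struct Arena {};

    struct Allocation {
      Arena* mOwner;  // recorded at allocation time (chunk->arena / node->mArena)
    };

    // A null claim means "no arena specified" (plain free/realloc); an explicit
    // claim must match the recorded owner, otherwise we crash, the moral
    // equivalent of MOZ_RELEASE_ASSERT(!aArena || arena == aArena) in the patch.
    static void CheckArenaMatches(const Allocation& aAlloc, Arena* aClaimed)
    {
      if (aClaimed && aClaimed != aAlloc.mOwner) {
        std::abort();
      }
    }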