From 38dbbfcffcc1e4ae77ff9ddee0801ea8125eb07f Mon Sep 17 00:00:00 2001 From: Nicholas Nethercote Date: Wed, 3 Jul 2019 09:26:11 +1000 Subject: [PATCH] Bug 1523276 - Implement PHC, a probabilistic heap checker. r=glandium,gsvelto Differential Revision: https://phabricator.services.mozilla.com/D25021 --HG-- extra : rebase_source : 86e94499f746b18a596130341692c6a9992d4867 --- build/moz.configure/memory.configure | 30 + memory/build/moz.build | 3 + memory/build/mozjemalloc.cpp | 7 + memory/build/replace_malloc_bridge.h | 52 +- memory/gtest/TestJemalloc.cpp | 37 + memory/gtest/moz.build | 3 + memory/replace/logalloc/replay/moz.build | 7 +- memory/replace/moz.build | 3 + memory/replace/phc/PHC.cpp | 1352 +++++++++++++++++ memory/replace/phc/PHC.h | 88 ++ memory/replace/phc/moz.build | 32 + memory/replace/phc/test/gtest/TestPHC.cpp | 155 ++ memory/replace/phc/test/gtest/moz.build | 15 + memory/replace/phc/test/moz.build | 9 + toolkit/crashreporter/CrashAnnotations.yaml | 34 + .../linux/handler/exception_handler.cc | 36 +- .../linux/handler/exception_handler.h | 13 +- .../breakpad-client/linux/moz.build | 3 + .../mac/handler/exception_handler.cc | 8 +- .../mac/handler/exception_handler.h | 13 +- .../windows/handler/exception_handler.cc | 6 +- .../windows/handler/exception_handler.h | 8 + toolkit/crashreporter/moz.build | 3 + toolkit/crashreporter/nsExceptionHandler.cpp | 101 +- toolkit/crashreporter/test/CrashTestUtils.jsm | 2 + toolkit/crashreporter/test/moz.build | 14 +- toolkit/crashreporter/test/nsTestCrasher.cpp | 37 + .../crashreporter/test/unit/test_crash_phc.js | 43 + .../crashreporter/test/unit/xpcshell-phc.ini | 9 + .../test/unit_ipc/test_content_phc.js | 30 + .../test/unit_ipc/test_content_phc2.js | 33 + .../test/unit_ipc/xpcshell-phc.ini | 10 + 32 files changed, 2165 insertions(+), 31 deletions(-) create mode 100644 memory/replace/phc/PHC.cpp create mode 100644 memory/replace/phc/PHC.h create mode 100644 memory/replace/phc/moz.build create mode 100644 memory/replace/phc/test/gtest/TestPHC.cpp create mode 100644 memory/replace/phc/test/gtest/moz.build create mode 100644 memory/replace/phc/test/moz.build create mode 100644 toolkit/crashreporter/test/unit/test_crash_phc.js create mode 100644 toolkit/crashreporter/test/unit/xpcshell-phc.ini create mode 100644 toolkit/crashreporter/test/unit_ipc/test_content_phc.js create mode 100644 toolkit/crashreporter/test/unit_ipc/test_content_phc2.js create mode 100644 toolkit/crashreporter/test/unit_ipc/xpcshell-phc.ini diff --git a/build/moz.configure/memory.configure b/build/moz.configure/memory.configure index cdef266b1567..b3f77529b75b 100644 --- a/build/moz.configure/memory.configure +++ b/build/moz.configure/memory.configure @@ -45,3 +45,33 @@ def replace_malloc_static(build_project): set_config('MOZ_REPLACE_MALLOC_STATIC', replace_malloc_static) + +# PHC (Probabilistic Heap Checker) +# ============================================================== + +# In general, it only makes sense for PHC to run on the platforms that have a +# crash reporter. Currently it only runs on Linux, but not on 32-bit Linux +# because stack tracing frequently crashes (for unclear reasons). In the +# future, we want it to run on Win64 and Mac as well. 
+@depends(milestone, target, replace_malloc_default, '--enable-replace-malloc',
+         when='--enable-jemalloc')
+def phc_default(milestone, target, replace_malloc_default, replace_malloc):
+    if not replace_malloc_default or \
+       (replace_malloc.origin != 'default' and not replace_malloc):
+        return False
+    if not milestone.is_nightly:
+        return False
+    return (target.os == 'GNU' and target.kernel == 'Linux' and
+            target.bitness == 64)
+
+
+option('--enable-phc', env='MOZ_PHC', default=phc_default,
+       when='--enable-jemalloc',
+       help='{Enable|Disable} PHC (Probabilistic Heap Checker). '
+            'Also enables replace-malloc and frame pointers')
+imply_option('--enable-replace-malloc', True, when='--enable-phc')
+imply_option('--enable-frame-pointers', True, when='--enable-phc')
+
+
+set_config('MOZ_PHC', True, when='--enable-phc')
+
diff --git a/memory/build/moz.build b/memory/build/moz.build
index 89ffa37de827..fc68e2cd5a6b 100644
--- a/memory/build/moz.build
+++ b/memory/build/moz.build
@@ -20,6 +20,9 @@ if CONFIG['MOZ_REPLACE_MALLOC']:
         'replace_malloc_bridge.h',
     ]
 
+if CONFIG['MOZ_PHC']:
+    DEFINES['MOZ_PHC'] = True
+
 if CONFIG['MOZ_MEMORY']:
     UNIFIED_SOURCES += [
         'mozjemalloc.cpp',
diff --git a/memory/build/mozjemalloc.cpp b/memory/build/mozjemalloc.cpp
index a07f4bea914c..17f1cc62be35 100644
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -4511,6 +4511,8 @@ static void replace_malloc_init_funcs(malloc_table_t*);
 extern "C" void logalloc_init(malloc_table_t*, ReplaceMallocBridge**);
 
 extern "C" void dmd_init(malloc_table_t*, ReplaceMallocBridge**);
+
+extern "C" void phc_init(malloc_table_t*, ReplaceMallocBridge**);
 # endif
 
 bool Equals(const malloc_table_t& aTable1, const malloc_table_t& aTable2) {
@@ -4549,6 +4551,11 @@ static void init() {
     dmd_init(&tempTable, &gReplaceMallocBridge);
   }
 # endif
+# ifdef MOZ_PHC
+  if (Equals(tempTable, gDefaultMallocTable)) {
+    phc_init(&tempTable, &gReplaceMallocBridge);
+  }
+# endif
 # endif
   if (!Equals(tempTable, gDefaultMallocTable)) {
     replace_malloc_init_funcs(&tempTable);
diff --git a/memory/build/replace_malloc_bridge.h b/memory/build/replace_malloc_bridge.h
index ef5af03f53d1..358d9f25c4b3 100644
--- a/memory/build/replace_malloc_bridge.h
+++ b/memory/build/replace_malloc_bridge.h
@@ -115,6 +115,10 @@ namespace dmd {
 struct DMDFuncs;
 }  // namespace dmd
 
+namespace phc {
+class AddrInfo;
+}  // namespace phc
+
 // Callbacks to register debug file handles for Poison IO interpose.
 // See Mozilla(|Un)RegisterDebugHandle in xpcom/build/PoisonIOInterposer.h
 struct DebugFdRegistry {
@@ -126,7 +130,7 @@ struct DebugFdRegistry {
 }  // namespace mozilla
 
 struct ReplaceMallocBridge {
-  ReplaceMallocBridge() : mVersion(3) {}
+  ReplaceMallocBridge() : mVersion(4) {}
 
   // This method was added in version 1 of the bridge.
   virtual mozilla::dmd::DMDFuncs* GetDMDFuncs() { return nullptr; }
@@ -156,6 +160,28 @@ struct ReplaceMallocBridge {
     return nullptr;
   }
 
+  // If this is a PHC-handled address, return true, and if an AddrInfo is
+  // provided, fill in all of its fields. Otherwise, return false and leave
+  // AddrInfo unchanged.
+  // This method was added in version 4 of the bridge.
+  virtual bool IsPHCAllocation(const void*, mozilla::phc::AddrInfo*) {
+    return false;
+  }
+
+  // Disable PHC allocations on the current thread. Only useful for tests.
+  // Note that PHC deallocations will still occur as needed.
+  // This method was added in version 4 of the bridge.
+  virtual void DisablePHCOnCurrentThread() {}
+
+  // Re-enable PHC allocations on the current thread.
Only useful for tests. + // This method was added in version 4 of the bridge. + virtual void ReenablePHCOnCurrentThread() {} + + // Test whether PHC allocations are enabled on the current thread. Only + // useful for tests. + // This method was added in version 4 of the bridge. + virtual bool IsPHCEnabledOnCurrentThread() { return false; } + # ifndef REPLACE_MALLOC_IMPL // Returns the replace-malloc bridge if its version is at least the // requested one. @@ -199,6 +225,30 @@ struct ReplaceMalloc { return singleton ? singleton->RegisterHook(aName, aTable, aHookTable) : nullptr; } + + static bool IsPHCAllocation(const void* aPtr, mozilla::phc::AddrInfo* aOut) { + auto singleton = ReplaceMallocBridge::Get(/* minimumVersion */ 4); + return singleton ? singleton->IsPHCAllocation(aPtr, aOut) : false; + } + + static void DisablePHCOnCurrentThread() { + auto singleton = ReplaceMallocBridge::Get(/* minimumVersion */ 4); + if (singleton) { + singleton->DisablePHCOnCurrentThread(); + } + } + + static void ReenablePHCOnCurrentThread() { + auto singleton = ReplaceMallocBridge::Get(/* minimumVersion */ 4); + if (singleton) { + singleton->ReenablePHCOnCurrentThread(); + } + } + + static bool IsPHCEnabledOnCurrentThread() { + auto singleton = ReplaceMallocBridge::Get(/* minimumVersion */ 4); + return singleton ? singleton->IsPHCEnabledOnCurrentThread() : false; + } }; # endif diff --git a/memory/gtest/TestJemalloc.cpp b/memory/gtest/TestJemalloc.cpp index e63a3d45dd2d..d5698422b304 100644 --- a/memory/gtest/TestJemalloc.cpp +++ b/memory/gtest/TestJemalloc.cpp @@ -16,6 +16,10 @@ #include "gtest/gtest.h" +#ifdef MOZ_PHC +# include "replace_malloc_bridge.h" +#endif + #if defined(DEBUG) && !defined(XP_WIN) && !defined(ANDROID) # define HAS_GDB_SLEEP_DURATION 1 extern unsigned int _gdb_sleep_duration; @@ -47,6 +51,21 @@ static void DisableCrashReporter() { using namespace mozilla; +class AutoDisablePHCOnCurrentThread { + public: + AutoDisablePHCOnCurrentThread() { +#ifdef MOZ_PHC + ReplaceMalloc::DisablePHCOnCurrentThread(); +#endif + } + + ~AutoDisablePHCOnCurrentThread() { +#ifdef MOZ_PHC + ReplaceMalloc::ReenablePHCOnCurrentThread(); +#endif + } +}; + static inline void TestOne(size_t size) { size_t req = size; size_t adv = malloc_good_size(req); @@ -378,6 +397,12 @@ static bool IsSameRoundedHugeClass(size_t aSize1, size_t aSize2, static bool CanReallocInPlace(size_t aFromSize, size_t aToSize, jemalloc_stats_t& aStats) { + // PHC allocations must be disabled because PHC reallocs differently to + // mozjemalloc. +#ifdef MOZ_PHC + MOZ_RELEASE_ASSERT(!ReplaceMalloc::IsPHCEnabledOnCurrentThread()); +#endif + if (aFromSize == malloc_good_size(aToSize)) { // Same size class: in-place. return true; @@ -397,6 +422,10 @@ static bool CanReallocInPlace(size_t aFromSize, size_t aToSize, TEST(Jemalloc, InPlace) { + // Disable PHC allocations for this test, because CanReallocInPlace() isn't + // valid for PHC allocations. + AutoDisablePHCOnCurrentThread disable; + jemalloc_stats_t stats; jemalloc_stats(&stats); @@ -430,6 +459,10 @@ TEST(Jemalloc, InPlace) #if !defined(XP_WIN) || !defined(MOZ_CODE_COVERAGE) TEST(Jemalloc, JunkPoison) { + // Disable PHC allocations for this test, because CanReallocInPlace() isn't + // valid for PHC allocations, and the testing UAFs aren't valid. 
+ AutoDisablePHCOnCurrentThread disable; + jemalloc_stats_t stats; jemalloc_stats(&stats); @@ -631,6 +664,10 @@ TEST(Jemalloc, JunkPoison) TEST(Jemalloc, GuardRegion) { + // Disable PHC allocations for this test, because even a single PHC + // allocation occurring can throw it off. + AutoDisablePHCOnCurrentThread disable; + jemalloc_stats_t stats; jemalloc_stats(&stats); diff --git a/memory/gtest/moz.build b/memory/gtest/moz.build index 24bda76e8ec2..d1eb77957ec8 100644 --- a/memory/gtest/moz.build +++ b/memory/gtest/moz.build @@ -10,6 +10,9 @@ if CONFIG['OS_TARGET'] != 'Android' and not(CONFIG['OS_TARGET'] == 'WINNT' and C 'TestJemalloc.cpp', ] + if CONFIG['MOZ_PHC']: + DEFINES['MOZ_PHC'] = True + FINAL_LIBRARY = 'xul-gtest' LOCAL_INCLUDES += [ diff --git a/memory/replace/logalloc/replay/moz.build b/memory/replace/logalloc/replay/moz.build index 7f0e979e8089..8c794f17b293 100644 --- a/memory/replace/logalloc/replay/moz.build +++ b/memory/replace/logalloc/replay/moz.build @@ -15,12 +15,17 @@ SOURCES += [ 'Replay.cpp', ] -if CONFIG['MOZ_REPLACE_MALLOC_STATIC'] and CONFIG['MOZ_DMD']: +if CONFIG['MOZ_REPLACE_MALLOC_STATIC'] and \ + (CONFIG['MOZ_DMD'] or CONFIG['MOZ_PHC']): UNIFIED_SOURCES += [ '/mfbt/HashFunctions.cpp', '/mfbt/JSONWriter.cpp', '/mozglue/misc/StackWalk.cpp', ] + if CONFIG['MOZ_BUILD_APP'] == 'memory': + EXPORTS.mozilla += [ + '/mozglue/misc/StackWalk.h', + ] if not CONFIG['MOZ_REPLACE_MALLOC_STATIC']: SOURCES += [ diff --git a/memory/replace/moz.build b/memory/replace/moz.build index 4858b3379015..f3f859e01364 100644 --- a/memory/replace/moz.build +++ b/memory/replace/moz.build @@ -18,3 +18,6 @@ DIRS += [ if CONFIG['MOZ_DMD']: DIRS += ['dmd'] + +if CONFIG['MOZ_PHC']: + DIRS += ['phc'] diff --git a/memory/replace/phc/PHC.cpp b/memory/replace/phc/PHC.cpp new file mode 100644 index 000000000000..afe60bbf7efa --- /dev/null +++ b/memory/replace/phc/PHC.cpp @@ -0,0 +1,1352 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +// PHC is a probabilistic heap checker. A tiny fraction of randomly chosen heap +// allocations are subject to some expensive checking via the use of OS page +// access protection. A failed check triggers a crash, whereupon useful +// information about the failure is put into the crash report. The cost and +// coverage for each user is minimal, but spread over the entire user base the +// coverage becomes significant. +// +// The idea comes from Chromium, where it is called GWP-ASAN. (Firefox uses PHC +// as the name because GWP-ASAN is long, awkward, and doesn't have any +// particular meaning.) +// +// In the current implementation up to 64 allocations per process can become +// PHC allocations. These allocations must be page-sized or smaller. Each PHC +// allocation gets its own page, and when the allocation is freed its page is +// marked inaccessible until the page is reused for another allocation. This +// means that a use-after-free defect (which includes double-frees) will be +// caught if the use occurs before the page is reused for another allocation. +// The crash report will contain stack traces for the allocation site, the free +// site, and the use-after-free site, which is often enough to diagnose the +// defect. 
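+//
+// For example (an illustrative sketch, not code in this patch): if `p` below
+// happens to be chosen as a PHC allocation, the write after free() lands on a
+// protected page and produces a crash report carrying all three stacks:
+//
+//   char* p = (char*)malloc(32);  // randomly sampled --> gets its own page
+//   free(p);                      // page is marked inaccessible
+//   p[0] = 'x';                   // page fault --> PHC-annotated crash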
+//
+// The design space for the randomization strategy is large. The current
+// implementation has a large random delay before it starts operating, and a
+// small random delay between each PHC allocation attempt. Each freed PHC
+// allocation is quarantined for a medium random delay before being reused, in
+// order to increase the chance of catching UAFs.
+//
+// The basic cost of PHC's operation is as follows.
+//
+// - The memory cost is 64 * 4 KiB = 256 KiB per process (assuming 4 KiB
+//   pages) plus some metadata (including stack traces) for each page.
+//
+// - Every allocation requires a size check and a decrement-and-check of an
+//   atomic counter. When the counter reaches zero a PHC allocation can occur,
+//   which involves marking a page as accessible and getting a stack trace for
+//   the allocation site. Otherwise, mozjemalloc performs the allocation.
+//
+// - Every deallocation requires a range check on the pointer to see if it
+//   involves a PHC allocation. (The choice to only do PHC allocations that
+//   are a page or smaller enables this range check, because the 64 pages are
+//   contiguous. Allowing larger allocations would make this more complicated,
+//   and we definitely don't want something as slow as a hash table lookup on
+//   every deallocation.) PHC deallocations involve marking a page as
+//   inaccessible and getting a stack trace for the deallocation site.
+//
+// In the future, we may add guard pages between the used pages in order to
+// detect buffer overflows/underflows. This would change the memory cost to
+// (64 * 2 + 1) * 4 KiB = 516 KiB per process and complicate the machinery
+// somewhat.
+//
+// Note that calls to realloc(), free(), malloc_usable_size(), and
+// IsPHCAllocation() will succeed if the given pointer falls anywhere within a
+// page allocation's page, even if that is beyond the bounds of the page
+// allocation's usable size. For example:
+//
+//   void* p = malloc(64);
+//   free(p + 128);  // p+128 is within p's page --> same outcome as free(p)
+
+#include "PHC.h"
+
+#include <stdlib.h>
+#include <time.h>
+
+#include <algorithm>
+
+#ifdef XP_WIN
+# include <process.h>
+#else
+# include <sys/mman.h>
+# include <sys/types.h>
+# include <pthread.h>
+# include <unistd.h>
+#endif
+
+#include "replace_malloc.h"
+#include "FdPrintf.h"
+#include "Mutex.h"
+#include "mozilla/Assertions.h"
+#include "mozilla/Atomics.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/CheckedInt.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/StackWalk.h"
+#include "mozilla/ThreadLocal.h"
+#include "mozilla/XorShift128PlusRNG.h"
+
+using namespace mozilla;
+
+//---------------------------------------------------------------------------
+// Utilities
+//---------------------------------------------------------------------------
+
+#ifdef ANDROID
+// Android doesn't have pthread_atfork defined in pthread.h.
+extern "C" MOZ_EXPORT int pthread_atfork(void (*)(void), void (*)(void),
+                                         void (*)(void));
+#endif
+
+#ifndef DISALLOW_COPY_AND_ASSIGN
+# define DISALLOW_COPY_AND_ASSIGN(T) \
+   T(const T&);                      \
+   void operator=(const T&)
+#endif
+
+static malloc_table_t sMallocTable;
+
+// This class provides infallible operations for the small number of heap
+// allocations that PHC does for itself. It would be nice if we could use the
+// InfallibleAllocPolicy from mozalloc, but PHC cannot use mozalloc.
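+// (For example, replace_init() below allocates the global state objects with
+// it: `gConst = InfallibleAllocPolicy::new_<GConst>()`.)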
+class InfallibleAllocPolicy {
+ public:
+  static void AbortOnFailure(const void* aP) {
+    if (!aP) {
+      MOZ_CRASH("PHC failed to allocate");
+    }
+  }
+
+  template <class T>
+  static T* new_() {
+    void* p = sMallocTable.malloc(sizeof(T));
+    AbortOnFailure(p);
+    return new (p) T;
+  }
+};
+
+//---------------------------------------------------------------------------
+// Stack traces
+//---------------------------------------------------------------------------
+
+// This code is similar to the equivalent code within DMD.
+
+class StackTrace : public phc::StackTrace {
+ public:
+  StackTrace() : phc::StackTrace(), mSkipped(false) {}
+
+  bool IsEmpty() const { return mLength == 0 && !mSkipped; }
+
+  void Clear() {
+    mLength = 0;
+    mSkipped = false;
+  }
+
+  void Fill();
+
+  void FillSkipped() {
+    mLength = 0;
+    mSkipped = true;
+  }
+
+ private:
+  static void StackWalkCallback(uint32_t aFrameNumber, void* aPc, void* aSp,
+                                void* aClosure) {
+    StackTrace* st = (StackTrace*)aClosure;
+    MOZ_ASSERT(st->mLength < kMaxFrames);
+    st->mPcs[st->mLength] = aPc;
+    st->mLength++;
+    MOZ_ASSERT(st->mLength == aFrameNumber);
+  }
+
+  // There are some rare cases (see FillSkipped's call sites) where we want to
+  // get a stack trace but cannot do so safely. When this field is set it
+  // indicates such a stack trace.
+  bool mSkipped;
+};
+
+// WARNING WARNING WARNING: this function must only be called when GMut::sMutex
+// is *not* locked, otherwise we might get deadlocks.
+//
+// How? On Windows, MozStackWalk() can lock a mutex, M, from the shared library
+// loader. Another thread might call malloc() while holding M locked (when
+// loading a shared library) and try to lock GMut::sMutex, causing a deadlock.
+// So GMut::sMutex can't be locked during the call to MozStackWalk(). (For
+// details, see https://bugzilla.mozilla.org/show_bug.cgi?id=374829#c8. On
+// Linux, something similar can happen; see bug 824340. So we just disallow it
+// on all platforms.)
+//
+// In DMD, to avoid this problem we temporarily unlock the equivalent mutex for
+// the MozStackWalk() call. But that's grotty, and things are a bit different
+// here, so we just require that stack traces be obtained before locking
+// GMut::sMutex.
+//
+// Unfortunately, there is no reliable way at compile-time or run-time to
+// ensure this pre-condition. Hence this large comment.
+//
+void StackTrace::Fill() {
+  mLength = 0;
+  mSkipped = false;
+
+#if defined(XP_WIN) && defined(_M_IX86)
+  // This avoids MozStackWalk(), which causes unusably slow startup on Win32
+  // when it is called during static initialization (see bug 1241684).
+  //
+  // This code is cribbed from the Gecko Profiler, which also uses
+  // FramePointerStackWalk() on Win32: Registers::SyncPopulate() for the
+  // frame pointer, and GetStackTop() for the stack end.
+  CONTEXT context;
+  RtlCaptureContext(&context);
+  void** fp = reinterpret_cast<void**>(context.Ebp);
+
+  PNT_TIB pTib = reinterpret_cast<PNT_TIB>(NtCurrentTeb());
+  void* stackEnd = static_cast<void*>(pTib->StackBase);
+  FramePointerStackWalk(StackWalkCallback, /* aSkipFrames = */ 0, kMaxFrames,
+                        this, fp, stackEnd);
+#elif defined(XP_MACOSX)
+  // This avoids MozStackWalk(), which has become unusably slow on Mac due to
+  // changes in libunwind.
+  //
+  // This code is cribbed from the Gecko Profiler, which also uses
+  // FramePointerStackWalk() on Mac: Registers::SyncPopulate() for the frame
+  // pointer, and GetStackTop() for the stack end.
+  void** fp;
+  asm(
+      // Dereference %rbp to get previous %rbp
+      "movq (%%rbp), %0\n\t"
+      : "=r"(fp));
+  void* stackEnd = pthread_get_stackaddr_np(pthread_self());
+  FramePointerStackWalk(StackWalkCallback, /* skipFrames = */ 0, kMaxFrames,
+                        this, fp, stackEnd);
+#else
+  MozStackWalk(StackWalkCallback, /* aSkipFrames = */ 0, kMaxFrames, this);
+#endif
+}
+
+//---------------------------------------------------------------------------
+// Logging
+//---------------------------------------------------------------------------
+
+// Change this to 1 to enable some PHC logging. Useful for debugging.
+#define PHC_LOGGING 0
+
+#if PHC_LOGGING
+
+static size_t GetPid() { return size_t(getpid()); }
+
+static size_t GetTid() {
+# if defined(XP_WIN)
+  return size_t(GetCurrentThreadId());
+# else
+  return size_t(pthread_self());
+# endif
+}
+
+# if defined(XP_WIN)
+#  define LOG_STDERR \
+     reinterpret_cast<intptr_t>(GetStdHandle(STD_ERROR_HANDLE))
+# else
+#  define LOG_STDERR 2
+# endif
+# define LOG(fmt, ...)                                                \
+   FdPrintf(LOG_STDERR, "PHC[%zu,%zu,~%zu] " fmt, GetPid(), GetTid(), \
+            size_t(GAtomic::Now()), __VA_ARGS__)
+
+#else
+
+# define LOG(fmt, ...)
+
+#endif  // PHC_LOGGING
+
+//---------------------------------------------------------------------------
+// Global state
+//---------------------------------------------------------------------------
+
+// Throughout this entire file time is measured as the number of sub-page
+// allocations performed (by PHC and mozjemalloc combined). `Time` is 64-bit
+// because we could have more than 2**32 allocations in a long-running session.
+// `Delay` is 32-bit because the delays used within PHC are always much smaller
+// than 2**32.
+using Time = uint64_t;   // A moment in time.
+using Delay = uint32_t;  // A time duration.
+
+// PHC only runs if the page size is 4 KiB; anything more is uncommon and would
+// use too much memory. So we hardwire this size.
+static const size_t kPageSize = 4096;
+
+// The maximum number of live page allocations.
+static const size_t kMaxPageAllocs = 64;
+
+// The total size of the pages.
+static const size_t kAllPagesSize = kPageSize * kMaxPageAllocs;
+
+// The junk value used to fill new allocations in debug builds. It's the same
+// value as the one used by mozjemalloc. PHC applies it unconditionally in
+// debug builds. Unlike mozjemalloc, PHC doesn't consult the MALLOC_OPTIONS
+// environment variable to possibly change that behaviour.
+//
+// Also note that, unlike mozjemalloc, PHC doesn't have a poison value for
+// freed allocations because freed allocations are protected by OS page
+// protection.
+const uint8_t kAllocJunk = 0xe4;
+
+// The maximum time.
+static const Time kMaxTime = ~(Time(0));
+
+// The average delay before doing any page allocations at the start of a
+// process. Note that roughly 1 million allocations occur in the main process
+// while starting the browser.
+static const Delay kAvgFirstAllocDelay = 512 * 1024;
+
+// The average delay until the next attempted page allocation, once we get
+// past the first delay.
+static const Delay kAvgAllocDelay = 2 * 1024;
+
+// The average delay before reusing a freed page. Should be significantly
+// larger than kAvgAllocDelay, otherwise there's not much point in having it.
+static const Delay kAvgPageReuseDelay = 32 * 1024;
+
+// Truncate aRnd to the range (1 .. AvgDelay*2). If aRnd is random, this
+// results in an average value of AvgDelay + 0.5, which is close enough to
+// AvgDelay. AvgDelay must be a power-of-two (otherwise it will crash) for
+// speed.
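+//
+// For example (illustrative arithmetic only): with AvgDelay = 1024, aRnd %
+// 2048 is uniform over 0..2047, so the returned delay is in 1..2048, with an
+// average of 1024.5.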
+template <Delay AvgDelay>
+constexpr Delay Rnd64ToDelay(uint64_t aRnd) {
+  static_assert(IsPowerOfTwo(AvgDelay), "must be a power of two");
+
+  return aRnd % (AvgDelay * 2) + 1;
+}
+
+// Shared, atomic, mutable global state.
+class GAtomic {
+ public:
+  static void Init(Delay aFirstDelay) {
+    sAllocDelay = aFirstDelay;
+
+    LOG("Initial sAllocDelay <- %zu\n", size_t(aFirstDelay));
+  }
+
+  static Time Now() { return sNow; }
+
+  static void IncrementNow() { sNow++; }
+
+  // Decrements the delay and returns the decremented value.
+  static int32_t DecrementDelay() { return --sAllocDelay; }
+
+  static void SetAllocDelay(Delay aAllocDelay) { sAllocDelay = aAllocDelay; }
+
+ private:
+  // The current time. Relaxed semantics because it's primarily used for
+  // determining if an allocation can be recycled yet and therefore it doesn't
+  // need to be exact.
+  static Atomic<Time, Relaxed> sNow;
+
+  // Delay until the next attempt at a page allocation. See the comment in
+  // MaybePageAlloc() for an explanation of why it is a signed integer, and why
+  // it uses ReleaseAcquire semantics.
+  static Atomic<Delay, ReleaseAcquire> sAllocDelay;
+};
+
+Atomic<Time, Relaxed> GAtomic::sNow;
+Atomic<Delay, ReleaseAcquire> GAtomic::sAllocDelay;
+
+// Shared, immutable global state. Initialized by replace_init() and never
+// changed after that. replace_init() runs early enough that no
+// synchronization is needed.
+class GConst {
+ private:
+  // The bounds of the allocated pages.
+  const uintptr_t mPagesStart;
+  const uintptr_t mPagesLimit;
+
+  uintptr_t AllocPages() {
+    // Allocate the pages so that they are inaccessible. They are never freed,
+    // because it would happen at process termination when it would be of
+    // little use.
+#ifdef XP_WIN
+    void* pages =
+        VirtualAlloc(nullptr, kAllPagesSize, MEM_RESERVE, PAGE_NOACCESS);
+    if (!pages) {
+      MOZ_CRASH();
+    }
+#else
+    void* pages = mmap(nullptr, kAllPagesSize, PROT_NONE,
+                       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+    // Note: mmap() reports failure with MAP_FAILED, not nullptr.
+    if (pages == MAP_FAILED) {
+      MOZ_CRASH();
+    }
+#endif
+
+    return reinterpret_cast<uintptr_t>(pages);
+  }
+
+ public:
+  GConst()
+      : mPagesStart(AllocPages()), mPagesLimit(mPagesStart + kAllPagesSize) {
+    LOG("AllocPages at %p..%p\n", (void*)mPagesStart, (void*)mPagesLimit);
+  }
+
+  // Detect if a pointer is to a page allocation, and if so, which one. This
+  // function must be fast because it is called for every call to free(),
+  // realloc(), malloc_usable_size(), and jemalloc_ptr_info().
+  Maybe<uintptr_t> PageIndex(const void* aPtr) {
+    auto ptr = reinterpret_cast<uintptr_t>(aPtr);
+    if (!(mPagesStart <= ptr && ptr < mPagesLimit)) {
+      return Nothing();
+    }
+
+    size_t i = (ptr - mPagesStart) / kPageSize;
+    MOZ_ASSERT(i < kMaxPageAllocs);
+    return Some(i);
+  }
+
+  // Get the address of a page referred to via an index.
+  void* PagePtr(size_t aIndex) {
+    MOZ_ASSERT(aIndex < kMaxPageAllocs);
+    return reinterpret_cast<void*>(mPagesStart + kPageSize * aIndex);
+  }
+};
+
+static GConst* gConst;
+
+// On MacOS, the first __thread/thread_local access calls malloc, which leads
+// to an infinite loop. So we use pthread-based TLS instead, which somehow
+// doesn't have this problem.
+#if !defined(XP_DARWIN)
+# define PHC_THREAD_LOCAL(T) MOZ_THREAD_LOCAL(T)
+#else
+# define PHC_THREAD_LOCAL(T) \
+   detail::ThreadLocal<T, detail::ThreadLocalKeyStorage>
+#endif
+
+// Thread-local state.
+class GTls {
+  DISALLOW_COPY_AND_ASSIGN(GTls);
+
+  // When true, PHC does as little as possible.
+  //
+  // (a) It does not allocate any new page allocations.
+  //
+  // (b) It avoids doing any operations that might call malloc/free/etc.,
+  //     which would cause re-entry into PHC. (In practice, MozStackWalk() is
+  //     the only such operation.) Note that calls to the functions in
+  //     sMallocTable are ok.
+ // + // For example, replace_malloc() will just fall back to mozjemalloc. However, + // operations involving existing allocations are more complex, because those + // existing allocations may be page allocations. For example, if + // replace_free() is passed a page allocation on a PHC-disabled thread, it + // will free the page allocation in the usual way, but it will get a dummy + // freeStack in order to avoid calling MozStackWalk(), as per (b) above. + // + // This single disabling mechanism has two distinct uses. + // + // - It's used to prevent re-entry into PHC, which can cause correctness + // problems. For example, consider this sequence. + // + // 1. enter replace_free() + // 2. which calls PageFree() + // 3. which calls MozStackWalk() + // 4. which locks a mutex M, and then calls malloc + // 5. enter replace_malloc() + // 6. which calls MaybePageAlloc() + // 7. which calls MozStackWalk() + // 8. which (re)locks a mutex M --> deadlock + // + // We avoid this sequence by "disabling" the thread in PageFree() (at step + // 2), which causes MaybePageAlloc() to fail, avoiding the call to + // MozStackWalk() (at step 7). + // + // In practice, realloc or free of a PHC allocation is unlikely on a thread + // that is disabled because of this use: MozStackWalk() will probably only + // realloc/free allocations that it allocated itself, but those won't be + // page allocations because PHC is disabled before calling MozStackWalk(). + // + // (Note that MaybePageAlloc() could safely do a page allocation so long as + // it avoided calling MozStackWalk() by getting a dummy allocStack. But it + // wouldn't be useful, and it would prevent the second use below.) + // + // - It's used to prevent PHC allocations in some tests that rely on + // mozjemalloc's exact allocation behaviour, which PHC does not replicate + // exactly. (Note that (b) isn't necessary for this use -- MozStackWalk() + // could be safely called -- but it is necessary for the first use above.) + // + static PHC_THREAD_LOCAL(bool) tlsIsDisabled; + + public: + static void Init() { + if (!tlsIsDisabled.init()) { + MOZ_CRASH(); + } + } + + static void DisableOnCurrentThread() { + MOZ_ASSERT(!GTls::tlsIsDisabled.get()); + tlsIsDisabled.set(true); + } + + static void EnableOnCurrentThread() { + MOZ_ASSERT(GTls::tlsIsDisabled.get()); + tlsIsDisabled.set(false); + } + + static bool IsDisabledOnCurrentThread() { return tlsIsDisabled.get(); } +}; + +PHC_THREAD_LOCAL(bool) GTls::tlsIsDisabled; + +class AutoDisableOnCurrentThread { + DISALLOW_COPY_AND_ASSIGN(AutoDisableOnCurrentThread); + + public: + explicit AutoDisableOnCurrentThread() { GTls::DisableOnCurrentThread(); } + ~AutoDisableOnCurrentThread() { GTls::EnableOnCurrentThread(); } +}; + +// This type is used as a proof-of-lock token, to make it clear which functions +// require sMutex to be locked. +using GMutLock = const MutexAutoLock&; + +// Shared, mutable global state. Protected by sMutex; all accessing functions +// take a GMutLock as proof that sMutex is held. +class GMut { + enum class PageState { + NeverAllocated = 0, + InUse = 1, + Freed = 2, + }; + + // Metadata for each page. + class PageInfo { + public: + PageInfo() + : mState(PageState::NeverAllocated), + mArenaId(), + mUsableSize(0), + mAllocStack(), + mFreeStack(), + mReuseTime(0) {} + + // The current page state. + PageState mState; + + // The arena that the allocation is nominally from. This isn't meaningful + // within PHC, which has no arenas. 
But it is necessary for reallocation of
+    // page allocations as normal allocations, such as in this code:
+    //
+    //   p = moz_arena_malloc(arenaId, 4096);
+    //   realloc(p, 8192);
+    //
+    // The realloc is more than one page, and thus too large for PHC to
+    // handle. Therefore, if PHC handles the first allocation, it must ask
+    // mozjemalloc to allocate the 8192 bytes in the correct arena, and to do
+    // that, it must call sMallocTable.moz_arena_malloc with the correct
+    // arenaId under the covers. Therefore it must record that arenaId.
+    //
+    // This field is also needed for jemalloc_ptr_info() to work, because it
+    // also returns the arena ID (but only in debug builds).
+    //
+    // - NeverAllocated: must be Nothing().
+    // - InUse: can be any valid arena ID value.
+    // - Freed: can be any valid arena ID value.
+    Maybe<arena_id_t> mArenaId;
+
+    // The usable size, which could be bigger than the requested size.
+    // - NeverAllocated: must be 0.
+    // - InUse: must be > 0.
+    // - Freed: must be > 0.
+    size_t mUsableSize;
+
+    // The allocation stack.
+    // - NeverAllocated: empty.
+    // - InUse: non-empty.
+    // - Freed: non-empty.
+    StackTrace mAllocStack;
+
+    // The free stack.
+    // - NeverAllocated: empty.
+    // - InUse: empty.
+    // - Freed: non-empty.
+    StackTrace mFreeStack;
+
+    // The time at which the page is available for reuse, as measured against
+    // GAtomic::sNow. When the page is in use this value will be kMaxTime.
+    // - NeverAllocated: must be 0.
+    // - InUse: must be kMaxTime.
+    // - Freed: must be > 0 and < kMaxTime.
+    Time mReuseTime;
+  };
+
+ public:
+  // The mutex that protects the other members.
+  static Mutex sMutex;
+
+  GMut() : mRNG(RandomSeed<0>(), RandomSeed<1>()), mPages() { sMutex.Init(); }
+
+  uint64_t Random64(GMutLock) { return mRNG.next(); }
+
+  bool IsPageInUse(GMutLock, uintptr_t aIndex) {
+    return mPages[aIndex].mState == PageState::InUse;
+  }
+
+  // Is the page free? And if so, has enough time passed that we can use it?
+  bool IsPageAllocatable(GMutLock, uintptr_t aIndex, Time aNow) {
+    const PageInfo& page = mPages[aIndex];
+    return page.mState != PageState::InUse && aNow >= page.mReuseTime;
+  }
+
+  Maybe<arena_id_t> PageArena(GMutLock aLock, uintptr_t aIndex) {
+    const PageInfo& page = mPages[aIndex];
+    AssertPageInUse(aLock, page);
+
+    return page.mArenaId;
+  }
+
+  size_t PageUsableSize(GMutLock aLock, uintptr_t aIndex) {
+    const PageInfo& page = mPages[aIndex];
+    AssertPageInUse(aLock, page);
+
+    return page.mUsableSize;
+  }
+
+  void SetPageInUse(GMutLock aLock, uintptr_t aIndex,
+                    const Maybe<arena_id_t>& aArenaId, size_t aUsableSize,
+                    const StackTrace& aAllocStack) {
+    MOZ_ASSERT(aUsableSize == sMallocTable.malloc_good_size(aUsableSize));
+
+    PageInfo& page = mPages[aIndex];
+    AssertPageNotInUse(aLock, page);
+
+    page.mState = PageState::InUse;
+    page.mArenaId = aArenaId;
+    page.mUsableSize = aUsableSize;
+    page.mAllocStack = aAllocStack;
+    page.mFreeStack.Clear();
+    page.mReuseTime = kMaxTime;
+  }
+
+  void ResizePageInUse(GMutLock aLock, uintptr_t aIndex,
+                       const Maybe<arena_id_t>& aArenaId,
+                       size_t aNewUsableSize, const StackTrace& aAllocStack) {
+    MOZ_ASSERT(aNewUsableSize == sMallocTable.malloc_good_size(aNewUsableSize));
+
+    PageInfo& page = mPages[aIndex];
+    AssertPageInUse(aLock, page);
+
+    // page.mState is not changed.
+    if (aArenaId.isSome()) {
+      // Crash if the arenas don't match.
+      MOZ_RELEASE_ASSERT(page.mArenaId == aArenaId);
+    }
+    page.mUsableSize = aNewUsableSize;
+    // We could just keep the original alloc stack, but the realloc stack is
+    // more recent and therefore seems more useful.
+    page.mAllocStack = aAllocStack;
+    // page.mFreeStack is not changed.
+    // page.mReuseTime is not changed.
+  }
+
+  void SetPageFreed(GMutLock aLock, uintptr_t aIndex,
+                    const Maybe<arena_id_t>& aArenaId,
+                    const StackTrace& aFreeStack, Delay aReuseDelay) {
+    PageInfo& page = mPages[aIndex];
+    AssertPageInUse(aLock, page);
+
+    page.mState = PageState::Freed;
+
+    // page.mArenaId is left unchanged, for jemalloc_ptr_info() calls that
+    // occur after freeing (e.g. in the PtrInfo test in TestJemalloc.cpp).
+    if (aArenaId.isSome()) {
+      // Crash if the arenas don't match.
+      MOZ_RELEASE_ASSERT(page.mArenaId == aArenaId);
+    }
+
+    // page.mUsableSize is left unchanged, for reporting on UAF, and for
+    // jemalloc_ptr_info() calls that occur after freeing (e.g. in the PtrInfo
+    // test in TestJemalloc.cpp).
+
+    // page.mAllocStack is left unchanged, for reporting on UAF.
+
+    page.mFreeStack = aFreeStack;
+    page.mReuseTime = GAtomic::Now() + aReuseDelay;
+  }
+
+  void EnsureInUse(GMutLock, void* aPtr, uintptr_t aIndex) {
+    const PageInfo& page = mPages[aIndex];
+    MOZ_RELEASE_ASSERT(page.mState != PageState::NeverAllocated);
+    if (page.mState == PageState::Freed) {
+      LOG("EnsureInUse(%p), failure\n", aPtr);
+      // An operation on a freed page? This is a particular kind of
+      // use-after-free. Deliberately touch the page in question, in order to
+      // cause a crash that triggers the usual PHC machinery. But unlock
+      // sMutex first, because that self-same PHC machinery needs to re-lock
+      // it, and the crash causes non-local control flow so sMutex won't be
+      // unlocked the normal way in the caller.
+      sMutex.Unlock();
+      *static_cast<uint8_t*>(aPtr) = 0;
+      MOZ_CRASH("unreachable");
+    }
+  }
+
+  void FillAddrInfo(GMutLock, uintptr_t aIndex, const void* aBaseAddr,
+                    phc::AddrInfo& aOut) {
+    const PageInfo& page = mPages[aIndex];
+    switch (page.mState) {
+      case PageState::NeverAllocated:
+        aOut.mKind = phc::AddrInfo::Kind::NeverAllocatedPage;
+        break;
+
+      case PageState::InUse:
+        aOut.mKind = phc::AddrInfo::Kind::InUsePage;
+        break;
+
+      case PageState::Freed:
+        aOut.mKind = phc::AddrInfo::Kind::FreedPage;
+        break;
+
+      default:
+        MOZ_CRASH();
+    }
+    aOut.mBaseAddr = gConst->PagePtr(aIndex);
+    aOut.mUsableSize = page.mUsableSize;
+    aOut.mAllocStack = page.mAllocStack;
+    aOut.mFreeStack = page.mFreeStack;
+  }
+
+  void FillJemallocPtrInfo(GMutLock, const void* aPtr, uintptr_t aIndex,
+                           jemalloc_ptr_info_t* aInfo) {
+    const PageInfo& page = mPages[aIndex];
+    switch (page.mState) {
+      case PageState::NeverAllocated:
+        break;
+
+      case PageState::InUse: {
+        // Only return TagLiveAlloc if the pointer is within the bounds of the
+        // allocation's usable size.
+        char* pagePtr = static_cast<char*>(gConst->PagePtr(aIndex));
+        if (aPtr < pagePtr + page.mUsableSize) {
+          *aInfo = {TagLiveAlloc, pagePtr, page.mUsableSize,
+                    page.mArenaId.valueOr(0)};
+          return;
+        }
+        break;
+      }
+
+      case PageState::Freed: {
+        // Only return TagFreedAlloc if the pointer is within the bounds of
+        // the former allocation's usable size.
+        char* pagePtr = static_cast<char*>(gConst->PagePtr(aIndex));
+        if (aPtr < pagePtr + page.mUsableSize) {
+          *aInfo = {TagFreedAlloc, gConst->PagePtr(aIndex), page.mUsableSize,
+                    page.mArenaId.valueOr(0)};
+          return;
+        }
+        break;
+      }
+
+      default:
+        MOZ_CRASH();
+    }
+
+    *aInfo = {TagUnknown, nullptr, 0, 0};
+  }
+
+  static void prefork() { sMutex.Lock(); }
+  static void postfork() { sMutex.Unlock(); }
+
+ private:
+  template <int N>
+  uint64_t RandomSeed() {
+    // An older version of this code used RandomUint64() here, but on Mac that
+    // function uses arc4random(), which can allocate, which would cause
+    // re-entry, which would be bad. So we just use time() and a local
+    // variable address. These are mediocre sources of entropy, but good
+    // enough for PHC.
+    static_assert(N == 0 || N == 1, "must be 0 or 1");
+    uint64_t seed;
+    if (N == 0) {
+      time_t t = time(nullptr);
+      seed = t ^ (t << 32);
+    } else {
+      seed = uintptr_t(&seed) ^ (uintptr_t(&seed) << 32);
+    }
+    return seed;
+  }
+
+  void AssertPageInUse(GMutLock, const PageInfo& aPage) {
+    MOZ_ASSERT(aPage.mState == PageState::InUse);
+    // There is nothing to assert about aPage.mArenaId.
+    MOZ_ASSERT(aPage.mUsableSize > 0);
+    MOZ_ASSERT(!aPage.mAllocStack.IsEmpty());
+    MOZ_ASSERT(aPage.mFreeStack.IsEmpty());
+    MOZ_ASSERT(aPage.mReuseTime == kMaxTime);
+  }
+
+  void AssertPageNotInUse(GMutLock, const PageInfo& aPage) {
+    // We can assert a lot about `NeverAllocated` pages, but not much about
+    // `Freed` pages.
+#ifdef DEBUG
+    bool isFresh = aPage.mState == PageState::NeverAllocated;
+    MOZ_ASSERT(isFresh || aPage.mState == PageState::Freed);
+    MOZ_ASSERT_IF(isFresh, aPage.mArenaId == Nothing());
+    MOZ_ASSERT(isFresh == (aPage.mUsableSize == 0));
+    MOZ_ASSERT(isFresh == (aPage.mAllocStack.IsEmpty()));
+    MOZ_ASSERT(isFresh == (aPage.mFreeStack.IsEmpty()));
+    MOZ_ASSERT(aPage.mReuseTime != kMaxTime);
+#endif
+  }
+
+  // RNG for deciding which allocations to treat specially. It doesn't need to
+  // be high quality.
+  non_crypto::XorShift128PlusRNG mRNG;
+
+  PageInfo mPages[kMaxPageAllocs];
+};
+
+Mutex GMut::sMutex;
+
+static GMut* gMut;
+
+//---------------------------------------------------------------------------
+// Page allocation operations
+//---------------------------------------------------------------------------
+
+// Attempt a page allocation if the time and the size are right. Allocated
+// memory is zeroed if aZero is true. On failure, the caller should attempt a
+// normal allocation via sMallocTable. Must not be called in a context where
+// GMut::sMutex is locked, because it locks that mutex itself.
+static void* MaybePageAlloc(const Maybe<arena_id_t>& aArenaId, size_t aReqSize,
+                            bool aZero) {
+  if (aReqSize > kPageSize) {
+    return nullptr;
+  }
+
+  GAtomic::IncrementNow();
+
+  // Decrement the delay. If it's zero, we do a page allocation and reset the
+  // delay to a random number. Because the assignment to the random number
+  // isn't atomic w.r.t. the decrement, we might have a sequence like this:
+  //
+  //     Thread 1                      Thread 2         Thread 3
+  //     --------                      --------         --------
+  // (a) newDelay = --sAllocDelay (-> 0)
+  // (b)                               --sAllocDelay (-> -1)
+  // (c) (newDelay != 0) fails
+  // (d)                                                --sAllocDelay (-> -2)
+  // (e) sAllocDelay = new_random_number()
+  //
+  // It's critical that sAllocDelay has ReleaseAcquire semantics, because that
+  // guarantees that exactly one thread will see sAllocDelay have the value 0.
+  // (Relaxed semantics wouldn't guarantee that.)
+  //
+  // It's also nice that sAllocDelay is signed, given that we can decrement to
+  // below zero. (Strictly speaking, an unsigned integer would also work due
+  // to wrapping, but a signed integer is conceptually cleaner.)
+  //
+  // Finally, note that the decrements that occur between (a) and (e) above
+  // are effectively ignored, because (e) clobbers them. This shouldn't be a
+  // problem; it effectively just adds a little more randomness to
+  // new_random_number(). An early version of this code tried to account for
+  // these decrements by doing `sAllocDelay += new_random_number()`. However,
+  // if new_random_number() is small, the number of decrements between (a) and
+  // (e) can easily exceed it, whereupon sAllocDelay ends up negative after
+  // `sAllocDelay += new_random_number()`, and the zero-check never succeeds
+  // again. (At least, not until sAllocDelay wraps around on overflow, which
+  // would take a very long time indeed.)
+  //
+  int32_t newDelay = GAtomic::DecrementDelay();
+  if (newDelay != 0) {
+    return nullptr;
+  }
+
+  if (GTls::IsDisabledOnCurrentThread()) {
+    return nullptr;
+  }
+
+  // Disable on this thread *before* getting the stack trace.
+  AutoDisableOnCurrentThread disable;
+
+  // Get the stack trace *before* locking the mutex. If we return nullptr then
+  // it was a waste, but it's not so frequent, and doing a stack walk while
+  // the mutex is locked is problematic (see the big comment on
+  // StackTrace::Fill() for details).
+  StackTrace allocStack;
+  allocStack.Fill();
+
+  MutexAutoLock lock(GMut::sMutex);
+
+  Time now = GAtomic::Now();
+  Delay newAllocDelay = Rnd64ToDelay<kAvgAllocDelay>(gMut->Random64(lock));
+
+  // We start at a random page alloc and wrap around, to ensure pages get even
+  // amounts of use.
+  void* ptr = nullptr;
+  for (uintptr_t n = 0, i = size_t(gMut->Random64(lock)) % kMaxPageAllocs;
+       n < kMaxPageAllocs; n++, i = (i + 1) % kMaxPageAllocs) {
+    if (gMut->IsPageAllocatable(lock, i, now)) {
+      void* pagePtr = gConst->PagePtr(i);
+      bool ok =
+#ifdef XP_WIN
+          !!VirtualAlloc(pagePtr, kPageSize, MEM_COMMIT, PAGE_READWRITE);
+#else
+          mprotect(pagePtr, kPageSize, PROT_READ | PROT_WRITE) == 0;
+#endif
+      size_t usableSize = sMallocTable.malloc_good_size(aReqSize);
+      if (ok) {
+        gMut->SetPageInUse(lock, i, aArenaId, usableSize, allocStack);
+        ptr = pagePtr;
+        if (aZero) {
+          memset(ptr, 0, usableSize);
+        } else {
+#ifdef DEBUG
+          memset(ptr, kAllocJunk, usableSize);
+#endif
+        }
+      }
+      LOG("PageAlloc(%zu) -> %p[%zu] (%zu) (z%zu), sAllocDelay <- %zu\n",
+          aReqSize, ptr, i, usableSize, size_t(aZero), size_t(newAllocDelay));
+      break;
+    }
+  }
+
+  if (!ptr) {
+    // No pages are available, or VirtualAlloc/mprotect failed.
+    LOG("No PageAlloc(%zu), sAllocDelay <- %zu\n", aReqSize,
+        size_t(newAllocDelay));
+  }
+
+  // Set the new alloc delay.
+  GAtomic::SetAllocDelay(newAllocDelay);
+
+  return ptr;
+}
+
+static void FreePage(GMutLock aLock, size_t aIndex,
+                     const Maybe<arena_id_t>& aArenaId,
+                     const StackTrace& aFreeStack, Delay aReuseDelay) {
+  void* pagePtr = gConst->PagePtr(aIndex);
+#ifdef XP_WIN
+  if (!VirtualFree(pagePtr, kPageSize, MEM_DECOMMIT)) {
+    return;
+  }
+#else
+  // Note: mmap() reports failure with MAP_FAILED, not nullptr.
+  if (mmap(pagePtr, kPageSize, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON,
+           -1, 0) == MAP_FAILED) {
+    return;
+  }
+#endif
+
+  gMut->SetPageFreed(aLock, aIndex, aArenaId, aFreeStack, aReuseDelay);
+}
+
+//---------------------------------------------------------------------------
+// replace-malloc machinery
+//---------------------------------------------------------------------------
+
+// This handles malloc, moz_arena_malloc, and realloc-with-a-nullptr.
+MOZ_ALWAYS_INLINE static void* PageMalloc(const Maybe<arena_id_t>& aArenaId,
+                                          size_t aReqSize) {
+  void* ptr = MaybePageAlloc(aArenaId, aReqSize, /* aZero */ false);
+  return ptr ? ptr
+             : (aArenaId.isSome()
+                    ? sMallocTable.moz_arena_malloc(*aArenaId, aReqSize)
+                    : sMallocTable.malloc(aReqSize));
+}
+
+static void* replace_malloc(size_t aReqSize) {
+  return PageMalloc(Nothing(), aReqSize);
+}
+
+// This handles both calloc and moz_arena_calloc.
+MOZ_ALWAYS_INLINE static void* PageCalloc(const Maybe<arena_id_t>& aArenaId,
+                                          size_t aNum, size_t aReqSize) {
+  CheckedInt<size_t> checkedSize = CheckedInt<size_t>(aNum) * aReqSize;
+  if (!checkedSize.isValid()) {
+    return nullptr;
+  }
+
+  void* ptr = MaybePageAlloc(aArenaId, checkedSize.value(), /* aZero */ true);
+  return ptr ? ptr
+             : (aArenaId.isSome()
+                    ? sMallocTable.moz_arena_calloc(*aArenaId, aNum, aReqSize)
+                    : sMallocTable.calloc(aNum, aReqSize));
+}
+
+static void* replace_calloc(size_t aNum, size_t aReqSize) {
+  return PageCalloc(Nothing(), aNum, aReqSize);
+}
+
+// This function handles both realloc and moz_arena_realloc.
+//
+// As always, realloc is complicated, and doubly so when there are two
+// different kinds of allocations in play. Here are the possible transitions,
+// and what we do in practice.
+//
+// - normal-to-normal: This is straightforward and obviously necessary.
+//
+// - normal-to-page: This is disallowed because it would require getting the
+//   arenaId of the normal allocation, which isn't possible in non-DEBUG
+//   builds for security reasons.
+//
+// - page-to-page: This is done whenever possible, i.e. whenever the new size
+//   is less than or equal to 4 KiB. This choice counterbalances the
+//   disallowing of normal-to-page allocations, in order to avoid biasing
+//   towards or away from page allocations. It always occurs in-place.
+//
+// - page-to-normal: this is done only when necessary, i.e. only when the new
+//   size is greater than 4 KiB. This choice naturally flows from the prior
+//   choice on page-to-page transitions.
+//
+// In summary: realloc doesn't change the allocation kind unless it must.
+//
+MOZ_ALWAYS_INLINE static void* PageRealloc(const Maybe<arena_id_t>& aArenaId,
+                                           void* aOldPtr, size_t aNewSize) {
+  if (!aOldPtr) {
+    // Null pointer. Treat like malloc(aNewSize).
+    return PageMalloc(aArenaId, aNewSize);
+  }
+
+  Maybe<uintptr_t> i = gConst->PageIndex(aOldPtr);
+  if (i.isNothing()) {
+    // A normal-to-normal transition.
+    return aArenaId.isSome()
+               ? sMallocTable.moz_arena_realloc(*aArenaId, aOldPtr, aNewSize)
+               : sMallocTable.realloc(aOldPtr, aNewSize);
+  }
+
+  // A page-to-something transition.
+
+  // Note that `disable` has no effect unless it is emplaced below.
+  Maybe<AutoDisableOnCurrentThread> disable;
+  // Get the stack trace *before* locking the mutex.
+  StackTrace stack;
+  if (GTls::IsDisabledOnCurrentThread()) {
+    // PHC is disabled on this thread. Get a dummy stack.
+    stack.FillSkipped();
+  } else {
+    // Disable on this thread *before* getting the stack trace.
+    disable.emplace();
+    stack.Fill();
+  }
+
+  MutexAutoLock lock(GMut::sMutex);
+
+  // Check for realloc() of a freed block.
+  gMut->EnsureInUse(lock, aOldPtr, *i);
+
+  if (aNewSize <= kPageSize) {
+    // A page-to-page transition. Just keep using the page allocation. We do
+    // this even if the thread is disabled, because it doesn't create a new
+    // page allocation. Note that ResizePageInUse() checks aArenaId.
+    size_t newUsableSize = sMallocTable.malloc_good_size(aNewSize);
+    gMut->ResizePageInUse(lock, *i, aArenaId, newUsableSize, stack);
+    LOG("PageRealloc-Reuse(%p, %zu)\n", aOldPtr, aNewSize);
+    return aOldPtr;
+  }
+
+  // A page-to-normal transition (with the new size greater than page-sized).
+  // (Note that aArenaId is checked below.)
+  void* newPtr;
+  if (aArenaId.isSome()) {
+    newPtr = sMallocTable.moz_arena_malloc(*aArenaId, aNewSize);
+  } else {
+    Maybe<arena_id_t> oldArenaId = gMut->PageArena(lock, *i);
+    newPtr = (oldArenaId.isSome()
+                  ? sMallocTable.moz_arena_malloc(*oldArenaId, aNewSize)
+                  : sMallocTable.malloc(aNewSize));
+  }
+  if (!newPtr) {
+    return nullptr;
+  }
+
+  MOZ_ASSERT(aNewSize > kPageSize);
+
+  Delay reuseDelay = Rnd64ToDelay<kAvgPageReuseDelay>(gMut->Random64(lock));
+
+  // Copy the usable size rather than the requested size, because the user
+  // might have used malloc_usable_size() and filled up the usable size. Note
+  // that FreePage() checks aArenaId (via SetPageFreed()).
+  size_t oldUsableSize = gMut->PageUsableSize(lock, *i);
+  memcpy(newPtr, aOldPtr, std::min(oldUsableSize, aNewSize));
+  FreePage(lock, *i, aArenaId, stack, reuseDelay);
+  LOG("PageRealloc-Free(%p[%zu], %zu) -> %p, %zu delay, reuse at ~%zu\n",
+      aOldPtr, *i, aNewSize, newPtr, size_t(reuseDelay),
+      size_t(GAtomic::Now()) + reuseDelay);
+
+  return newPtr;
+}
+
+static void* replace_realloc(void* aOldPtr, size_t aNewSize) {
+  return PageRealloc(Nothing(), aOldPtr, aNewSize);
+}
+
+// This handles both free and moz_arena_free.
+MOZ_ALWAYS_INLINE static void PageFree(const Maybe<arena_id_t>& aArenaId,
+                                       void* aPtr) {
+  Maybe<uintptr_t> i = gConst->PageIndex(aPtr);
+  if (i.isNothing()) {
+    // Not a page allocation.
+    return aArenaId.isSome() ? sMallocTable.moz_arena_free(*aArenaId, aPtr)
+                             : sMallocTable.free(aPtr);
+  }
+
+  // Note that `disable` has no effect unless it is emplaced below.
+  Maybe<AutoDisableOnCurrentThread> disable;
+  // Get the stack trace *before* locking the mutex.
+  StackTrace freeStack;
+  if (GTls::IsDisabledOnCurrentThread()) {
+    // PHC is disabled on this thread. Get a dummy stack.
+    freeStack.FillSkipped();
+  } else {
+    // Disable on this thread *before* getting the stack trace.
+    disable.emplace();
+    freeStack.Fill();
+  }
+
+  MutexAutoLock lock(GMut::sMutex);
+
+  // Check for a double-free.
+  gMut->EnsureInUse(lock, aPtr, *i);
+
+  // Note that FreePage() checks aArenaId (via SetPageFreed()).
+  Delay reuseDelay = Rnd64ToDelay<kAvgPageReuseDelay>(gMut->Random64(lock));
+  FreePage(lock, *i, aArenaId, freeStack, reuseDelay);
+
+  LOG("PageFree(%p[%zu]), %zu delay, reuse at ~%zu\n", aPtr, *i,
+      size_t(reuseDelay), size_t(GAtomic::Now()) + reuseDelay);
+}
+
+static void replace_free(void* aPtr) { return PageFree(Nothing(), aPtr); }
+
+// This handles memalign and moz_arena_memalign.
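+// (posix_memalign, aligned_alloc, and valloc also end up here: replace_init()
+// below leaves their entries in the malloc table unset, which makes them fall
+// back to this memalign implementation.)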
+MOZ_ALWAYS_INLINE static void* PageMemalign(const Maybe<arena_id_t>& aArenaId,
+                                            size_t aAlignment,
+                                            size_t aReqSize) {
+  // PHC always allocates on a page boundary, so if the alignment required is
+  // no greater than that it'll happen automatically. Otherwise, we can't
+  // satisfy it, so fall back to mozjemalloc.
+  MOZ_ASSERT(IsPowerOfTwo(aAlignment));
+  void* ptr = nullptr;
+  if (aAlignment <= kPageSize) {
+    ptr = MaybePageAlloc(aArenaId, aReqSize, /* aZero */ false);
+  }
+  return ptr ? ptr
+             : (aArenaId.isSome()
+                    ? sMallocTable.moz_arena_memalign(*aArenaId, aAlignment,
+                                                      aReqSize)
+                    : sMallocTable.memalign(aAlignment, aReqSize));
+}
+
+static void* replace_memalign(size_t aAlignment, size_t aReqSize) {
+  return PageMemalign(Nothing(), aAlignment, aReqSize);
+}
+
+static size_t replace_malloc_usable_size(usable_ptr_t aPtr) {
+  Maybe<uintptr_t> i = gConst->PageIndex(aPtr);
+  if (i.isNothing()) {
+    // Not a page allocation. Measure it normally.
+    return sMallocTable.malloc_usable_size(aPtr);
+  }
+
+  MutexAutoLock lock(GMut::sMutex);
+
+  // Check for malloc_usable_size() of a freed block.
+  gMut->EnsureInUse(lock, aPtr, *i);
+
+  return gMut->PageUsableSize(lock, *i);
+}
+
+void replace_jemalloc_stats(jemalloc_stats_t* aStats) {
+  sMallocTable.jemalloc_stats(aStats);
+
+  // Add all the pages to `mapped`.
+  size_t mapped = kAllPagesSize;
+  aStats->mapped += mapped;
+
+  size_t allocated = 0;
+  {
+    MutexAutoLock lock(GMut::sMutex);
+
+    // Add usable space of in-use allocations to `allocated`.
+    for (size_t i = 0; i < kMaxPageAllocs; i++) {
+      if (gMut->IsPageInUse(lock, i)) {
+        allocated += gMut->PageUsableSize(lock, i);
+      }
+    }
+  }
+  aStats->allocated += allocated;
+
+  // Waste is the gap between `allocated` and `mapped`.
+  size_t waste = mapped - allocated;
+  aStats->waste += waste;
+
+  // aStats->page_cache and aStats->bin_unused are left unchanged because PHC
+  // doesn't have anything corresponding to those.
+
+  // gConst and gMut are normal heap allocations, so they're measured by
+  // mozjemalloc as `allocated`. Move them into `bookkeeping`.
+  size_t bookkeeping = sMallocTable.malloc_usable_size(gConst) +
+                       sMallocTable.malloc_usable_size(gMut);
+  aStats->allocated -= bookkeeping;
+  aStats->bookkeeping += bookkeeping;
+}
+
+void replace_jemalloc_ptr_info(const void* aPtr, jemalloc_ptr_info_t* aInfo) {
+  // We need to implement this properly, because various code locations do
+  // things like checking that allocations are in the expected arena.
+  Maybe<uintptr_t> i = gConst->PageIndex(aPtr);
+  if (i.isNothing()) {
+    // Not a page allocation.
+    return sMallocTable.jemalloc_ptr_info(aPtr, aInfo);
+  }
+
+  MutexAutoLock lock(GMut::sMutex);
+
+  gMut->FillJemallocPtrInfo(lock, aPtr, *i, aInfo);
+#if DEBUG
+  LOG("JemallocPtrInfo(%p[%zu]) -> {%zu, %p, %zu, %zu}\n", aPtr, *i,
+      size_t(aInfo->tag), aInfo->addr, aInfo->size, aInfo->arenaId);
+#else
+  LOG("JemallocPtrInfo(%p[%zu]) -> {%zu, %p, %zu}\n", aPtr, *i,
+      size_t(aInfo->tag), aInfo->addr, aInfo->size);
+#endif
+}
+
+arena_id_t replace_moz_create_arena_with_params(arena_params_t* aParams) {
+  // No need to do anything special here.
+  return sMallocTable.moz_create_arena_with_params(aParams);
+}
+
+void replace_moz_dispose_arena(arena_id_t aArenaId) {
+  // No need to do anything special here.
+  return sMallocTable.moz_dispose_arena(aArenaId);
+}
+
+void* replace_moz_arena_malloc(arena_id_t aArenaId, size_t aReqSize) {
+  return PageMalloc(Some(aArenaId), aReqSize);
+}
+
+void* replace_moz_arena_calloc(arena_id_t aArenaId, size_t aNum,
+                               size_t aReqSize) {
+  return PageCalloc(Some(aArenaId), aNum, aReqSize);
+}
+
+void* replace_moz_arena_realloc(arena_id_t aArenaId, void* aOldPtr,
+                                size_t aNewSize) {
+  return PageRealloc(Some(aArenaId), aOldPtr, aNewSize);
+}
+
+void replace_moz_arena_free(arena_id_t aArenaId, void* aPtr) {
+  return PageFree(Some(aArenaId), aPtr);
+}
+
+void* replace_moz_arena_memalign(arena_id_t aArenaId, size_t aAlignment,
+                                 size_t aReqSize) {
+  return PageMemalign(Some(aArenaId), aAlignment, aReqSize);
+}
+
+class PHCBridge : public ReplaceMallocBridge {
+  virtual bool IsPHCAllocation(const void* aPtr,
+                               phc::AddrInfo* aOut) override {
+    Maybe<uintptr_t> i = gConst->PageIndex(aPtr);
+    if (i.isNothing()) {
+      return false;
+    }
+
+    if (aOut) {
+      MutexAutoLock lock(GMut::sMutex);
+      gMut->FillAddrInfo(lock, *i, aPtr, *aOut);
+      LOG("IsPHCAllocation: %zu, %p, %zu, %zu, %zu\n", size_t(aOut->mKind),
+          aOut->mBaseAddr, aOut->mUsableSize, aOut->mAllocStack.mLength,
+          aOut->mFreeStack.mLength);
+    }
+    return true;
+  }
+
+  virtual void DisablePHCOnCurrentThread() override {
+    GTls::DisableOnCurrentThread();
+    LOG("DisablePHCOnCurrentThread: %zu\n", 0ul);
+  }
+
+  virtual void ReenablePHCOnCurrentThread() override {
+    GTls::EnableOnCurrentThread();
+    LOG("ReenablePHCOnCurrentThread: %zu\n", 0ul);
+  }
+
+  virtual bool IsPHCEnabledOnCurrentThread() override {
+    bool enabled = !GTls::IsDisabledOnCurrentThread();
+    LOG("IsPHCEnabledOnCurrentThread: %zu\n", size_t(enabled));
+    return enabled;
+  }
+};
+
+// WARNING: this function runs *very* early -- before all static initializers
+// have run. For this reason, non-scalar globals (gConst, gMut) are allocated
+// dynamically (so we can guarantee their construction in this function)
+// rather than statically. GAtomic and GTls contain simple static data that
+// doesn't involve static initializers so they don't need to be allocated
+// dynamically.
+void replace_init(malloc_table_t* aMallocTable, ReplaceMallocBridge** aBridge) {
+  // Don't run PHC if the page size isn't 4 KiB.
+  jemalloc_stats_t stats;
+  aMallocTable->jemalloc_stats(&stats);
+  if (stats.page_size != kPageSize) {
+    return;
+  }
+
+  sMallocTable = *aMallocTable;
+
+  // The choices of which functions to replace are complex enough that we set
+  // them individually instead of using MALLOC_FUNCS/malloc_decls.h.
+
+  aMallocTable->malloc = replace_malloc;
+  aMallocTable->calloc = replace_calloc;
+  aMallocTable->realloc = replace_realloc;
+  aMallocTable->free = replace_free;
+  aMallocTable->memalign = replace_memalign;
+
+  // posix_memalign, aligned_alloc & valloc: unset, which means they fall back
+  // to replace_memalign.
+  aMallocTable->malloc_usable_size = replace_malloc_usable_size;
+  // malloc_good_size: the default suffices.
+
+  aMallocTable->jemalloc_stats = replace_jemalloc_stats;
+  // jemalloc_purge_freed_pages: the default suffices.
+  // jemalloc_free_dirty_pages: the default suffices.
+  // jemalloc_thread_local_arena: the default suffices.
diff --git a/memory/replace/phc/PHC.h b/memory/replace/phc/PHC.h
new file mode 100644
index 000000000000..5c606f06566e
--- /dev/null
+++ b/memory/replace/phc/PHC.h
@@ -0,0 +1,88 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef PHC_h
+#define PHC_h
+
+#include "mozilla/Assertions.h"
+#include <stdint.h>
+#include <stdlib.h>
+
+namespace mozilla {
+namespace phc {
+
+// Note: a more compact stack trace representation could be achieved with
+// some effort.
+struct StackTrace {
+ public:
+  static const size_t kMaxFrames = 16;
+
+  // The number of PCs in the stack trace.
+  size_t mLength;
+
+  // The PCs in the stack trace. Only the first mLength are initialized.
+  const void* mPcs[kMaxFrames];
+
+ public:
+  StackTrace() : mLength(0) {}
+};
+
+// Info from PHC about an address in memory.
+class AddrInfo {
+ public:
+  enum class Kind {
+    // The address is not in PHC-managed memory.
+    Unknown = 0,
+
+    // The address is within a PHC page that has never been allocated. A crash
+    // involving such an address is unlikely in practice, because it would
+    // require the crash to happen quite early.
+    NeverAllocatedPage = 1,
+
+    // The address is within a PHC page that is in use.
+    InUsePage = 2,
+
+    // The address is within a PHC page that has been allocated and then freed.
+    // A crash involving such an address most likely indicates a
+    // use-after-free. (A sufficiently wild write -- e.g. a large buffer
+    // overflow -- could also trigger it, but this is less likely.)
+    FreedPage = 3,
+
+    // The address is within a PHC guard page. A crash involving such an
+    // address most likely indicates a buffer overflow. (Again, a sufficiently
+    // wild write could unluckily trigger it, but this is less likely.)
+    //
+    // NOTE: guard pages are not yet implemented. This value is present so they
+    // can be added easily in the future.
+    GuardPage = 4,
+  };
+
+  Kind mKind;
+
+  // The base address of the containing PHC allocation, if there is one.
+  const void* mBaseAddr;
+
+  // The usable size of the containing PHC allocation, if there is one.
+  size_t mUsableSize;
+
+  // The allocation and free stack traces of the containing PHC allocation, if
+  // there is one.
+  StackTrace mAllocStack;
+  StackTrace mFreeStack;
+
+  // Default to no PHC info.
+  AddrInfo()
+      : mKind(Kind::Unknown),
+        mBaseAddr(nullptr),
+        mUsableSize(0),
+        mAllocStack(),
+        mFreeStack() {}
+};
+
+}  // namespace phc
+}  // namespace mozilla
+
+#endif /* PHC_h */
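
As a usage sketch of the AddrInfo API above (ClassifyAddress is a hypothetical caller, not part of this patch; it assumes PHC is the active replace-malloc library):

#include "PHC.h"
#include "replace_malloc_bridge.h"

// Map an arbitrary address to a rough verdict via the bridge method that
// this patch adds. Per PHCBridge above, `info` is only filled in when the
// address falls in a PHC-managed page.
static const char* ClassifyAddress(const void* aPtr) {
  mozilla::phc::AddrInfo info;
  if (!ReplaceMalloc::IsPHCAllocation(aPtr, &info)) {
    return "not PHC-managed";  // ordinary mozjemalloc memory
  }
  switch (info.mKind) {
    case mozilla::phc::AddrInfo::Kind::FreedPage:
      return "freed PHC page; a crash here suggests a use-after-free";
    case mozilla::phc::AddrInfo::Kind::InUsePage:
      return "live PHC allocation";
    default:
      return "other PHC-managed page";
  }
}
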
diff --git a/memory/replace/phc/moz.build b/memory/replace/phc/moz.build
new file mode 100644
index 000000000000..145ff40f9d59
--- /dev/null
+++ b/memory/replace/phc/moz.build
@@ -0,0 +1,32 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+ReplaceMalloc('phc')
+
+DEFINES['MOZ_NO_MOZALLOC'] = True
+
+LOCAL_INCLUDES += [
+    '../logalloc',
+    '/memory/build',
+]
+
+EXPORTS += [
+    'PHC.h',
+]
+
+UNIFIED_SOURCES += [
+    'PHC.cpp',
+]
+
+if not CONFIG['MOZ_REPLACE_MALLOC_STATIC']:
+    SOURCES += [
+        '../logalloc/FdPrintf.cpp',
+        '/mozglue/misc/StackWalk.cpp',
+    ]
+
+TEST_DIRS += ['test']
+
+DisableStlWrapping()
diff --git a/memory/replace/phc/test/gtest/TestPHC.cpp b/memory/replace/phc/test/gtest/TestPHC.cpp
new file mode 100644
index 000000000000..c0ec0a54c492
--- /dev/null
+++ b/memory/replace/phc/test/gtest/TestPHC.cpp
@@ -0,0 +1,155 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gtest/gtest.h"
+
+#include "mozmemory.h"
+#include "replace_malloc_bridge.h"
+#include "mozilla/Assertions.h"
+#include "../../PHC.h"
+
+using namespace mozilla;
+
+bool PHCInfoEq(phc::AddrInfo& aInfo, phc::AddrInfo::Kind aKind, void* aBaseAddr,
+               size_t aUsableSize, bool aHasAllocStack, bool aHasFreeStack) {
+  return aInfo.mKind == aKind && aInfo.mBaseAddr == aBaseAddr &&
+         aInfo.mUsableSize == aUsableSize &&
+         // Proper stack traces will have at least 3 elements.
+         (aHasAllocStack ? (aInfo.mAllocStack.mLength > 2)
+                         : (aInfo.mAllocStack.mLength == 0)) &&
+         (aHasFreeStack ? (aInfo.mFreeStack.mLength > 2)
+                        : (aInfo.mFreeStack.mLength == 0));
+}
+
+bool JeInfoEq(jemalloc_ptr_info_t& aInfo, PtrInfoTag aTag, void* aAddr,
+              size_t aSize, arena_id_t arenaId) {
+  return aInfo.tag == aTag && aInfo.addr == aAddr && aInfo.size == aSize
+#ifdef MOZ_DEBUG
+         && aInfo.arenaId == arenaId
+#endif
+      ;
+}
+
+char* GetPHCAllocation(size_t aSize) {
+  // A crude but effective way to get a PHC allocation.
+  for (int i = 0; i < 2000000; i++) {
+    char* p = (char*)malloc(aSize);
+    if (ReplaceMalloc::IsPHCAllocation(p, nullptr)) {
+      return p;
+    }
+    free(p);
+  }
+  return nullptr;
+}
+
+TEST(PHC, TestPHCBasics)
+{
+  int stackVar;
+  phc::AddrInfo phcInfo;
+  jemalloc_ptr_info_t jeInfo;
+
+  // Test a default AddrInfo.
+  ASSERT_TRUE(PHCInfoEq(phcInfo, phc::AddrInfo::Kind::Unknown, nullptr, 0ul,
+                        false, false));
+
+  // Test some non-PHC allocation addresses.
+  ASSERT_FALSE(ReplaceMalloc::IsPHCAllocation(nullptr, &phcInfo));
+  ASSERT_TRUE(PHCInfoEq(phcInfo, phc::AddrInfo::Kind::Unknown, nullptr, 0,
+                        false, false));
+  ASSERT_FALSE(ReplaceMalloc::IsPHCAllocation(&stackVar, &phcInfo));
+  ASSERT_TRUE(PHCInfoEq(phcInfo, phc::AddrInfo::Kind::Unknown, nullptr, 0,
+                        false, false));
+
+  char* p = GetPHCAllocation(32);
+  if (!p) {
+    MOZ_CRASH("failed to get a PHC allocation");
+  }
+
+  // Test an in-use PHC allocation, via its base address.
+  ASSERT_TRUE(ReplaceMalloc::IsPHCAllocation(p, &phcInfo));
+  ASSERT_TRUE(
+      PHCInfoEq(phcInfo, phc::AddrInfo::Kind::InUsePage, p, 32ul, true, false));
+  ASSERT_EQ(malloc_usable_size(p), 32ul);
+  jemalloc_ptr_info(p, &jeInfo);
+  ASSERT_TRUE(JeInfoEq(jeInfo, TagLiveAlloc, p, 32, 0));
+
+  // Test an in-use PHC allocation, via an address in its middle.
+  ASSERT_TRUE(ReplaceMalloc::IsPHCAllocation(p + 10, &phcInfo));
+  ASSERT_TRUE(
+      PHCInfoEq(phcInfo, phc::AddrInfo::Kind::InUsePage, p, 32ul, true, false));
+  ASSERT_EQ(malloc_usable_size(p), 32ul);
+  jemalloc_ptr_info(p + 10, &jeInfo);
+  ASSERT_TRUE(JeInfoEq(jeInfo, TagLiveAlloc, p, 32, 0));
+
+  // Test an in-use PHC allocation, via an address past its end. The results
+  // for phcInfo should be the same, but be different for jeInfo.
+  ASSERT_TRUE(ReplaceMalloc::IsPHCAllocation(p + 64, &phcInfo));
+  ASSERT_TRUE(
+      PHCInfoEq(phcInfo, phc::AddrInfo::Kind::InUsePage, p, 32ul, true, false));
+  jemalloc_ptr_info(p + 64, &jeInfo);
+  ASSERT_TRUE(JeInfoEq(jeInfo, TagUnknown, nullptr, 0, 0));
+
+  free(p);
+
+  // Test a freed PHC allocation, via its base address.
+  ASSERT_TRUE(ReplaceMalloc::IsPHCAllocation(p, &phcInfo));
+  ASSERT_TRUE(
+      PHCInfoEq(phcInfo, phc::AddrInfo::Kind::FreedPage, p, 32ul, true, true));
+  jemalloc_ptr_info(p, &jeInfo);
+  ASSERT_TRUE(JeInfoEq(jeInfo, TagFreedAlloc, p, 32, 0));
+
+  // Test a freed PHC allocation, via an address in its middle.
+  ASSERT_TRUE(ReplaceMalloc::IsPHCAllocation(p + 10, &phcInfo));
+  ASSERT_TRUE(
+      PHCInfoEq(phcInfo, phc::AddrInfo::Kind::FreedPage, p, 32ul, true, true));
+  jemalloc_ptr_info(p + 10, &jeInfo);
+  ASSERT_TRUE(JeInfoEq(jeInfo, TagFreedAlloc, p, 32, 0));
+
+  // Test a freed PHC allocation, via an address past its end.
+  ASSERT_TRUE(ReplaceMalloc::IsPHCAllocation(p + 64, &phcInfo));
+  ASSERT_TRUE(
+      PHCInfoEq(phcInfo, phc::AddrInfo::Kind::FreedPage, p, 32ul, true, true));
+  jemalloc_ptr_info(p + 64, &jeInfo);
+  ASSERT_TRUE(JeInfoEq(jeInfo, TagUnknown, nullptr, 0, 0));
+
+  // There are no tests for `mKind == NeverAllocatedPage` because it's not
+  // possible to reliably get ahold of such a page.
+
+  // There are no tests for `mKind == GuardPage` because it's currently not
+  // implemented.
+}
+
+TEST(PHC, TestPHCDisabling)
+{
+  char* p = GetPHCAllocation(32);
+  char* q = GetPHCAllocation(32);
+  if (!p || !q) {
+    MOZ_CRASH("failed to get a PHC allocation");
+  }
+
+  ASSERT_TRUE(ReplaceMalloc::IsPHCEnabledOnCurrentThread());
+  ReplaceMalloc::DisablePHCOnCurrentThread();
+  ASSERT_FALSE(ReplaceMalloc::IsPHCEnabledOnCurrentThread());
+
+  // Test realloc() on a PHC allocation while PHC is disabled on the thread.
+  char* p2 = (char*)realloc(p, 128);
+  // The small realloc is in-place.
+  ASSERT_TRUE(p2 == p);
+  ASSERT_TRUE(ReplaceMalloc::IsPHCAllocation(p2, nullptr));
+  char* p3 = (char*)realloc(p2, 8192);
+  // The big realloc is not in-place, and the result is not a PHC allocation.
+ ASSERT_TRUE(p3 != p2); + ASSERT_FALSE(ReplaceMalloc::IsPHCAllocation(p3, nullptr)); + free(p3); + + // Test free() on a PHC allocation while PHC is disabled on the thread. + free(q); + + // These must not be PHC allocations. + char* r = GetPHCAllocation(32); // This will fail. + ASSERT_FALSE(!!r); + + ReplaceMalloc::ReenablePHCOnCurrentThread(); + ASSERT_TRUE(ReplaceMalloc::IsPHCEnabledOnCurrentThread()); +} diff --git a/memory/replace/phc/test/gtest/moz.build b/memory/replace/phc/test/gtest/moz.build new file mode 100644 index 000000000000..5d5f706c39bf --- /dev/null +++ b/memory/replace/phc/test/gtest/moz.build @@ -0,0 +1,15 @@ +# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*- +# vim: set filetype=python: +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +UNIFIED_SOURCES += [ + 'TestPHC.cpp', +] + +LOCAL_INCLUDES += [ + '../../', +] + +FINAL_LIBRARY = 'xul-gtest' diff --git a/memory/replace/phc/test/moz.build b/memory/replace/phc/test/moz.build new file mode 100644 index 000000000000..2abb716eebe4 --- /dev/null +++ b/memory/replace/phc/test/moz.build @@ -0,0 +1,9 @@ +# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*- +# vim: set filetype=python: +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +# The gtests won't work in a SpiderMonkey-only build. +if CONFIG['MOZ_WIDGET_TOOLKIT']: + TEST_DIRS += ['gtest'] diff --git a/toolkit/crashreporter/CrashAnnotations.yaml b/toolkit/crashreporter/CrashAnnotations.yaml index 4c7eb821e8db..b9422b22b87b 100644 --- a/toolkit/crashreporter/CrashAnnotations.yaml +++ b/toolkit/crashreporter/CrashAnnotations.yaml @@ -350,6 +350,40 @@ GraphicsStartupTest: Set to 1 by the graphics driver crash guard when it's activated. type: boolean +PHCKind: + description: > + The allocation kind, if the crash involved a bad access of a special PHC + allocation. + type: string + +PHCBaseAddress: + description: > + The allocation's base address, if the crash involved a bad access of a + special PHC allocation. Encoded as a decimal address. + type: string + +PHCUsableSize: + description: > + The allocation's usable size, if the crash involved a bad access of a + special PHC allocation. + # A 32-bit integer is enough because the maximum usable size of a special PHC + # allocation is far less than 2 GiB. + type: integer + +PHCAllocStack: + description: > + The allocation's allocation stack trace, if the crash involved a bad access + of a special PHC allocation. Encoded as a comma-separated list of decimal + addresses. + type: string + +PHCFreeStack: + description: > + The allocation's free stack trace, if the crash involved a bad access + of a special PHC allocation. Encoded as a comma-separated list of decimal + addresses. + type: string + HangMonitorDescription: description: > Name of the hang monitor that generated the crash. 
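
For illustration, here is roughly how these annotations might look in a crash's extra data for a use-after-free caught by PHC (all values invented):

    PHCKind=FreedPage
    PHCBaseAddress=139993307882496
    PHCUsableSize=32
    PHCAllocStack=139993314606752,139993314607168,139993319337376
    PHCFreeStack=139993314608320,139993319338464,139993319340001
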
diff --git a/toolkit/crashreporter/breakpad-client/linux/handler/exception_handler.cc b/toolkit/crashreporter/breakpad-client/linux/handler/exception_handler.cc index 2b3aafcfc94e..4ae454d23363 100644 --- a/toolkit/crashreporter/breakpad-client/linux/handler/exception_handler.cc +++ b/toolkit/crashreporter/breakpad-client/linux/handler/exception_handler.cc @@ -98,6 +98,10 @@ #include "third_party/lss/linux_syscall_support.h" #include "prenv.h" +#ifdef MOZ_PHC +#include "replace_malloc_bridge.h" +#endif + #if defined(__ANDROID__) #include "linux/sched.h" #endif @@ -446,10 +450,24 @@ int ExceptionHandler::ThreadEntry(void *arg) { thread_arg->context_size) == false; } +#ifdef MOZ_PHC +void GetPHCAddrInfo(siginfo_t* siginfo, mozilla::phc::AddrInfo* addr_info) { + // Is this a crash involving a PHC allocation? + if (siginfo->si_signo == SIGSEGV || siginfo->si_signo == SIGBUS) { + ReplaceMalloc::IsPHCAllocation(siginfo->si_addr, addr_info); + } +} +#endif + // This function runs in a compromised context: see the top of the file. // Runs on the crashing thread. bool ExceptionHandler::HandleSignal(int /*sig*/, siginfo_t* info, void* uc) { - if (filter_ && !filter_(callback_context_)) + mozilla::phc::AddrInfo addr_info; +#ifdef MOZ_PHC + GetPHCAddrInfo(info, &addr_info); +#endif + + if (filter_ && !filter_(callback_context_, &addr_info)) return false; // Allow ourselves to be dumped if the signal is trusted. @@ -489,7 +507,8 @@ bool ExceptionHandler::HandleSignal(int /*sig*/, siginfo_t* info, void* uc) { return true; } } - return GenerateDump(&g_crash_context_); + + return GenerateDump(&g_crash_context_, &addr_info); } // This is a public interface to HandleSignal that allows the client to @@ -506,7 +525,8 @@ bool ExceptionHandler::SimulateSignalDelivery(int sig) { } // This function may run in a compromised context: see the top of the file. -bool ExceptionHandler::GenerateDump(CrashContext *context) { +bool ExceptionHandler::GenerateDump( + CrashContext *context, const mozilla::phc::AddrInfo* addr_info) { if (IsOutOfProcess()) return crash_generation_client_->RequestDump(context, sizeof(*context)); @@ -591,7 +611,8 @@ bool ExceptionHandler::GenerateDump(CrashContext *context) { bool success = r != -1 && WIFEXITED(status) && WEXITSTATUS(status) == 0; if (callback_) - success = callback_(minidump_descriptor_, callback_context_, success); + success = + callback_(minidump_descriptor_, callback_context_, addr_info, success); return success; } @@ -765,7 +786,8 @@ bool ExceptionHandler::WriteMinidump() { #error "This code has not been ported to your platform yet." #endif - return GenerateDump(&context); + // nullptr here for phc::AddrInfo* is ok because this is not a crash. + return GenerateDump(&context, nullptr); } void ExceptionHandler::AddMappingInfo(const string& name, @@ -822,7 +844,9 @@ bool ExceptionHandler::WriteMinidumpForChild(pid_t child, child_blamed_thread)) return false; - return callback ? callback(descriptor, callback_context, true) : true; + // nullptr here for phc::AddrInfo* is ok because this is not a crash. + return callback ? 
callback(descriptor, callback_context, nullptr, true) + : true; } void SetFirstChanceExceptionHandler(FirstChanceHandler callback) { diff --git a/toolkit/crashreporter/breakpad-client/linux/handler/exception_handler.h b/toolkit/crashreporter/breakpad-client/linux/handler/exception_handler.h index 2d3dce467f37..3273233dd091 100644 --- a/toolkit/crashreporter/breakpad-client/linux/handler/exception_handler.h +++ b/toolkit/crashreporter/breakpad-client/linux/handler/exception_handler.h @@ -44,6 +44,12 @@ #include "common/using_std_string.h" #include "google_breakpad/common/minidump_format.h" +#ifdef MOZ_PHC +#include "PHC.h" +#else +namespace mozilla { namespace phc { class AddrInfo {}; } } +#endif + namespace google_breakpad { // ExceptionHandler @@ -82,7 +88,8 @@ class ExceptionHandler { // attempting to write a minidump. If a FilterCallback returns false, // Breakpad will immediately report the exception as unhandled without // writing a minidump, allowing another handler the opportunity to handle it. - typedef bool (*FilterCallback)(void *context); + typedef bool (*FilterCallback)(void *context, + const mozilla::phc::AddrInfo* addr_info); // A callback function to run after the minidump has been written. // |descriptor| contains the file descriptor or file path containing the @@ -102,6 +109,7 @@ class ExceptionHandler { // return true directly (unless |succeeded| is true). typedef bool (*MinidumpCallback)(const MinidumpDescriptor& descriptor, void* context, + const mozilla::phc::AddrInfo* addr_info, bool succeeded); // In certain cases, a user may wish to handle the generation of the minidump @@ -234,7 +242,8 @@ class ExceptionHandler { static void RestoreHandlersLocked(); void PreresolveSymbols(); - bool GenerateDump(CrashContext *context); + bool GenerateDump(CrashContext *context, + const mozilla::phc::AddrInfo* addr_info); void SendContinueSignalToChild(); void WaitForContinueSignal(); diff --git a/toolkit/crashreporter/breakpad-client/linux/moz.build b/toolkit/crashreporter/breakpad-client/linux/moz.build index 3edab224b583..e3fd16c4eb73 100644 --- a/toolkit/crashreporter/breakpad-client/linux/moz.build +++ b/toolkit/crashreporter/breakpad-client/linux/moz.build @@ -33,3 +33,6 @@ if CONFIG['OS_TARGET'] == 'Android': FINAL_LIBRARY = 'breakpad_client' include('/toolkit/crashreporter/crashreporter.mozbuild') + +if CONFIG['MOZ_PHC']: + DEFINES['MOZ_PHC'] = True diff --git a/toolkit/crashreporter/breakpad-client/mac/handler/exception_handler.cc b/toolkit/crashreporter/breakpad-client/mac/handler/exception_handler.cc index 64377079abae..918a3bb08c17 100644 --- a/toolkit/crashreporter/breakpad-client/mac/handler/exception_handler.cc +++ b/toolkit/crashreporter/breakpad-client/mac/handler/exception_handler.cc @@ -343,7 +343,7 @@ bool ExceptionHandler::WriteMinidumpForChild(mach_port_t child, if (callback) { return callback(dump_path.c_str(), dump_id.c_str(), - callback_context, result); + callback_context, nullptr, result); } return result; } @@ -377,7 +377,7 @@ bool ExceptionHandler::WriteMinidumpWithException( if (exception_type && exception_code) { // If this is a real exception, give the filter (if any) a chance to // decide if this should be sent. 
- if (filter_ && !filter_(callback_context_)) + if (filter_ && !filter_(callback_context_, nullptr)) return false; result = crash_generation_client_->RequestDumpForException( exception_type, @@ -402,7 +402,7 @@ bool ExceptionHandler::WriteMinidumpWithException( if (exception_type && exception_code) { // If this is a real exception, give the filter (if any) a chance to // decide if this should be sent. - if (filter_ && !filter_(callback_context_)) + if (filter_ && !filter_(callback_context_, nullptr)) return false; md.SetExceptionInformation(exception_type, exception_code, @@ -418,7 +418,7 @@ bool ExceptionHandler::WriteMinidumpWithException( // (rather than just writing out the file), then we should exit without // forwarding the exception to the next handler. if (callback_(dump_path_c_, next_minidump_id_c_, callback_context_, - result)) { + nullptr, result)) { if (exit_after_write) _exit(exception_type); } diff --git a/toolkit/crashreporter/breakpad-client/mac/handler/exception_handler.h b/toolkit/crashreporter/breakpad-client/mac/handler/exception_handler.h index 45038ce4e31d..1a0abcec3b9d 100644 --- a/toolkit/crashreporter/breakpad-client/mac/handler/exception_handler.h +++ b/toolkit/crashreporter/breakpad-client/mac/handler/exception_handler.h @@ -48,6 +48,12 @@ #include "mac/crash_generation/crash_generation_client.h" #endif +#ifdef MOZ_PHC +#include "PHC.h" +#else +namespace mozilla { namespace phc { class AddrInfo {}; } } +#endif + namespace google_breakpad { using std::string; @@ -75,7 +81,8 @@ class ExceptionHandler { // attempting to write a minidump. If a FilterCallback returns false, Breakpad // will immediately report the exception as unhandled without writing a // minidump, allowing another handler the opportunity to handle it. - typedef bool (*FilterCallback)(void *context); + typedef bool (*FilterCallback)(void *context, + const mozilla::phc::AddrInfo* addr_info); // A callback function to run after the minidump has been written. // |minidump_id| is a unique id for the dump, so the minidump @@ -87,7 +94,9 @@ class ExceptionHandler { // exception. typedef bool (*MinidumpCallback)(const char *dump_dir, const char *minidump_id, - void *context, bool succeeded); + void *context, + const mozilla::phc::AddrInfo* addr_info, + bool succeeded); // A callback function which will be called directly if an exception occurs. // This bypasses the minidump file writing and simply gives the client diff --git a/toolkit/crashreporter/breakpad-client/windows/handler/exception_handler.cc b/toolkit/crashreporter/breakpad-client/windows/handler/exception_handler.cc index c493aae78c9c..2c764af0d9dc 100644 --- a/toolkit/crashreporter/breakpad-client/windows/handler/exception_handler.cc +++ b/toolkit/crashreporter/breakpad-client/windows/handler/exception_handler.cc @@ -824,7 +824,7 @@ bool ExceptionHandler::WriteMinidumpForChild(HANDLE child, if (callback) { success = callback(handler.dump_path_c_, handler.next_minidump_id_c_, - callback_context, NULL, NULL, success); + callback_context, NULL, NULL, nullptr, success); } return success; @@ -840,7 +840,7 @@ bool ExceptionHandler::WriteMinidumpWithException( // HandleException to call any previous handler or return // EXCEPTION_CONTINUE_SEARCH on the exception thread, allowing it to appear // as though this handler were not present at all. 
- if (filter_ && !filter_(callback_context_, exinfo, assertion)) { + if (filter_ && !filter_(callback_context_, exinfo, nullptr, assertion)) { return false; } @@ -861,7 +861,7 @@ bool ExceptionHandler::WriteMinidumpWithException( // scenario, the server process ends up creating the dump path and dump // id so they are not known to the client. success = callback_(dump_path_c_, next_minidump_id_c_, callback_context_, - exinfo, assertion, success); + exinfo, assertion, nullptr, success); } return success; diff --git a/toolkit/crashreporter/breakpad-client/windows/handler/exception_handler.h b/toolkit/crashreporter/breakpad-client/windows/handler/exception_handler.h index 7d923f22a2d6..4c13db2722b0 100644 --- a/toolkit/crashreporter/breakpad-client/windows/handler/exception_handler.h +++ b/toolkit/crashreporter/breakpad-client/windows/handler/exception_handler.h @@ -75,6 +75,12 @@ #include "common/scoped_ptr.h" #include "google_breakpad/common/minidump_format.h" +#ifdef MOZ_PHC +#include "PHC.h" +#else +namespace mozilla { namespace phc { class AddrInfo {}; } } +#endif + namespace google_breakpad { using std::vector; @@ -94,6 +100,7 @@ class ExceptionHandler { // Breakpad will immediately report the exception as unhandled without // writing a minidump, allowing another handler the opportunity to handle it. typedef bool (*FilterCallback)(void* context, EXCEPTION_POINTERS* exinfo, + const mozilla::phc::AddrInfo* addr_info, MDRawAssertionInfo* assertion); // A callback function to run after the minidump has been written. @@ -125,6 +132,7 @@ class ExceptionHandler { void* context, EXCEPTION_POINTERS* exinfo, MDRawAssertionInfo* assertion, + const mozilla::phc::AddrInfo* addr_info, bool succeeded); // HandlerType specifies which types of handlers should be installed, if diff --git a/toolkit/crashreporter/moz.build b/toolkit/crashreporter/moz.build index f30a20e04b7d..40ff97278732 100644 --- a/toolkit/crashreporter/moz.build +++ b/toolkit/crashreporter/moz.build @@ -115,6 +115,9 @@ if CONFIG['MOZ_CRASHREPORTER']: DEFINES['UNICODE'] = True DEFINES['_UNICODE'] = True + if CONFIG['MOZ_PHC']: + DEFINES['MOZ_PHC'] = True + LOCAL_INCLUDES += [ 'google-breakpad/src', ] diff --git a/toolkit/crashreporter/nsExceptionHandler.cpp b/toolkit/crashreporter/nsExceptionHandler.cpp index e00bf2715e5a..ec32995feb4e 100644 --- a/toolkit/crashreporter/nsExceptionHandler.cpp +++ b/toolkit/crashreporter/nsExceptionHandler.cpp @@ -668,6 +668,72 @@ class BinaryAnnotationWriter : public AnnotationWriter { PlatformWriter& mPlatformWriter; }; +#ifdef MOZ_PHC +// The stack traces are encoded as a comma-separated list of decimal +// (not hexadecimal!) addresses, e.g. "12345678,12345679,12345680". +static void WritePHCStackTrace(AnnotationWriter& aWriter, + const Annotation aName, + const phc::StackTrace* aStack) { + // 21 is the max length of a 64-bit decimal address entry, including the + // trailing comma or '\0'. And then we add another 32 just to be safe. + char addrsString[mozilla::phc::StackTrace::kMaxFrames * 21 + 32]; + char addrString[32]; + char* p = addrsString; + *p = 0; + for (size_t i = 0; i < aStack->mLength; i++) { + if (i != 0) { + strcat(addrsString, ","); + p++; + } + XP_STOA(uintptr_t(aStack->mPcs[i]), addrString); + strcat(addrsString, addrString); + } + aWriter.Write(aName, addrsString); +} + +static void WritePHCAddrInfo(AnnotationWriter& writer, + const phc::AddrInfo* aAddrInfo) { + // Is this a PHC allocation needing special treatment? 
+  if (aAddrInfo && aAddrInfo->mKind != phc::AddrInfo::Kind::Unknown) {
+    const char* kindString;
+    switch (aAddrInfo->mKind) {
+      case phc::AddrInfo::Kind::Unknown:
+        kindString = "Unknown(?!)";
+        break;
+      case phc::AddrInfo::Kind::NeverAllocatedPage:
+        kindString = "NeverAllocatedPage";
+        break;
+      case phc::AddrInfo::Kind::InUsePage:
+        kindString = "InUsePage(?!)";
+        break;
+      case phc::AddrInfo::Kind::FreedPage:
+        kindString = "FreedPage";
+        break;
+      case phc::AddrInfo::Kind::GuardPage:
+        kindString = "GuardPage";
+        break;
+      default:
+        kindString = "Unmatched(?!)";
+        break;
+    }
+    writer.Write(Annotation::PHCKind, kindString);
+
+    char baseAddrString[32];
+    XP_STOA(uintptr_t(aAddrInfo->mBaseAddr), baseAddrString);
+    writer.Write(Annotation::PHCBaseAddress, baseAddrString);
+
+    char usableSizeString[32];
+    XP_TTOA(aAddrInfo->mUsableSize, usableSizeString);
+    writer.Write(Annotation::PHCUsableSize, usableSizeString);
+
+    WritePHCStackTrace(writer, Annotation::PHCAllocStack,
+                       &aAddrInfo->mAllocStack);
+    WritePHCStackTrace(writer, Annotation::PHCFreeStack,
+                       &aAddrInfo->mFreeStack);
+  }
+}
+#endif
+
 /**
  * If minidump_id is null, we assume that dump_path contains the full
  * dump file path.
@@ -885,6 +951,7 @@ static void WriteMozCrashReason(AnnotationWriter& aWriter) {
 }
 
 static void WriteAnnotationsForMainProcessCrash(PlatformWriter& pw,
+                                                const phc::AddrInfo* addrInfo,
                                                 time_t crashTime) {
   INIAnnotationWriter writer(pw);
   for (auto key : MakeEnumeratedRange(Annotation::Count)) {
@@ -961,6 +1028,10 @@ static void WriteAnnotationsForMainProcessCrash(PlatformWriter& pw,
     writer.Write(Annotation::ContainsMemoryReport, "1");
   }
 
+#ifdef MOZ_PHC
+  WritePHCAddrInfo(writer, addrInfo);
+#endif
+
   std::function<void(const char*)> getThreadAnnotationCB =
       [&](const char* aValue) -> void {
     if (aValue) {
@@ -971,6 +1042,7 @@
 }
 
 static void WriteCrashEventFile(time_t crashTime, const char* crashTimeString,
+                                const phc::AddrInfo* addrInfo,
 #ifdef XP_LINUX
                                 const MinidumpDescriptor& descriptor
 #else
@@ -1013,7 +1085,7 @@ static void WriteCrashEventFile(time_t crashTime, const char* crashTimeString,
     WriteLiteral(eventFile, "\n");
     WriteString(eventFile, id_ascii);
     WriteLiteral(eventFile, "\n");
-    WriteAnnotationsForMainProcessCrash(eventFile, crashTime);
+    WriteAnnotationsForMainProcessCrash(eventFile, addrInfo, crashTime);
   }
 }
 
@@ -1033,7 +1105,7 @@ bool MinidumpCallback(
 #ifdef XP_WIN
     EXCEPTION_POINTERS* exinfo, MDRawAssertionInfo* assertion,
 #endif
-    bool succeeded) {
+    const phc::AddrInfo* addrInfo, bool succeeded) {
   bool returnValue = showOSCrashReporter ? false : succeeded;
   static XP_CHAR minidumpPath[XP_PATH_MAX];
@@ -1086,7 +1158,7 @@ bool MinidumpCallback(
     WriteString(lastCrashFile, crashTimeString);
   }
 
-  WriteCrashEventFile(crashTime, crashTimeString,
+  WriteCrashEventFile(crashTime, crashTimeString, addrInfo,
 #ifdef XP_LINUX
                       descriptor
 #else
@@ -1100,7 +1172,7 @@ bool MinidumpCallback(
 #else
   OpenAPIData(apiData, dump_path, minidump_id);
 #endif
-  WriteAnnotationsForMainProcessCrash(apiData, crashTime);
+  WriteAnnotationsForMainProcessCrash(apiData, addrInfo, crashTime);
 
   if (!doReport) {
 #ifdef XP_WIN
@@ -1220,7 +1292,8 @@ static bool BuildTempPath(PathStringT& aResult) {
   return true;
 }
 
-static void PrepareChildExceptionTimeAnnotations(void* context) {
+static void PrepareChildExceptionTimeAnnotations(
+    void* context, const phc::AddrInfo* addrInfo) {
   MOZ_ASSERT(!XRE_IsParentProcess());
 
   FileHandle f;
@@ -1245,6 +1318,10 @@ static void PrepareChildExceptionTimeAnnotations(void* context) {
 
   WriteMozCrashReason(writer);
 
+#ifdef MOZ_PHC
+  WritePHCAddrInfo(writer, addrInfo);
+#endif
+
   std::function<void(const char*)> getThreadAnnotationCB =
       [&](const char* aValue) -> void {
     if (aValue) {
@@ -1275,6 +1352,7 @@ static void FreeBreakpadVM() {
  * Also calls FreeBreakpadVM if appropriate.
  */
 static bool FPEFilter(void* context, EXCEPTION_POINTERS* exinfo,
+                      const phc::AddrInfo* addr_info,
                       MDRawAssertionInfo* assertion) {
   if (!exinfo) {
     mozilla::IOInterposer::Disable();
@@ -1307,10 +1385,11 @@ static bool FPEFilter(void* context, EXCEPTION_POINTERS* exinfo,
 }
 
 static bool ChildFPEFilter(void* context, EXCEPTION_POINTERS* exinfo,
+                           const phc::AddrInfo* addrInfo,
                            MDRawAssertionInfo* assertion) {
-  bool result = FPEFilter(context, exinfo, assertion);
+  bool result = FPEFilter(context, exinfo, addrInfo, assertion);
   if (result) {
-    PrepareChildExceptionTimeAnnotations(context);
+    PrepareChildExceptionTimeAnnotations(context, addrInfo);
   }
   return result;
 }
@@ -1364,14 +1443,14 @@ static bool ShouldReport() {
 
 #if !defined(XP_WIN)
 
-static bool Filter(void* context) {
+static bool Filter(void* context, const phc::AddrInfo* addrInfo) {
   mozilla::IOInterposer::Disable();
   return true;
 }
 
-static bool ChildFilter(void* context) {
+static bool ChildFilter(void* context, const phc::AddrInfo* addrInfo) {
   mozilla::IOInterposer::Disable();
-  PrepareChildExceptionTimeAnnotations(context);
+  PrepareChildExceptionTimeAnnotations(context, addrInfo);
   return true;
 }
 
@@ -3310,7 +3389,7 @@ static bool PairedDumpCallback(
 #ifdef XP_WIN
     EXCEPTION_POINTERS* /*unused*/, MDRawAssertionInfo* /*unused*/,
 #endif
-    bool succeeded) {
+    const phc::AddrInfo* addrInfo, bool succeeded) {
   nsCOMPtr<nsIFile>& minidump = *static_cast<nsCOMPtr<nsIFile>*>(context);
 
   xpstring path;
diff --git a/toolkit/crashreporter/test/CrashTestUtils.jsm b/toolkit/crashreporter/test/CrashTestUtils.jsm
index 42fdeac252c1..4c22437fac49 100644
--- a/toolkit/crashreporter/test/CrashTestUtils.jsm
+++ b/toolkit/crashreporter/test/CrashTestUtils.jsm
@@ -31,6 +31,8 @@ var CrashTestUtils = {
   CRASH_X64CFI_SAVE_XMM128_FAR: 18,
   CRASH_X64CFI_EPILOG: 19,
   CRASH_X64CFI_EOF: 20,
+  CRASH_PHC_USE_AFTER_FREE: 21,
+  CRASH_PHC_DOUBLE_FREE: 22,
 
   // Constants for dumpHasStream()
   // From google_breakpad/common/minidump_format.h
diff --git a/toolkit/crashreporter/test/moz.build b/toolkit/crashreporter/test/moz.build
index 85a6ebf992ff..157551323cf9 100755
--- a/toolkit/crashreporter/test/moz.build
+++ b/toolkit/crashreporter/test/moz.build
@@ -5,7 +5,16 @@
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 FINAL_TARGET = '_tests/xpcshell/toolkit/crashreporter/test'
 
-XPCSHELL_TESTS_MANIFESTS += ['unit/xpcshell.ini', 'unit_ipc/xpcshell.ini']
+XPCSHELL_TESTS_MANIFESTS += [
+    'unit/xpcshell.ini',
+    'unit_ipc/xpcshell.ini'
+]
+if CONFIG['MOZ_PHC']:
+    XPCSHELL_TESTS_MANIFESTS += [
+        'unit/xpcshell-phc.ini',
+        'unit_ipc/xpcshell-phc.ini'
+    ]
+
 BROWSER_CHROME_MANIFESTS += ['browser/browser.ini']
 
 UNIFIED_SOURCES += [
@@ -41,6 +50,9 @@ else:
         '-fexceptions',
     ]
 
+if CONFIG['MOZ_PHC']:
+    DEFINES['MOZ_PHC'] = True
+
 GeckoSharedLibrary('testcrasher')
 
 DEFINES['SHARED_LIBRARY'] = '%s%s%s' % (
diff --git a/toolkit/crashreporter/test/nsTestCrasher.cpp b/toolkit/crashreporter/test/nsTestCrasher.cpp
index b6cb1c3130cd..0f944d6abca6 100644
--- a/toolkit/crashreporter/test/nsTestCrasher.cpp
+++ b/toolkit/crashreporter/test/nsTestCrasher.cpp
@@ -12,6 +12,10 @@
 #  include <malloc.h>
 #endif
 
+#ifdef MOZ_PHC
+#  include "replace_malloc_bridge.h"
+#endif
+
 /*
  * This pure virtual call example is from MSDN
  */
@@ -77,6 +81,8 @@ const int16_t CRASH_X64CFI_SAVE_XMM128 = 17;
 const int16_t CRASH_X64CFI_SAVE_XMM128_FAR = 18;
 const int16_t CRASH_X64CFI_EPILOG = 19;
 const int16_t CRASH_X64CFI_EOF = 20;
+const int16_t CRASH_PHC_USE_AFTER_FREE = 21;
+const int16_t CRASH_PHC_DOUBLE_FREE = 22;
 
 #if XP_WIN && HAVE_64BIT_BUILD && defined(_M_X64) && !defined(__MINGW32__)
@@ -118,6 +124,21 @@ void MOZ_NEVER_INLINE ReserveStack() {
 
 #endif  // XP_WIN && HAVE_64BIT_BUILD
 
+#ifdef MOZ_PHC
+char* GetPHCAllocation(size_t aSize) {
+  // A crude but effective way to get a PHC allocation.
+  for (int i = 0; i < 2000000; i++) {
+    char* p = (char*)malloc(aSize);
+    if (ReplaceMalloc::IsPHCAllocation(p, nullptr)) {
+      return p;
+    }
+    free(p);
+  }
+  // This failure doesn't seem to occur in practice...
+  MOZ_CRASH("failed to get a PHC allocation");
+}
+#endif
+
 extern "C" NS_EXPORT void Crash(int16_t how) {
   switch (how) {
     case CRASH_INVALID_POINTER_DEREF: {
@@ -170,6 +191,22 @@ extern "C" NS_EXPORT void Crash(int16_t how) {
       break;
     }
 #endif  // XP_WIN && HAVE_64BIT_BUILD && !defined(__MINGW32__)
+#ifdef MOZ_PHC
+    case CRASH_PHC_USE_AFTER_FREE: {
+      // Do a UAF, triggering a crash.
+      char* p = GetPHCAllocation(32);
+      free(p);
+      *p = 0;
+      // not reached
+    }
+    case CRASH_PHC_DOUBLE_FREE: {
+      // Do a double free, triggering a crash.
+      char* p = GetPHCAllocation(64);
+      free(p);
+      free(p);
+      // not reached
+    }
+#endif
     default:
       break;
   }
diff --git a/toolkit/crashreporter/test/unit/test_crash_phc.js b/toolkit/crashreporter/test/unit/test_crash_phc.js
new file mode 100644
index 000000000000..d07c5ce9ca7c
--- /dev/null
+++ b/toolkit/crashreporter/test/unit/test_crash_phc.js
@@ -0,0 +1,43 @@
+function check(extra, size) {
+  Assert.equal(extra.PHCKind, "FreedPage");
+
+  // This is a string holding a decimal address.
+  Assert.ok(/^\d+$/.test(extra.PHCBaseAddress));
+
+  Assert.equal(extra.PHCUsableSize, size);
+
+  // These are strings holding comma-separated lists of decimal addresses.
+  Assert.ok(/^(\d+,)+\d+$/.test(extra.PHCAllocStack));
+  Assert.ok(/^(\d+,)+\d+$/.test(extra.PHCFreeStack));
+}
+
+function run_test() {
+  if (!("@mozilla.org/toolkit/crash-reporter;1" in Cc)) {
+    dump(
+      "INFO | test_crash_phc.js | Can't test crashreporter in a non-libxul build.\n"
+    );
+    return;
+  }
+
+  do_crash(
+    function() {
+      crashType = CrashTestUtils.CRASH_PHC_USE_AFTER_FREE;
+    },
+    function(mdump, extra) {
+      // CRASH_PHC_USE_AFTER_FREE uses 32 for the size.
+      check(extra, 32);
+    },
+    true
+  );
+
+  do_crash(
+    function() {
+      crashType = CrashTestUtils.CRASH_PHC_DOUBLE_FREE;
+    },
+    function(mdump, extra) {
+      // CRASH_PHC_DOUBLE_FREE uses 64 for the size.
+      check(extra, 64);
+    },
+    true
+  );
+}
diff --git a/toolkit/crashreporter/test/unit/xpcshell-phc.ini b/toolkit/crashreporter/test/unit/xpcshell-phc.ini
new file mode 100644
index 000000000000..056bbd6fbeeb
--- /dev/null
+++ b/toolkit/crashreporter/test/unit/xpcshell-phc.ini
@@ -0,0 +1,9 @@
+[DEFAULT]
+head = head_crashreporter.js
+skip-if = toolkit == 'android' || (os == "win" && processor == "aarch64") # 1536217
+support-files =
+  crasher_subprocess_head.js
+  crasher_subprocess_tail.js
+
+[test_crash_phc.js]
+
diff --git a/toolkit/crashreporter/test/unit_ipc/test_content_phc.js b/toolkit/crashreporter/test/unit_ipc/test_content_phc.js
new file mode 100644
index 000000000000..65fe1e147c85
--- /dev/null
+++ b/toolkit/crashreporter/test/unit_ipc/test_content_phc.js
@@ -0,0 +1,30 @@
+/* import-globals-from ../unit/head_crashreporter.js */
+load("../unit/head_crashreporter.js");
+
+function run_test() {
+  if (!("@mozilla.org/toolkit/crash-reporter;1" in Cc)) {
+    dump(
+      "INFO | test_content_phc.js | Can't test crashreporter in a non-libxul build.\n"
+    );
+    return;
+  }
+
+  do_content_crash(
+    function() {
+      crashType = CrashTestUtils.CRASH_PHC_USE_AFTER_FREE;
+    },
+    function(mdump, extra) {
+      Assert.equal(extra.PHCKind, "FreedPage");
+
+      // This is a string holding a decimal address.
+      Assert.ok(/^\d+$/.test(extra.PHCBaseAddress));
+
+      // CRASH_PHC_USE_AFTER_FREE uses 32 for the size.
+      Assert.equal(extra.PHCUsableSize, 32);
+
+      // These are strings holding comma-separated lists of decimal addresses.
+      Assert.ok(/^(\d+,)+\d+$/.test(extra.PHCAllocStack));
+      Assert.ok(/^(\d+,)+\d+$/.test(extra.PHCFreeStack));
+    }
+  );
+}
diff --git a/toolkit/crashreporter/test/unit_ipc/test_content_phc2.js b/toolkit/crashreporter/test/unit_ipc/test_content_phc2.js
new file mode 100644
index 000000000000..6bde22b2e2fa
--- /dev/null
+++ b/toolkit/crashreporter/test/unit_ipc/test_content_phc2.js
@@ -0,0 +1,33 @@
+/* import-globals-from ../unit/head_crashreporter.js */
+load("../unit/head_crashreporter.js");
+
+function run_test() {
+  if (!("@mozilla.org/toolkit/crash-reporter;1" in Cc)) {
+    dump(
+      "INFO | test_content_phc2.js | Can't test crashreporter in a non-libxul build.\n"
+    );
+    return;
+  }
+
+  // For some unknown reason, having two do_content_crash() calls in a single
+  // test doesn't work. That explains why this test exists separately from
+  // test_content_phc.js.
+  do_content_crash(
+    function() {
+      crashType = CrashTestUtils.CRASH_PHC_DOUBLE_FREE;
+    },
+    function(mdump, extra) {
+      Assert.equal(extra.PHCKind, "FreedPage");
+
+      // This is a string holding a decimal address.
+      Assert.ok(/^\d+$/.test(extra.PHCBaseAddress));
+
+      // CRASH_PHC_DOUBLE_FREE uses 64 for the size.
+      Assert.equal(extra.PHCUsableSize, 64);
+
+      // These are strings holding comma-separated lists of decimal addresses.
+ Assert.ok(/^(\d+,)+\d+$/.test(extra.PHCAllocStack)); + Assert.ok(/^(\d+,)+\d+$/.test(extra.PHCFreeStack)); + } + ); +} diff --git a/toolkit/crashreporter/test/unit_ipc/xpcshell-phc.ini b/toolkit/crashreporter/test/unit_ipc/xpcshell-phc.ini new file mode 100644 index 000000000000..bbaa332b81ff --- /dev/null +++ b/toolkit/crashreporter/test/unit_ipc/xpcshell-phc.ini @@ -0,0 +1,10 @@ +[DEFAULT] +head = +skip-if = toolkit == 'android' || (os == "win" && processor == "aarch64") # 1536217 +support-files = + !/toolkit/crashreporter/test/unit/crasher_subprocess_head.js + !/toolkit/crashreporter/test/unit/crasher_subprocess_tail.js + !/toolkit/crashreporter/test/unit/head_crashreporter.js + +[test_content_phc.js] +[test_content_phc2.js]
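
To show the consumer side of the callback plumbing threaded through the patch, here is a minimal sketch against the extended Linux MinidumpCallback signature (MyMinidumpCallback is a hypothetical client function, not part of this patch; the include path assumes the breakpad-client layout used in this tree):

#include "linux/handler/exception_handler.h"

// addr_info may be null (e.g. for non-crash dumps); when it is non-null and
// mKind != Unknown, the faulting address fell within PHC-managed memory.
static bool MyMinidumpCallback(
    const google_breakpad::MinidumpDescriptor& descriptor, void* context,
    const mozilla::phc::AddrInfo* addr_info, bool succeeded) {
  if (addr_info &&
      addr_info->mKind == mozilla::phc::AddrInfo::Kind::FreedPage) {
    // A likely use-after-free; a client could record the PHC kind, base
    // address, and stack traces alongside the minidump here.
  }
  return succeeded;
}
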