Bug 1532470: Part 1 - Add ability to specify desired memory range when reserving memory; r=handyman
In order to support 4-byte patches on ARM64, we need to be able to reserve trampoline space within +/- 128 MB of the beginning of a function. These changes allow us to make such reservations using OS APIs when available.

Differential Revision: https://phabricator.services.mozilla.com/D32190

--HG--
extra : moz-landing-system : lando
parent 1e222b2b52
commit 2c8e5e76b3
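For context, the OS facility this patch leans on is VirtualAlloc2 with a MEM_ADDRESS_REQUIREMENTS extended parameter, whose prototype and MinGW fallback definitions appear in the diff below. Here is a minimal standalone sketch of that pattern, assuming a Windows SDK recent enough to declare the extended-parameter types; the helper name ReserveNear and the GetProcAddress lookup are illustrative only (the patch itself resolves the function through DynamicallyLinkedFunctionPtr):

#include <windows.h>
#include <stdint.h>

// Signature matching the declaration added in this patch; resolved at runtime
// because VirtualAlloc2 only exists on Windows 10 RS4 (1803) and later.
using VirtualAlloc2Fn = PVOID(WINAPI*)(HANDLE, PVOID, SIZE_T, ULONG, ULONG,
                                       MEM_EXTENDED_PARAMETER*, ULONG);

// Hypothetical helper: reserve |aSize| bytes within +/- |aMaxDistance| of
// |aPivot|. Returns nullptr if the API is unavailable or the call fails.
inline void* ReserveNear(uintptr_t aPivot, SIZE_T aSize, uintptr_t aMaxDistance) {
  auto pVirtualAlloc2 = reinterpret_cast<VirtualAlloc2Fn>(::GetProcAddress(
      ::GetModuleHandleW(L"kernelbase.dll"), "VirtualAlloc2"));
  if (!pVirtualAlloc2) {
    return nullptr;  // caller would fall back to a manual scan, as the patch does
  }

  SYSTEM_INFO sysInfo;
  ::GetSystemInfo(&sysInfo);
  const uintptr_t gran = sysInfo.dwAllocationGranularity;

  // Bounds should be granularity-aligned; HighestEndingAddress is *inclusive*.
  uintptr_t lo = (aPivot > aMaxDistance) ? (aPivot - aMaxDistance) : gran;
  lo = (lo + gran - 1) & ~(gran - 1);
  const uintptr_t hi = ((aPivot + aMaxDistance) & ~(gran - 1)) - 1;

  MEM_ADDRESS_REQUIREMENTS memReq = {reinterpret_cast<PVOID>(lo),
                                     reinterpret_cast<PVOID>(hi), 0};

  MEM_EXTENDED_PARAMETER memParam = {};
  memParam.Type = MemExtendedParameterAddressRequirements;
  memParam.Pointer = &memReq;

  return pVirtualAlloc2(::GetCurrentProcess(), nullptr, aSize, MEM_RESERVE,
                        PAGE_NOACCESS, &memParam, 1);
}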
@@ -8,12 +8,66 @@
 #define mozilla_interceptor_MMPolicies_h
 
 #include "mozilla/Assertions.h"
+#include "mozilla/DynamicallyLinkedFunctionPtr.h"
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/Span.h"
 #include "mozilla/TypedEnumBits.h"
 #include "mozilla/Types.h"
 #include "mozilla/WindowsMapRemoteView.h"
 
 #include <windows.h>
 
+// MinGW does not have these definitions yet
+#if defined(__MINGW32__)
+typedef struct _MEM_ADDRESS_REQUIREMENTS {
+  PVOID LowestStartingAddress;
+  PVOID HighestEndingAddress;
+  SIZE_T Alignment;
+} MEM_ADDRESS_REQUIREMENTS, *PMEM_ADDRESS_REQUIREMENTS;
+
+typedef enum MEM_EXTENDED_PARAMETER_TYPE {
+  MemExtendedParameterInvalidType = 0,
+  MemExtendedParameterAddressRequirements,
+  MemExtendedParameterNumaNode,
+  MemExtendedParameterPartitionHandle,
+  MemExtendedParameterUserPhysicalHandle,
+  MemExtendedParameterAttributeFlags,
+  MemExtendedParameterMax
+} MEM_EXTENDED_PARAMETER_TYPE, *PMEM_EXTENDED_PARAMETER_TYPE;
+
+#define MEM_EXTENDED_PARAMETER_TYPE_BITS 8
+
+typedef struct DECLSPEC_ALIGN(8) MEM_EXTENDED_PARAMETER {
+  struct {
+    DWORD64 Type : MEM_EXTENDED_PARAMETER_TYPE_BITS;
+    DWORD64 Reserved : 64 - MEM_EXTENDED_PARAMETER_TYPE_BITS;
+  } DUMMYSTRUCTNAME;
+
+  union {
+    DWORD64 ULong64;
+    PVOID Pointer;
+    SIZE_T Size;
+    HANDLE Handle;
+    DWORD ULong;
+  } DUMMYUNIONNAME;
+
+} MEM_EXTENDED_PARAMETER, *PMEM_EXTENDED_PARAMETER;
+#endif  // defined(__MINGW32__)
+
+#if (NTDDI_VERSION < NTDDI_WIN10_RS4) || defined(__MINGW32__)
+PVOID WINAPI VirtualAlloc2(HANDLE Process, PVOID BaseAddress, SIZE_T Size,
+                           ULONG AllocationType, ULONG PageProtection,
+                           MEM_EXTENDED_PARAMETER* ExtendedParameters,
+                           ULONG ParameterCount);
+PVOID WINAPI MapViewOfFile3(HANDLE FileMapping, HANDLE Process,
+                            PVOID BaseAddress, ULONG64 Offset, SIZE_T ViewSize,
+                            ULONG AllocationType, ULONG PageProtection,
+                            MEM_EXTENDED_PARAMETER* ExtendedParameters,
+                            ULONG ParameterCount);
+#endif  // (NTDDI_VERSION < NTDDI_WIN10_RS4) || defined(__MINGW32__)
+
 // _CRT_RAND_S is not defined everywhere, but we need it.
 #if !defined(_CRT_RAND_S)
 extern "C" errno_t rand_s(unsigned int* randomValue);
@@ -22,14 +76,22 @@ extern "C" errno_t rand_s(unsigned int* randomValue);
 namespace mozilla {
 namespace interceptor {
 
-enum class ReservationFlags : uint32_t {
-  eDefault = 0,
-  eForceFirst2GB = 1,
-};
+class MOZ_TRIVIAL_CTOR_DTOR MMPolicyBase {
+ protected:
+  static uintptr_t AlignDown(const uintptr_t aUnaligned,
+                             const uintptr_t aAlignTo) {
+    MOZ_ASSERT(IsPowerOfTwo(aAlignTo));
+#pragma warning(suppress : 4146)
+    return aUnaligned & (-aAlignTo);
+  }
 
-MOZ_MAKE_ENUM_CLASS_BITWISE_OPERATORS(ReservationFlags)
+  static uintptr_t AlignUp(const uintptr_t aUnaligned,
+                           const uintptr_t aAlignTo) {
+    MOZ_ASSERT(IsPowerOfTwo(aAlignTo));
+#pragma warning(suppress : 4146)
+    return aUnaligned + ((-aUnaligned) & (aAlignTo - 1));
+  }
 
-class MMPolicyBase {
  public:
   static DWORD ComputeAllocationSize(const uint32_t aRequestedSize) {
     MOZ_ASSERT(aRequestedSize);
@@ -65,45 +127,149 @@ class MMPolicyBase {
     return kPageSize;
   }
 
-#if defined(_M_X64)
+  static uintptr_t GetMaxUserModeAddress() {
+    static const uintptr_t kMaxUserModeAddr = []() -> uintptr_t {
+      SYSTEM_INFO sysInfo;
+      ::GetSystemInfo(&sysInfo);
+      return reinterpret_cast<uintptr_t>(sysInfo.lpMaximumApplicationAddress);
+    }();
+
+    return kMaxUserModeAddr;
+  }
+
+  static const uint8_t* GetLowerBound(const Span<const uint8_t>& aBounds) {
+    return &(*aBounds.cbegin());
+  }
+
+  static const uint8_t* GetUpperBoundIncl(const Span<const uint8_t>& aBounds) {
+    // We return an upper bound that is inclusive.
+    return &(*(aBounds.cend() - 1));
+  }
+
+  static const uint8_t* GetUpperBoundExcl(const Span<const uint8_t>& aBounds) {
+    // We return an upper bound that is exclusive by adding 1 to the inclusive
+    // upper bound.
+    return GetUpperBoundIncl(aBounds) + 1;
+  }
+
+  /**
+   * It is convenient for us to provide address range information based on a
+   * "pivot" and a distance from that pivot, as branch instructions operate
+   * within a range of the program counter. OTOH, to actually manage the
+   * regions of memory, it is easier to think about them in terms of their
+   * lower and upper bounds. This function converts from the former format to
+   * the latter format.
+   */
+  static Maybe<Span<const uint8_t>> SpanFromPivotAndDistance(
+      const uint32_t aSize, const uintptr_t aPivotAddr,
+      const uint32_t aMaxDistanceFromPivot) {
+    if (!aPivotAddr || !aMaxDistanceFromPivot) {
+      return Nothing();
+    }
+
+    // We don't allow regions below 1MB so that we're not allocating near any
+    // sensitive areas in our address space.
+    const uintptr_t kMinAllowableAddress = 0x100000;
+
+    const uintptr_t kGranularity(GetAllocGranularity());
+
+    // We subtract the max distance from the pivot to determine our lower bound.
+    CheckedInt<uintptr_t> lowerBound(aPivotAddr);
+    lowerBound -= aMaxDistanceFromPivot;
+    if (lowerBound.isValid()) {
+      // In this case, the subtraction has not underflowed, but we still want
+      // the lower bound to be at least kMinAllowableAddress.
+      lowerBound = std::max(lowerBound.value(), kMinAllowableAddress);
+    } else {
+      // In this case, we underflowed. Forcibly set the lower bound to
+      // kMinAllowableAddress.
+      lowerBound = CheckedInt<uintptr_t>(kMinAllowableAddress);
+    }
+
+    // Align up to the next unit of allocation granularity when necessary.
+    lowerBound = AlignUp(lowerBound.value(), kGranularity);
+    MOZ_ASSERT(lowerBound.isValid());
+    if (!lowerBound.isValid()) {
+      return Nothing();
+    }
+
+    // We must ensure that our region is below the maximum allowable user-mode
+    // address, or our reservation will fail.
+    const uintptr_t kMaxUserModeAddr = GetMaxUserModeAddress();
+
+    // We add the max distance from the pivot to determine our upper bound.
+    CheckedInt<uintptr_t> upperBound(aPivotAddr);
+    upperBound += aMaxDistanceFromPivot;
+    if (upperBound.isValid()) {
+      // In this case, the addition has not overflowed, but we still want
+      // the upper bound to be at most kMaxUserModeAddr.
+      upperBound = std::min(upperBound.value(), kMaxUserModeAddr);
+    } else {
+      // In this case, we overflowed. Forcibly set the upper bound to
+      // kMaxUserModeAddr.
+      upperBound = CheckedInt<uintptr_t>(kMaxUserModeAddr);
+    }
+
+    // Subtract the desired allocation size so that any chunk allocated in the
+    // region will be reachable.
+    upperBound -= aSize;
+    if (!upperBound.isValid()) {
+      return Nothing();
+    }
+
+    // Align down to the next unit of allocation granularity when necessary.
+    upperBound = AlignDown(upperBound.value(), kGranularity);
+    if (!upperBound.isValid()) {
+      return Nothing();
+    }
+
+    MOZ_ASSERT(lowerBound.value() < upperBound.value());
+    if (lowerBound.value() >= upperBound.value()) {
+      return Nothing();
+    }
+
+    // Return the result as a Span
+    return Some(MakeSpan(reinterpret_cast<const uint8_t*>(lowerBound.value()),
+                         upperBound.value() - lowerBound.value()));
+  }
+
   /**
    * This function locates a virtual memory region of |aDesiredBytesLen| that
-   * resides in the lowest 2GB of address space. We do this by scanning the
+   * resides in the interval [aRangeMin, aRangeMax). We do this by scanning the
    * virtual memory space for a block of unallocated memory that is sufficiently
-   * large. We must stay below the 2GB mark because a 10-byte patch uses movsxd
-   * (ie, sign extension) to expand the pointer to 64-bits, so bit 31 of the
-   * found region must be 0.
+   * large.
    */
-  static PVOID FindLowRegion(HANDLE aProcess, const size_t aDesiredBytesLen) {
-    const DWORD granularity = GetAllocGranularity();
-    MOZ_ASSERT(aDesiredBytesLen / granularity > 0);
+  static PVOID FindRegion(HANDLE aProcess, const size_t aDesiredBytesLen,
+                          const uint8_t* aRangeMin, const uint8_t* aRangeMax) {
+    const DWORD kGranularity = GetAllocGranularity();
+    MOZ_ASSERT(aDesiredBytesLen >= kGranularity);
     if (!aDesiredBytesLen) {
       return nullptr;
     }
 
+    MOZ_ASSERT(aRangeMin < aRangeMax);
+    if (aRangeMin >= aRangeMax) {
+      return nullptr;
+    }
+
     // Generate a randomized base address that falls within the interval
-    // [1MiB, 2GiB - aDesiredBytesLen]
+    // [aRangeMin, aRangeMax - aDesiredBytesLen]
     unsigned int rnd = 0;
     rand_s(&rnd);
 
     // Reduce rnd to a value that falls within the acceptable range
-    const uint64_t kMinAddress = 0x0000000000100000ULL;
-    const uint64_t kMaxAddress = 0x0000000080000000ULL;
-    uint64_t maxOffset =
-        (kMaxAddress - kMinAddress - aDesiredBytesLen) / granularity;
-    uint64_t offset = (uint64_t(rnd) % maxOffset) * granularity;
+    uintptr_t maxOffset =
+        (aRangeMax - aRangeMin - aDesiredBytesLen) / kGranularity;
+    uintptr_t offset = (uintptr_t(rnd) % maxOffset) * kGranularity;
 
     // Start searching at this address
-    char* address = reinterpret_cast<char*>(kMinAddress) + offset;
+    const uint8_t* address = aRangeMin + offset;
     // The max address needs to incorporate the desired length
-    char* const kMaxPtr =
-        reinterpret_cast<char*>(kMaxAddress) - aDesiredBytesLen;
+    const uint8_t* const kMaxPtr = aRangeMax - aDesiredBytesLen;
 
     MOZ_DIAGNOSTIC_ASSERT(address <= kMaxPtr);
 
-    MEMORY_BASIC_INFORMATION mbi = {};
+    MEMORY_BASIC_INFORMATION mbi;
     SIZE_T len = sizeof(mbi);
 
     // Scan the range for a free chunk that is at least as large as
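To make the pivot-and-distance conversion in SpanFromPivotAndDistance above concrete, here is a small worked example. The pivot address, the 128 KiB reservation size, and the 64 KiB allocation granularity are assumed values for illustration, and the maximum user-mode address is taken to lie above the pivot plus the distance:

// Assumed inputs: a function at kPivot, reached by ARM64 branches with a
// +/- 128 MB range, on a system with 64 KiB allocation granularity.
constexpr uintptr_t kPivot = 0x7FF612345678;
constexpr uint32_t kSize = 0x20000;           // 128 KiB of trampoline space
constexpr uint32_t kMaxDistance = 0x8000000;  // 128 MB
constexpr uintptr_t kGranularity = 0x10000;   // 64 KiB

// lowerBound = AlignUp(kPivot - kMaxDistance, kGranularity)
//            = AlignUp(0x7FF60A345678, 0x10000) = 0x7FF60A350000
// upperBound = AlignDown(min(kPivot + kMaxDistance, maxUserModeAddr) - kSize,
//                        kGranularity)
//            = AlignDown(0x7FF61A325678, 0x10000) = 0x7FF61A320000
// The returned Span covers [0x7FF60A350000, 0x7FF61A320000), so any kSize-byte
// chunk reserved inside it remains within branch range of the pivot.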
@@ -114,55 +280,83 @@ class MMPolicyBase {
         return mbi.BaseAddress;
       }
 
-      address = reinterpret_cast<char*>(mbi.BaseAddress) + mbi.RegionSize;
+      address =
+          reinterpret_cast<const uint8_t*>(mbi.BaseAddress) + mbi.RegionSize;
     }
 
     return nullptr;
   }
 
-#endif  // defined(_M_X64)
-
-  template <typename ReserveFnT>
+  /**
+   * This function reserves a |aSize| block of virtual memory.
+   *
+   * When |aBounds| is Nothing, it just calls |aReserveFn| and lets Windows
+   * choose the base address.
+   *
+   * Otherwise, it tries to call |aReserveRangeFn| to reserve the memory within
+   * the bounds provided by |aBounds|. It is advantageous to use this function
+   * because the OS's VM manager has better information as to which base
+   * addresses are the best to use.
+   *
+   * If |aReserveRangeFn| returns Nothing, this means that the platform support
+   * is not available. In that case, we fall back to manually computing a region
+   * to use for reserving the memory by calling |FindRegion|.
+   */
+  template <typename ReserveFnT, typename ReserveRangeFnT>
   static PVOID Reserve(HANDLE aProcess, const uint32_t aSize,
                        const ReserveFnT& aReserveFn,
-                       const ReservationFlags aFlags) {
-#if defined(_M_X64)
-    if (aFlags & ReservationFlags::eForceFirst2GB) {
-      size_t curAttempt = 0;
-      const size_t kMaxAttempts = 8;
-
-      // We loop here because |FindLowRegion| may return a base address that
-      // is reserved elsewhere before we have had a chance to reserve it
-      // ourselves.
-      while (curAttempt < kMaxAttempts) {
-        PVOID base = FindLowRegion(aProcess, aSize);
-        if (!base) {
-          return nullptr;
-        }
-
-        PVOID result = aReserveFn(aProcess, base, aSize);
-        if (result) {
-          return result;
-        }
-
-        ++curAttempt;
-      }
-
-      // If we run out of attempts, we fall through to the default case where
-      // the system chooses any base address it wants. In that case, the hook
-      // will be set on a best-effort basis.
-    }
-#endif  // defined(_M_X64)
+                       const ReserveRangeFnT& aReserveRangeFn,
+                       const Maybe<Span<const uint8_t>>& aBounds) {
+    if (!aBounds) {
+      // No restrictions, let the OS choose the base address
+      return aReserveFn(aProcess, nullptr, aSize);
+    }
+
+    const uint8_t* lowerBound = GetLowerBound(aBounds.ref());
+    const uint8_t* upperBoundExcl = GetUpperBoundExcl(aBounds.ref());
+
+    Maybe<PVOID> result =
+        aReserveRangeFn(aProcess, aSize, lowerBound, upperBoundExcl);
+    if (result) {
+      return result.value();
+    }
+
+    // aReserveRangeFn is not available on this machine. We'll do a manual
+    // search.
+
+    size_t curAttempt = 0;
+    const size_t kMaxAttempts = 8;
+
+    // We loop here because |FindRegion| may return a base address that
+    // is reserved elsewhere before we have had a chance to reserve it
+    // ourselves.
+    while (curAttempt < kMaxAttempts) {
+      PVOID base = FindRegion(aProcess, aSize, lowerBound, upperBoundExcl);
+      if (!base) {
+        return nullptr;
+      }
+
+      result = Some(aReserveFn(aProcess, base, aSize));
+      if (result.value()) {
+        return result.value();
+      }
+
+      ++curAttempt;
+    }
+
+    // If we run out of attempts, we fall through to the default case where
+    // the system chooses any base address it wants. In that case, the hook
+    // will be set on a best-effort basis.
 
     return aReserveFn(aProcess, nullptr, aSize);
   }
 };
 
-class MMPolicyInProcess : public MMPolicyBase {
+class MOZ_TRIVIAL_CTOR_DTOR MMPolicyInProcess : public MMPolicyBase {
  public:
   typedef MMPolicyInProcess MMPolicyT;
 
-  explicit MMPolicyInProcess()
+  constexpr MMPolicyInProcess()
       : mBase(nullptr), mReservationSize(0), mCommitOffset(0) {}
 
   MMPolicyInProcess(const MMPolicyInProcess&) = delete;
@@ -186,9 +380,6 @@ class MMPolicyInProcess : public MMPolicyBase {
     return *this;
   }
 
-  // We always leak mBase
-  ~MMPolicyInProcess() = default;
-
   explicit operator bool() const { return !!mBase; }
 
   /**
@@ -262,7 +453,8 @@ class MMPolicyInProcess : public MMPolicyBase {
   /**
    * @return the effective number of bytes reserved, or 0 on failure
    */
-  uint32_t Reserve(const uint32_t aSize, const ReservationFlags aFlags) {
+  uint32_t Reserve(const uint32_t aSize,
+                   const Maybe<Span<const uint8_t>>& aBounds) {
     if (!aSize) {
       return 0;
     }
@@ -278,8 +470,31 @@ class MMPolicyInProcess : public MMPolicyBase {
       return ::VirtualAlloc(aBase, aSize, MEM_RESERVE, PAGE_NOACCESS);
     };
 
-    mBase = static_cast<uint8_t*>(MMPolicyBase::Reserve(
-        ::GetCurrentProcess(), mReservationSize, reserveFn, aFlags));
+    auto reserveWithinRangeFn =
+        [](HANDLE aProcess, uint32_t aSize, const uint8_t* aRangeMin,
+           const uint8_t* aRangeMaxExcl) -> Maybe<PVOID> {
+      static const DynamicallyLinkedFunctionPtr<decltype(&::VirtualAlloc2)>
+          pVirtualAlloc2(L"kernelbase.dll", "VirtualAlloc2");
+      if (!pVirtualAlloc2) {
+        return Nothing();
+      }
+
+      // NB: MEM_ADDRESS_REQUIREMENTS::HighestEndingAddress is *inclusive*
+      MEM_ADDRESS_REQUIREMENTS memReq = {
+          const_cast<uint8_t*>(aRangeMin),
+          const_cast<uint8_t*>(aRangeMaxExcl - 1)};
+
+      MEM_EXTENDED_PARAMETER memParam = {};
+      memParam.Type = MemExtendedParameterAddressRequirements;
+      memParam.Pointer = &memReq;
+
+      return Some(pVirtualAlloc2(aProcess, nullptr, aSize, MEM_RESERVE,
+                                 PAGE_NOACCESS, &memParam, 1));
+    };
+
+    mBase = static_cast<uint8_t*>(
+        MMPolicyBase::Reserve(::GetCurrentProcess(), mReservationSize,
+                              reserveFn, reserveWithinRangeFn, aBounds));
 
     if (!mBase) {
       return 0;
@@ -471,7 +686,8 @@ class MMPolicyOutOfProcess : public MMPolicyBase {
   /**
    * @return the effective number of bytes reserved, or 0 on failure
    */
-  uint32_t Reserve(const uint32_t aSize, const ReservationFlags aFlags) {
+  uint32_t Reserve(const uint32_t aSize,
+                   const Maybe<Span<const uint8_t>>& aBounds) {
     if (!aSize || !mProcess) {
       return 0;
     }
@@ -502,8 +718,31 @@ class MMPolicyOutOfProcess : public MMPolicyBase {
                                  PAGE_EXECUTE_READ);
     };
 
-    mRemoteView =
-        MMPolicyBase::Reserve(mProcess, mReservationSize, reserveFn, aFlags);
+    auto reserveWithinRangeFn =
+        [mapping = mMapping](HANDLE aProcess, uint32_t aSize,
+                             const uint8_t* aRangeMin,
+                             const uint8_t* aRangeMaxExcl) -> Maybe<PVOID> {
+      static const DynamicallyLinkedFunctionPtr<decltype(&::MapViewOfFile3)>
+          pMapViewOfFile3(L"kernelbase.dll", "MapViewOfFile3");
+      if (!pMapViewOfFile3) {
+        return Nothing();
+      }
+
+      // NB: MEM_ADDRESS_REQUIREMENTS::HighestEndingAddress is *inclusive*
+      MEM_ADDRESS_REQUIREMENTS memReq = {
+          const_cast<uint8_t*>(aRangeMin),
+          const_cast<uint8_t*>(aRangeMaxExcl - 1)};
+
+      MEM_EXTENDED_PARAMETER memParam = {};
+      memParam.Type = MemExtendedParameterAddressRequirements;
+      memParam.Pointer = &memReq;
+
+      return Some(pMapViewOfFile3(mapping, aProcess, nullptr, 0, aSize, 0,
+                                  PAGE_EXECUTE_READ, &memParam, 1));
+    };
+
+    mRemoteView = MMPolicyBase::Reserve(mProcess, mReservationSize, reserveFn,
+                                        reserveWithinRangeFn, aBounds);
     if (!mRemoteView) {
       return 0;
     }
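Putting the pieces together, here is a hypothetical caller-side sketch of the reworked API. The include path is inferred from the header guard, and the target-function address, size, and 128 MB distance are illustrative; this wiring is not part of this patch:

#include "mozilla/interceptor/MMPolicies.h"

using mozilla::Maybe;
using mozilla::Span;
using mozilla::interceptor::MMPolicyBase;
using mozilla::interceptor::MMPolicyInProcess;

// Hypothetical caller: reserve |aSize| bytes of trampoline space reachable by
// a 4-byte ARM64 branch (+/- 128 MB) from |aTargetFn|.
static bool ReserveTrampolineSpaceNear(void* aTargetFn, uint32_t aSize,
                                       MMPolicyInProcess& aPolicy) {
  const uint32_t kMaxDistance = 0x8000000;  // 128 MB

  // Convert the pivot/distance pair into explicit [lower, upper) bounds.
  Maybe<Span<const uint8_t>> bounds = MMPolicyBase::SpanFromPivotAndDistance(
      aSize, reinterpret_cast<uintptr_t>(aTargetFn), kMaxDistance);

  // With bounds present, Reserve() first tries the range-aware path
  // (VirtualAlloc2) and falls back to FindRegion(); with Nothing(), the OS
  // may place the reservation anywhere.
  return aPolicy.Reserve(aSize, bounds) > 0;
}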