mirror of
https://github.com/capstone-engine/llvm-capstone.git
synced 2024-12-04 12:15:46 +00:00
[Support] Support NetBSD PaX MPROTECT in sys::Memory.
Removes AllocateRWX, setWritable and setExecutable from sys::Memory and standardizes on allocateMappedMemory / protectMappedMemory. The allocateMappedMemory method is updated to request full permissions for memory blocks so that they can be marked executable later.

llvm-svn: 318464
This commit is contained in:
parent
854a8743e8
commit
afcb70d031
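
For orientation, here is a minimal sketch of the allocate-then-protect workflow this change standardizes on, using only the sys::Memory calls visible in the diff below. The function name emitAndFinalizeCode and the code-emission step are illustrative placeholders, not part of the patch.

#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Memory.h"

using namespace llvm;

// Allocate writable memory, emit code into it, then flip the block to
// read/execute before running it. allocateMappedMemory now reserves full
// permissions up front, so the later protectMappedMemory call is allowed
// even under NetBSD PaX MPROTECT.
sys::MemoryBlock emitAndFinalizeCode(size_t NumBytes) {
  std::error_code EC;
  sys::MemoryBlock MB = sys::Memory::allocateMappedMemory(
      NumBytes, /*NearBlock=*/nullptr,
      sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC);
  if (!MB.base())
    report_fatal_error("allocation failed: " + EC.message());

  // ... write generated code into MB.base() here (placeholder) ...

  if ((EC = sys::Memory::protectMappedMemory(
           MB, sys::Memory::MF_READ | sys::Memory::MF_EXEC)))
    report_fatal_error("cannot mark code executable: " + EC.message());
  sys::Memory::InvalidateInstructionCache(MB.base(), MB.size());
  return MB;
}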
@@ -109,51 +109,10 @@ namespace sys {
     static std::error_code protectMappedMemory(const MemoryBlock &Block,
                                                unsigned Flags);
 
-    /// This method allocates a block of Read/Write/Execute memory that is
-    /// suitable for executing dynamically generated code (e.g. JIT). An
-    /// attempt to allocate \p NumBytes bytes of virtual memory is made.
-    /// \p NearBlock may point to an existing allocation in which case
-    /// an attempt is made to allocate more memory near the existing block.
-    ///
-    /// On success, this returns a non-null memory block, otherwise it returns
-    /// a null memory block and fills in *ErrMsg.
-    ///
-    /// @brief Allocate Read/Write/Execute memory.
-    static MemoryBlock AllocateRWX(size_t NumBytes,
-                                   const MemoryBlock *NearBlock,
-                                   std::string *ErrMsg = nullptr);
-
-    /// This method releases a block of Read/Write/Execute memory that was
-    /// allocated with the AllocateRWX method. It should not be used to
-    /// release any memory block allocated any other way.
-    ///
-    /// On success, this returns false, otherwise it returns true and fills
-    /// in *ErrMsg.
-    /// @brief Release Read/Write/Execute memory.
-    static bool ReleaseRWX(MemoryBlock &block, std::string *ErrMsg = nullptr);
-
     /// InvalidateInstructionCache - Before the JIT can run a block of code
     /// that has been emitted it must invalidate the instruction cache on some
     /// platforms.
     static void InvalidateInstructionCache(const void *Addr, size_t Len);
-
-    /// setExecutable - Before the JIT can run a block of code, it has to be
-    /// given read and executable privilege. Return true if it is already r-x
-    /// or the system is able to change its previlege.
-    static bool setExecutable(MemoryBlock &M, std::string *ErrMsg = nullptr);
-
-    /// setWritable - When adding to a block of code, the JIT may need
-    /// to mark a block of code as RW since the protections are on page
-    /// boundaries, and the JIT internal allocations are not page aligned.
-    static bool setWritable(MemoryBlock &M, std::string *ErrMsg = nullptr);
-
-    /// setRangeExecutable - Mark the page containing a range of addresses
-    /// as executable.
-    static bool setRangeExecutable(const void *Addr, size_t Size);
-
-    /// setRangeWritable - Mark the page containing a range of addresses
-    /// as writable.
-    static bool setRangeWritable(const void *Addr, size_t Size);
   };
 
   /// Owning version of MemoryBlock.

@@ -102,6 +102,10 @@ Memory::allocateMappedMemory(size_t NumBytes,
 
   int Protect = getPosixProtectionFlags(PFlags);
 
+#if defined(__NetBSD__) && defined(PROT_MPROTECT)
+  Protect |= PROT_MPROTECT(PROT_READ | PROT_WRITE | PROT_EXEC);
+#endif
+
   // Use any near hint and the page size to set a page-aligned starting address
   uintptr_t Start = NearBlock ? reinterpret_cast<uintptr_t>(NearBlock->base()) +
                                     NearBlock->size() : 0;

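The PROT_MPROTECT bits above matter because PaX MPROTECT on NetBSD only allows mprotect() to request protections that were declared when the mapping was created. A standalone sketch of that contract, assuming a NetBSD <sys/mman.h> that defines PROT_MPROTECT (reserveJitPage and makeExecutable are illustrative names, not from this patch):

#include <sys/mman.h>
#include <cstddef>

// Map a page read/write while reserving the right to make it executable later.
void *reserveJitPage(size_t Len) {
  int Prot = PROT_READ | PROT_WRITE;
#if defined(__NetBSD__) && defined(PROT_MPROTECT)
  // Declare every protection we may ever request via mprotect().
  Prot |= PROT_MPROTECT(PROT_READ | PROT_WRITE | PROT_EXEC);
#endif
  void *P = ::mmap(nullptr, Len, Prot, MAP_PRIVATE | MAP_ANON, -1, 0);
  return P == MAP_FAILED ? nullptr : P;
}

// Flip the page to read/execute. Without the PROT_MPROTECT reservation above,
// a PaX MPROTECT-enabled NetBSD kernel rejects this call.
bool makeExecutable(void *P, size_t Len) {
  return ::mprotect(P, Len, PROT_READ | PROT_EXEC) == 0;
}
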
@@ -166,129 +170,6 @@ Memory::protectMappedMemory(const MemoryBlock &M, unsigned Flags) {
   return std::error_code();
 }
 
-/// AllocateRWX - Allocate a slab of memory with read/write/execute
-/// permissions. This is typically used for JIT applications where we want
-/// to emit code to the memory then jump to it. Getting this type of memory
-/// is very OS specific.
-///
-MemoryBlock
-Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock,
-                    std::string *ErrMsg) {
-  if (NumBytes == 0) return MemoryBlock();
-
-  static const size_t PageSize = Process::getPageSize();
-  size_t NumPages = (NumBytes+PageSize-1)/PageSize;
-
-  int fd = -1;
-
-  int flags = MAP_PRIVATE |
-#ifdef MAP_ANONYMOUS
-  MAP_ANONYMOUS
-#else
-  MAP_ANON
-#endif
-  ;
-
-  void* start = NearBlock ? (unsigned char*)NearBlock->base() +
-                            NearBlock->size() : nullptr;
-
-#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
-  void *pa = ::mmap(start, PageSize*NumPages, PROT_READ|PROT_EXEC,
-                    flags, fd, 0);
-#elif defined(__NetBSD__) && defined(PROT_MPROTECT)
-  void *pa =
-      ::mmap(start, PageSize * NumPages,
-             PROT_READ | PROT_WRITE | PROT_MPROTECT(PROT_EXEC), flags, fd, 0);
-#else
-  void *pa = ::mmap(start, PageSize*NumPages, PROT_READ|PROT_WRITE|PROT_EXEC,
-                    flags, fd, 0);
-#endif
-  if (pa == MAP_FAILED) {
-    if (NearBlock) //Try again without a near hint
-      return AllocateRWX(NumBytes, nullptr);
-
-    MakeErrMsg(ErrMsg, "Can't allocate RWX Memory");
-    return MemoryBlock();
-  }
-
-#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
-  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)pa,
-                                (vm_size_t)(PageSize*NumPages), 0,
-                                VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
-  if (KERN_SUCCESS != kr) {
-    MakeErrMsg(ErrMsg, "vm_protect max RX failed");
-    return MemoryBlock();
-  }
-
-  kr = vm_protect(mach_task_self(), (vm_address_t)pa,
-                  (vm_size_t)(PageSize*NumPages), 0,
-                  VM_PROT_READ | VM_PROT_WRITE);
-  if (KERN_SUCCESS != kr) {
-    MakeErrMsg(ErrMsg, "vm_protect RW failed");
-    return MemoryBlock();
-  }
-#endif
-
-  MemoryBlock result;
-  result.Address = pa;
-  result.Size = NumPages*PageSize;
-
-  return result;
-}
-
-bool Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) {
-  if (M.Address == nullptr || M.Size == 0) return false;
-  if (0 != ::munmap(M.Address, M.Size))
-    return MakeErrMsg(ErrMsg, "Can't release RWX Memory");
-  return false;
-}
-
-bool Memory::setWritable (MemoryBlock &M, std::string *ErrMsg) {
-#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
-  if (M.Address == 0 || M.Size == 0) return false;
-  Memory::InvalidateInstructionCache(M.Address, M.Size);
-  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
-    (vm_size_t)M.Size, 0, VM_PROT_READ | VM_PROT_WRITE);
-  return KERN_SUCCESS == kr;
-#else
-  return true;
-#endif
-}
-
-bool Memory::setExecutable (MemoryBlock &M, std::string *ErrMsg) {
-  if (M.Address == nullptr || M.Size == 0) return false;
-  Memory::InvalidateInstructionCache(M.Address, M.Size);
-#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
-  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
-    (vm_size_t)M.Size, 0, VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
-  return KERN_SUCCESS == kr;
-#else
-  return true;
-#endif
-}
-
-bool Memory::setRangeWritable(const void *Addr, size_t Size) {
-#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
-  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
-                                (vm_size_t)Size, 0,
-                                VM_PROT_READ | VM_PROT_WRITE);
-  return KERN_SUCCESS == kr;
-#else
-  return true;
-#endif
-}
-
-bool Memory::setRangeExecutable(const void *Addr, size_t Size) {
-#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
-  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
-                                (vm_size_t)Size, 0,
-                                VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
-  return KERN_SUCCESS == kr;
-#else
-  return true;
-#endif
-}
-
 /// InvalidateInstructionCache - Before the JIT can run a block of code
 /// that has been emitted it must invalidate the instruction cache on some
 /// platforms.

@@ -160,85 +160,5 @@ void Memory::InvalidateInstructionCache(
   FlushInstructionCache(GetCurrentProcess(), Addr, Len);
 }
 
-
-MemoryBlock Memory::AllocateRWX(size_t NumBytes,
-                                const MemoryBlock *NearBlock,
-                                std::string *ErrMsg) {
-  MemoryBlock MB;
-  std::error_code EC;
-  MB = allocateMappedMemory(NumBytes, NearBlock,
-                            MF_READ|MF_WRITE|MF_EXEC, EC);
-  if (EC != std::error_code() && ErrMsg) {
-    MakeErrMsg(ErrMsg, EC.message());
-  }
-  return MB;
-}
-
-bool Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) {
-  std::error_code EC = releaseMappedMemory(M);
-  if (EC == std::error_code())
-    return false;
-  MakeErrMsg(ErrMsg, EC.message());
-  return true;
-}
-
-static DWORD getProtection(const void *addr) {
-  MEMORY_BASIC_INFORMATION info;
-  if (sizeof(info) == ::VirtualQuery(addr, &info, sizeof(info))) {
-    return info.Protect;
-  }
-  return 0;
-}
-
-bool Memory::setWritable(MemoryBlock &M, std::string *ErrMsg) {
-  if (!setRangeWritable(M.Address, M.Size)) {
-    return MakeErrMsg(ErrMsg, "Cannot set memory to writeable");
-  }
-  return true;
-}
-
-bool Memory::setExecutable(MemoryBlock &M, std::string *ErrMsg) {
-  if (!setRangeExecutable(M.Address, M.Size)) {
-    return MakeErrMsg(ErrMsg, "Cannot set memory to executable");
-  }
-  return true;
-}
-
-bool Memory::setRangeWritable(const void *Addr, size_t Size) {
-  DWORD prot = getProtection(Addr);
-  if (!prot)
-    return false;
-
-  if (prot == PAGE_EXECUTE || prot == PAGE_EXECUTE_READ) {
-    prot = PAGE_EXECUTE_READWRITE;
-  } else if (prot == PAGE_NOACCESS || prot == PAGE_READONLY) {
-    prot = PAGE_READWRITE;
-  }
-
-  DWORD oldProt;
-  Memory::InvalidateInstructionCache(Addr, Size);
-  return ::VirtualProtect(const_cast<LPVOID>(Addr), Size, prot, &oldProt)
-            == TRUE;
-}
-
-bool Memory::setRangeExecutable(const void *Addr, size_t Size) {
-  DWORD prot = getProtection(Addr);
-  if (!prot)
-    return false;
-
-  if (prot == PAGE_NOACCESS) {
-    prot = PAGE_EXECUTE;
-  } else if (prot == PAGE_READONLY) {
-    prot = PAGE_EXECUTE_READ;
-  } else if (prot == PAGE_READWRITE) {
-    prot = PAGE_EXECUTE_READWRITE;
-  }
-
-  DWORD oldProt;
-  Memory::InvalidateInstructionCache(Addr, Size);
-  return ::VirtualProtect(const_cast<LPVOID>(Addr), Size, prot, &oldProt)
-            == TRUE;
-}
-
 } // namespace sys
 } // namespace llvm

@@ -178,10 +178,14 @@ public:
   void deregisterEHFrames() override {}
 
   void preallocateSlab(uint64_t Size) {
-    std::string Err;
-    sys::MemoryBlock MB = sys::Memory::AllocateRWX(Size, nullptr, &Err);
+    std::error_code EC;
+    sys::MemoryBlock MB =
+      sys::Memory::allocateMappedMemory(Size, nullptr,
+                                        sys::Memory::MF_READ |
+                                        sys::Memory::MF_WRITE,
+                                        EC);
     if (!MB.base())
-      report_fatal_error("Can't allocate enough memory: " + Err);
+      report_fatal_error("Can't allocate enough memory: " + EC.message());
 
     PreallocSlab = MB;
     UsePreallocation = true;

@@ -222,10 +226,14 @@ uint8_t *TrivialMemoryManager::allocateCodeSection(uintptr_t Size,
   if (UsePreallocation)
     return allocateFromSlab(Size, Alignment, true /* isCode */);
 
-  std::string Err;
-  sys::MemoryBlock MB = sys::Memory::AllocateRWX(Size, nullptr, &Err);
+  std::error_code EC;
+  sys::MemoryBlock MB =
+    sys::Memory::allocateMappedMemory(Size, nullptr,
+                                      sys::Memory::MF_READ |
+                                      sys::Memory::MF_WRITE,
+                                      EC);
   if (!MB.base())
-    report_fatal_error("MemoryManager allocation failed: " + Err);
+    report_fatal_error("MemoryManager allocation failed: " + EC.message());
   FunctionMemory.push_back(MB);
   return (uint8_t*)MB.base();
 }

@@ -242,10 +250,14 @@ uint8_t *TrivialMemoryManager::allocateDataSection(uintptr_t Size,
   if (UsePreallocation)
     return allocateFromSlab(Size, Alignment, false /* isCode */);
 
-  std::string Err;
-  sys::MemoryBlock MB = sys::Memory::AllocateRWX(Size, nullptr, &Err);
+  std::error_code EC;
+  sys::MemoryBlock MB =
+    sys::Memory::allocateMappedMemory(Size, nullptr,
+                                      sys::Memory::MF_READ |
+                                      sys::Memory::MF_WRITE,
+                                      EC);
   if (!MB.base())
-    report_fatal_error("MemoryManager allocation failed: " + Err);
+    report_fatal_error("MemoryManager allocation failed: " + EC.message());
   DataMemory.push_back(MB);
   return (uint8_t*)MB.base();
 }

@@ -453,9 +465,11 @@ static int executeInput() {
 
     // Make sure the memory is executable.
     // setExecutable will call InvalidateInstructionCache.
-    std::string ErrorStr;
-    if (!sys::Memory::setExecutable(FM, &ErrorStr))
-      ErrorAndExit("unable to mark function executable: '" + ErrorStr + "'");
+    if (auto EC = sys::Memory::protectMappedMemory(FM,
+                                                   sys::Memory::MF_READ |
+                                                   sys::Memory::MF_EXEC))
+      ErrorAndExit("unable to mark function executable: '" + EC.message() +
+                   "'");
   }
 
   // Dispatch to _main().