mirror of
https://github.com/RPCS3/llvm-mirror.git
synced 2025-01-20 02:34:48 +00:00
On Darwin ARM, memory needs special handling to do JIT. This patch expands
this handling to work properly for modifying stub functions, relocations back to entry points after JIT compilation, etc. llvm-svn: 57013
This commit is contained in:
parent
d9ff019d3e
commit
a49386d8e7
@ -35,6 +35,14 @@ public:
|
||||
/// JIT Memory Manager if the client does not provide one to the JIT.
|
||||
static JITMemoryManager *CreateDefaultMemManager();
|
||||
|
||||
/// setMemoryWritable - When code generation is in progress,
|
||||
/// the code pages may need permissions changed.
|
||||
virtual void setMemoryWritable(void) = 0;
|
||||
|
||||
/// setMemoryExecutable - When code generation is done and we're ready to
|
||||
/// start execution, the code pages may need permissions changed.
|
||||
virtual void setMemoryExecutable(void) = 0;
|
||||
|
||||
//===--------------------------------------------------------------------===//
|
||||
// Global Offset Table Management
|
||||
//===--------------------------------------------------------------------===//
|
||||
|
@ -70,10 +70,15 @@ namespace sys {
|
||||
/// platforms.
|
||||
static void InvalidateInstructionCache(const void *Addr, size_t Len);
|
||||
|
||||
/// SetRXPrivilege - Before the JIT can run a block of code, it has to be
|
||||
/// setExecutable - Before the JIT can run a block of code, it has to be
|
||||
/// given read and executable privilege. Return true if it is already r-x
|
||||
/// or the system is able to change its privilege.
|
||||
static bool SetRXPrivilege(const void *Addr, size_t Size);
|
||||
static bool setExecutable (MemoryBlock &M, std::string *ErrMsg = 0);
|
||||
|
||||
/// setWritable - When adding to a block of code, the JIT may need
|
||||
/// to mark a block of code as RW since the protections are on page
|
||||
/// boundaries, and the JIT internal allocations are not page aligned.
|
||||
static bool setWritable (MemoryBlock &M, std::string *ErrMsg = 0);
|
||||
};
|
||||
}
|
||||
}
|
||||
|
@ -560,6 +560,10 @@ namespace {
|
||||
if (ExceptionHandling) DE->setModuleInfo(Info);
|
||||
}
|
||||
|
||||
/// setMemoryExecutable - Called once code emission is finished: forwards to
/// the JIT memory manager so it can restore execute permission on the code
/// pages it manages (see JITMemoryManager::setMemoryExecutable).
void setMemoryExecutable(void) {
  MemMgr->setMemoryExecutable();
}
|
||||
|
||||
private:
|
||||
void *getPointerToGlobal(GlobalValue *GV, void *Reference, bool NoNeedStub);
|
||||
void *getPointerToGVLazyPtr(GlobalValue *V, void *Reference,
|
||||
@ -791,6 +795,8 @@ unsigned JITEmitter::GetSizeOfGlobalsInBytes(MachineFunction &MF) {
|
||||
|
||||
void JITEmitter::startFunction(MachineFunction &F) {
|
||||
uintptr_t ActualSize = 0;
|
||||
// Set the memory writable, if it's not already
|
||||
MemMgr->setMemoryWritable();
|
||||
if (MemMgr->NeedsExactSize()) {
|
||||
DOUT << "ExactSize\n";
|
||||
const TargetInstrInfo* TII = F.getTarget().getInstrInfo();
|
||||
@ -938,7 +944,7 @@ bool JITEmitter::finishFunction(MachineFunction &F) {
|
||||
Relocations.clear();
|
||||
|
||||
// Mark code region readable and executable if it's not so already.
|
||||
sys::Memory::SetRXPrivilege(FnStart, FnEnd-FnStart);
|
||||
MemMgr->setMemoryExecutable();
|
||||
|
||||
#ifndef NDEBUG
|
||||
{
|
||||
@ -1086,6 +1092,10 @@ void JITEmitter::startFunctionStub(const GlobalValue* F, unsigned StubSize,
|
||||
|
||||
void *JITEmitter::finishFunctionStub(const GlobalValue* F) {
|
||||
NumBytes += getCurrentPCOffset();
|
||||
|
||||
// Invalidate the icache if necessary.
|
||||
sys::Memory::InvalidateInstructionCache(BufferBegin, NumBytes);
|
||||
|
||||
std::swap(SavedBufferBegin, BufferBegin);
|
||||
BufferEnd = SavedBufferEnd;
|
||||
CurBufferPtr = SavedCurBufferPtr;
|
||||
|
@ -365,6 +365,21 @@ namespace {
|
||||
// Finally, remove this entry from TableBlocks.
|
||||
TableBlocks.erase(I);
|
||||
}
|
||||
|
||||
/// setMemoryWritable - When code generation is in progress,
|
||||
/// the code pages may need permissions changed.
|
||||
void setMemoryWritable(void)
|
||||
{
|
||||
for (unsigned i = 0, e = Blocks.size(); i != e; ++i)
|
||||
sys::Memory::setWritable(Blocks[i]);
|
||||
}
|
||||
/// setMemoryExecutable - When code generation is done and we're ready to
|
||||
/// start execution, the code pages may need permissions changed.
|
||||
void setMemoryExecutable(void)
|
||||
{
|
||||
for (unsigned i = 0, e = Blocks.size(); i != e; ++i)
|
||||
sys::Memory::setExecutable(Blocks[i]);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
|
@ -58,14 +58,3 @@ void llvm::sys::Memory::InvalidateInstructionCache(const void *Addr,
|
||||
#endif // end PPC
|
||||
|
||||
}
|
||||
|
||||
/// SetRXPrivilege - Grant read+execute privilege to a region of memory so
/// the JIT can run the code it just emitted.  Returns true on success.
bool llvm::sys::Memory::SetRXPrivilege(const void *Addr, size_t Size) {
#if defined(__APPLE__) && defined(__arm__)
  // Ask Mach for r-x on the region; VM_PROT_COPY requests a private copy
  // when the maximum protection would otherwise refuse the change.
  return vm_protect(mach_task_self(), (vm_address_t)Addr, (vm_size_t)Size,
                    0, VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY)
         == KERN_SUCCESS;
#else
  // Other hosts need no explicit protection change.
  return true;
#endif
}
|
||||
|
@ -76,7 +76,7 @@ llvm::sys::Memory::AllocateRWX(unsigned NumBytes, const MemoryBlock* NearBlock,
|
||||
(vm_size_t)(pageSize*NumPages), 0,
|
||||
VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
|
||||
if (KERN_SUCCESS != kr) {
|
||||
MakeErrMsg(ErrMsg, "vm_protect max RWX failed\n");
|
||||
MakeErrMsg(ErrMsg, "vm_protect max RX failed\n");
|
||||
return sys::MemoryBlock();
|
||||
}
|
||||
|
||||
@ -103,3 +103,27 @@ bool llvm::sys::Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) {
|
||||
return false;
|
||||
}
|
||||
|
||||
/// setWritable - Flip a block of JITed code back to read+write so it can be
/// patched; protections apply to whole pages while the JIT's internal
/// allocations are not page aligned.  Returns true on success.
/// NOTE(review): ErrMsg is never populated on this path.
bool llvm::sys::Memory::setWritable (MemoryBlock &M, std::string *ErrMsg) {
#if defined(__APPLE__) && defined(__arm__)
  if (M.Address == 0 || M.Size == 0)
    return false;
  sys::Memory::InvalidateInstructionCache(M.Address, M.Size);
  return vm_protect(mach_task_self(), (vm_address_t)M.Address,
                    (vm_size_t)M.Size, 0,
                    VM_PROT_READ | VM_PROT_WRITE) == KERN_SUCCESS;
#else
  // Other hosts need no explicit protection change.
  return true;
#endif
}
|
||||
|
||||
/// setExecutable - Restore read+execute permission on a block of JITed code
/// before running it.  Returns true on success.
/// NOTE(review): ErrMsg is never populated on this path.
bool llvm::sys::Memory::setExecutable (MemoryBlock &M, std::string *ErrMsg) {
#if defined(__APPLE__) && defined(__arm__)
  if (M.Address == 0 || M.Size == 0) return false;
  sys::Memory::InvalidateInstructionCache(M.Address, M.Size);
  // VM_PROT_COPY requests a private copy when the maximum protection would
  // otherwise refuse r-x.
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
    (vm_size_t)M.Size, 0, VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
  return KERN_SUCCESS == kr;
#else
  // Bug fix: platforms that allocate RWX memory up front need no protection
  // change, so report success — matching setWritable and SetRXPrivilege.
  // Returning false here made every caller on non-Darwin/ARM hosts see a
  // spurious failure.
  return true;
#endif
}
|
||||
|
||||
|
Loading…
x
Reference in New Issue
Block a user