jit: Add more reasonable estimates for RX protect.

Repository: https://github.com/hrydgard/ppsspp.git
Commit: b2798c7ada (parent: 5294a64828)
@@ -80,7 +80,7 @@ public:
 		// If not WX Exclusive, no need to call ProtectMemoryPages because we never change the protection from RWX.
 		PoisonMemory(offset);
 		ResetCodePtr(offset);
-		if (PlatformIsWXExclusive()) {
+		if (PlatformIsWXExclusive() && offset != 0) {
 			// Need to re-protect the part we didn't clear.
 			ProtectMemoryPages(region, offset, MEM_PROT_READ | MEM_PROT_EXEC);
 		}
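The added `&& offset != 0` guard skips the re-protect when none of the old code is kept. A minimal sketch of the idea, assuming a POSIX mprotect and a page-aligned arena base (an illustration, not PPSSPP's ProtectMemoryPages):

#include <sys/mman.h>
#include <cstddef>
#include <cstdint>

// Hypothetical helper: flip the retained prefix of a code arena back to
// read+execute after the tail has been cleared. If nothing was retained,
// there is nothing to flip, so the syscall is skipped entirely.
static bool ReprotectKeptPrefixRX(uint8_t *arenaBase, size_t keepBytes, size_t pageSize) {
	if (keepBytes == 0)
		return true;  // mirrors the offset != 0 guard: a zero-length protect is pointless
	// Protection changes are page-granular, so round the length up.
	size_t len = (keepBytes + pageSize - 1) & ~(pageSize - 1);
	return mprotect(arenaBase, len, PROT_READ | PROT_EXEC) == 0;
}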
@@ -43,7 +43,7 @@ void ThunkManager::Init()
 #endif

 	AllocCodeSpace(THUNK_ARENA_SIZE);
-	BeginWrite();
+	BeginWrite(512);
 	save_regs = GetCodePtr();
 #if PPSSPP_ARCH(AMD64)
 	for (int i = 2; i < ABI_GetNumXMMRegs(); i++)
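BeginWrite(512) passes an upper bound on how many bytes the thunk setup is about to emit. The point of the estimate is that, on a W^X-exclusive platform, only the pages covering that span need to become writable rather than the whole arena. A rough sketch of that pattern using POSIX mprotect and illustrative names (not PPSSPP's actual BeginWrite/EndWrite implementation):

#include <sys/mman.h>
#include <unistd.h>
#include <cstdint>

// Illustrative W^X write window: make writable only the pages that the
// upcoming emission (sizeEstimate bytes from writePtr) can touch.
struct WriteWindow { uint8_t *base; size_t len; };

static WriteWindow OpenWriteWindow(uint8_t *writePtr, size_t sizeEstimate) {
	size_t page = (size_t)sysconf(_SC_PAGESIZE);
	uint8_t *base = (uint8_t *)((uintptr_t)writePtr & ~(uintptr_t)(page - 1));
	size_t len = (size_t)(writePtr - base) + sizeEstimate;
	len = (len + page - 1) & ~(page - 1);  // round up to whole pages
	mprotect(base, len, PROT_READ | PROT_WRITE);
	return WriteWindow{base, len};
}

static void CloseWriteWindow(const WriteWindow &w) {
	// Back to read+execute once emission is finished.
	mprotect(w.base, w.len, PROT_READ | PROT_EXEC);
}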
@@ -151,7 +151,7 @@ const void *ThunkManager::ProtectFunction(const void *function, int num_params)

 	_assert_msg_(region != nullptr, "Can't protect functions before the emu is started.");

-	BeginWrite();
+	BeginWrite(128);
 	const u8 *call_point = GetCodePtr();
 	Enter(this, true);

@@ -71,8 +71,8 @@ namespace MIPSComp {
 using namespace ArmJitConstants;

 void ArmJit::GenerateFixedCode() {
+	BeginWrite(GetMemoryProtectPageSize());
 	const u8 *start = AlignCodePage();
-	BeginWrite();

 	// LR == SCRATCHREG2 on ARM32 so it needs to be pushed.
 	restoreRoundingMode = AlignCode16(); {
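In GenerateFixedCode the estimate is GetMemoryProtectPageSize(): the fixed dispatcher stubs are small, so one protection page is taken as a sufficient upper bound, and the call is moved ahead of AlignCodePage(), presumably so the estimate covers everything from the current write position onward. For reference, a query like the following is the usual source of that page size on POSIX systems (PPSSPP's own helper may differ, e.g. it would need GetSystemInfo on Windows):

#include <unistd.h>
#include <cstddef>

// Illustrative page-size query; real code would cache the value and use the
// platform-specific API where sysconf is unavailable.
static size_t MemoryProtectPageSizeGuess() {
	long ps = sysconf(_SC_PAGESIZE);
	return ps > 0 ? (size_t)ps : 4096;  // fall back to a common default
}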
@@ -234,7 +234,7 @@ void ArmJit::Compile(u32 em_address) {
 		ClearCache();
 	}

-	BeginWrite();
+	BeginWrite(JitBlockCache::MAX_BLOCK_INSTRUCTIONS * 16);

 	int block_num = blocks.AllocateBlock(em_address);
 	JitBlock *b = blocks.GetBlock(block_num);
@@ -97,8 +97,8 @@ namespace MIPSComp {
 using namespace Arm64JitConstants;

 void Arm64Jit::GenerateFixedCode(const JitOptions &jo) {
+	BeginWrite(GetMemoryProtectPageSize());
 	const u8 *start = AlignCodePage();
-	BeginWrite();

 	if (jo.useStaticAlloc) {
 		saveStaticRegisters = AlignCode16();
@@ -230,7 +230,7 @@ void Arm64Jit::Compile(u32 em_address) {
 		ClearCache();
 	}

-	BeginWrite(4);
+	BeginWrite(JitBlockCache::MAX_BLOCK_INSTRUCTIONS * 16);

 	int block_num = blocks.AllocateBlock(em_address);
 	JitBlock *b = blocks.GetBlock(block_num);
@@ -66,8 +66,8 @@ void ImHere() {
 }

 void Jit::GenerateFixedCode(JitOptions &jo) {
+	BeginWrite(GetMemoryProtectPageSize());
 	AlignCodePage();
-	BeginWrite();

 	restoreRoundingMode = AlignCode16(); {
 		STMXCSR(MIPSSTATE_VAR(temp));
@@ -281,7 +281,8 @@ void Jit::Compile(u32 em_address) {
 		return;
 	}

-	BeginWrite();
+	// Sometimes we compile fairly large blocks, although it's uncommon.
+	BeginWrite(JitBlockCache::MAX_BLOCK_INSTRUCTIONS * 16);

 	int block_num = blocks.AllocateBlock(em_address);
 	JitBlock *b = blocks.GetBlock(block_num);
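The block compilers now ask for JitBlockCache::MAX_BLOCK_INSTRUCTIONS * 16 bytes instead of a placeholder like BeginWrite(4): the writable window has to cover the worst case, and 16 bytes of emitted host code per guest MIPS instruction is taken as a generous per-instruction budget. A sketch of the same arithmetic with a stand-in constant (the real MAX_BLOCK_INSTRUCTIONS value lives in JitBlockCache and is not shown in this diff):

#include <cstddef>

// Stand-in for JitBlockCache::MAX_BLOCK_INSTRUCTIONS; the actual value is
// defined elsewhere in the codebase.
constexpr size_t kMaxBlockInstructions = 250;    // hypothetical value
constexpr size_t kMaxHostBytesPerGuestOp = 16;   // assumed worst-case expansion

// Upper bound on bytes a single block compile may emit, e.g. 250 * 16 = 4000.
constexpr size_t kMaxBlockWriteEstimate = kMaxBlockInstructions * kMaxHostBytesPerGuestOp;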
@@ -462,7 +462,7 @@ void JitSafeMemFuncs::Init(ThunkManager *thunks) {
 	AllocCodeSpace(FUNCS_ARENA_SIZE);
 	thunks_ = thunks;

-	BeginWrite();
+	BeginWrite(1024);
 	readU32 = GetCodePtr();
 	CreateReadFunc(32, (const void *)&Memory::Read_U32);
 	readU16 = GetCodePtr();
@@ -161,7 +161,7 @@ static const JitLookup jitLookup[] = {

 JittedVertexDecoder VertexDecoderJitCache::Compile(const VertexDecoder &dec, int32_t *jittedSize) {
 	dec_ = &dec;
-	BeginWrite();
+	BeginWrite(4096);
 	const u8 *start = AlignCode16();

 	bool prescaleStep = false;
@@ -143,7 +143,7 @@ static const JitLookup jitLookup[] = {
 JittedVertexDecoder VertexDecoderJitCache::Compile(const VertexDecoder &dec, int32_t *jittedSize) {
 	dec_ = &dec;

-	BeginWrite();
+	BeginWrite(4096);
 	const u8 *start = AlignCode16();

 	bool prescaleStep = false;
@@ -164,7 +164,7 @@ static const JitLookup jitLookup[] = {

 JittedVertexDecoder VertexDecoderJitCache::Compile(const VertexDecoder &dec, int32_t *jittedSize) {
 	dec_ = &dec;
-	BeginWrite();
+	BeginWrite(4096);
 	const u8 *start = this->AlignCode16();

 #if PPSSPP_ARCH(X86)
@@ -42,11 +42,12 @@ SingleFunc PixelJitCache::CompileSingle(const PixelFuncID &id) {
 		RegCache::GEN_ARG_ID,
 	});

-	BeginWrite();
+	BeginWrite(64);
 	Describe("Init");
 	WriteConstantPool(id);

 	const u8 *resetPos = AlignCode16();
+	EndWrite();
 	bool success = true;

 #if PPSSPP_PLATFORM(WINDOWS)
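Note the EndWrite() added after the constant pool is emitted: once BeginWrite() carries a tight size estimate, each write span has to be closed out so the pages return to read+execute before anything tries to run them. If one wanted to make that pairing harder to forget, a small RAII guard would do it; this is a hypothetical sketch, not something in the PPSSPP codebase, and it assumes BeginWrite/EndWrite signatures as shown:

#include <cstddef>

// Hypothetical RAII helper pairing a size-estimated BeginWrite with EndWrite,
// so early returns cannot leave code pages writable.
template <typename CodeBlockT>
class ScopedCodeWrite {
public:
	ScopedCodeWrite(CodeBlockT &block, size_t sizeEstimate) : block_(block) {
		block_.BeginWrite(sizeEstimate);
	}
	~ScopedCodeWrite() { block_.EndWrite(); }
	ScopedCodeWrite(const ScopedCodeWrite &) = delete;
	ScopedCodeWrite &operator=(const ScopedCodeWrite &) = delete;

private:
	CodeBlockT &block_;
};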
@@ -420,7 +420,7 @@ int CodeBlock::WriteProlog(int extraStack, const std::vector<RegCache::Reg> &vec
 #if PPSSPP_ARCH(X86) || PPSSPP_ARCH(AMD64)
 	using namespace Gen;

-	BeginWrite();
+	BeginWrite(32768);
 	AlignCode16();
 	lastPrologStart_ = GetWritableCodePtr();

@@ -45,7 +45,7 @@ FetchFunc SamplerJitCache::CompileFetch(const SamplerID &id) {
 	regCache_.ForceRetain(RegCache::GEN_RESULT);
 	regCache_.ChangeReg(XMM0, RegCache::VEC_RESULT);

-	BeginWrite();
+	BeginWrite(2048);
 	Describe("Init");
 	const u8 *start = AlignCode16();

@@ -122,7 +122,7 @@ FetchFunc SamplerJitCache::CompileFetch(const SamplerID &id) {

 NearestFunc SamplerJitCache::CompileNearest(const SamplerID &id) {
 	_assert_msg_(!id.fetch && !id.linear, "Fetch and linear should be cleared on sampler id");
-	BeginWrite();
+	BeginWrite(2048);
 	Describe("Init");

 	// Let's drop some helpful constants here.
@@ -438,7 +438,7 @@ NearestFunc SamplerJitCache::CompileNearest(const SamplerID &id) {

 LinearFunc SamplerJitCache::CompileLinear(const SamplerID &id) {
 	_assert_msg_(id.linear && !id.fetch, "Only linear should be set on sampler id");
-	BeginWrite();
+	BeginWrite(2048);
 	Describe("Init");

 	// We don't use stackArgPos_ here, this is just for DXT.