Mirror of https://github.com/ptitSeb/box64.git
Made protect tracking run in a single thread, and use a mailbox message system to avoid the use of locks
This commit is contained in:
parent
841003bc47
commit
fe90260cee
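The idea of the change, in a nutshell: all memory-protection bookkeeping now runs on one dedicated worker thread, and every other thread talks to it through a single-slot mailbox guarded by two semaphores instead of taking a mutex. A minimal sketch of that pattern (illustrative only — names are simplified; the real implementation is the new src/mailbox.c further down):

    #include <semaphore.h>

    typedef struct { int type; void* answer; sem_t* answered; } msg_t;

    static msg_t slot;           // the single mailbox slot
    static sem_t empty, full;    // initialized to empty=1, full=0 before use

    // Any thread: publish a request and (optionally) wait for the worker's answer.
    static void send(msg_t* m) {
        sem_wait(&empty);        // claim the slot (also serializes concurrent senders)
        slot = *m;
        sem_post(&full);         // hand the request to the worker
        if (m->answered)
            sem_wait(m->answered);   // block until the worker posts the result
    }

    // Worker thread: consume requests one at a time, so no other lock is needed.
    static void worker(void) {
        for (;;) {
            sem_wait(&full);
            /* ... handle slot.type, write the result through slot.answer ... */
            if (slot.answered) sem_post(slot.answered);
            sem_post(&empty);    // free the slot for the next sender
        }
    }

Because only the worker ever touches the protection tables, the per-table mutex (mutex_prot) and the lockDB()/unlockDB() helpers can be dropped, which is what most of the diff below does.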
@@ -152,6 +152,7 @@ set(ELFLOADER_SRC
    "${BOX64_ROOT}/src/box64context.c"
    "${BOX64_ROOT}/src/build_info.c"
    "${BOX64_ROOT}/src/custommem.c"
+   "${BOX64_ROOT}/src/mailbox.c"
    "${BOX64_ROOT}/src/dynarec/dynarec.c"
    "${BOX64_ROOT}/src/elfs/elfloader.c"
    "${BOX64_ROOT}/src/elfs/elfparser.c"
@@ -6,6 +6,9 @@
#include <signal.h>
#include <sys/mman.h>

+#ifdef DYNAREC
+#include "dynarec/arm64_lock.h"
+#endif
#include "box64context.h"
#include "debug.h"
#include "elfloader.h"
@@ -123,11 +126,11 @@ static void init_mutexes(box64context_t* context)
    pthread_mutex_init(&context->mutex_once, &attr);
    pthread_mutex_init(&context->mutex_once2, &attr);
    pthread_mutex_init(&context->mutex_trace, &attr);
-#ifndef DYNAREC
-    pthread_mutex_init(&context->mutex_lock, &attr);
-#else
+#ifdef DYNAREC
    pthread_mutex_init(&context->mutex_dyndump, &attr);
-#endif
+#else
+    pthread_mutex_init(&context->mutex_lock, &attr);
+#endif
    pthread_mutex_init(&context->mutex_tls, &attr);
    pthread_mutex_init(&context->mutex_thread, &attr);
    pthread_mutex_init(&context->mutex_bridge, &attr);
@@ -135,10 +138,24 @@ static void init_mutexes(box64context_t* context)
    pthread_mutexattr_destroy(&attr);
}

+void startMailbox(void);
+void haltMailbox(void);
+static void atfork_prepare_box64context(void)
+{
+    // semaphores will go in undetermined state
+    haltMailbox();
+}
+static void atfork_parent_box64context(void)
+{
+    // reinit mailbox only
+    init_mutexes(my_context);
+    startMailbox();
+}
static void atfork_child_box64context(void)
{
-    // (re)init mutex if it was lock before the fork
+    // reinit mutexes and mailbox
    init_mutexes(my_context);
+    startMailbox();
}

EXPORTDYN
@@ -180,7 +197,7 @@ box64context_t *NewBox64Context(int argc)
    context->argv = (char**)calloc(context->argc+1, sizeof(char*));

    init_mutexes(context);
-    pthread_atfork(NULL, NULL, atfork_child_box64context);
+    pthread_atfork(atfork_prepare_box64context, atfork_parent_box64context, atfork_child_box64context);

    pthread_key_create(&context->tlskey, free_tlsdatasize);

@@ -273,11 +290,11 @@ void FreeBox64Context(box64context_t** context)
    pthread_mutex_destroy(&ctx->mutex_once);
    pthread_mutex_destroy(&ctx->mutex_once2);
    pthread_mutex_destroy(&ctx->mutex_trace);
-#ifndef DYNAREC
-    pthread_mutex_destroy(&ctx->mutex_lock);
-#else
+#ifdef DYNAREC
    pthread_mutex_destroy(&ctx->mutex_dyndump);
-#endif
+#else
+    pthread_mutex_destroy(&ctx->mutex_lock);
+#endif
    pthread_mutex_destroy(&ctx->mutex_tls);
    pthread_mutex_destroy(&ctx->mutex_thread);
    pthread_mutex_destroy(&ctx->mutex_bridge);
154 src/custommem.c
@@ -44,7 +44,6 @@ static uintptr_t box64_jmptbldefault0[1<<JMPTABL_SHIFT];
#define MEMPROT_SHIFT 12
#define MEMPROT_SHIFT2 (32-MEMPROT_SHIFT)
#define MEMPROT_SIZE (1<<(32-MEMPROT_SHIFT))
-static pthread_mutex_t mutex_prot;
KHASH_MAP_INIT_INT(memprot, uint8_t*)
static kh_memprot_t *memprot;
static int inited = 0;
@@ -497,11 +496,11 @@ uintptr_t AllocDynarecMap(dynablock_t* db, size_t size)
    if(!size)
        return 0;
    if(size>MMAPSIZE-2*sizeof(blockmark_t)) {
-        pthread_mutex_lock(&mutex_mmap);
+        //pthread_mutex_lock(&mutex_mmap);
#ifndef USE_MMAP
        void *p = NULL;
        if(posix_memalign(&p, box64_pagesize, size)) {
-            pthread_mutex_unlock(&mutex_mmap);
+            //pthread_mutex_unlock(&mutex_mmap);
            dynarec_log(LOG_INFO, "Cannot create dynamic map of %zu bytes\n", size);
            return 0;
        }
@@ -509,7 +508,7 @@ uintptr_t AllocDynarecMap(dynablock_t* db, size_t size)
#else
        void* p = mmap(NULL, size, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if(p==(void*)-1) {
-            pthread_mutex_unlock(&mutex_mmap);
+            //pthread_mutex_unlock(&mutex_mmap);
            dynarec_log(LOG_INFO, "Cannot create dynamic map of %zu bytes\n", size);
            return 0;
        }
@@ -524,17 +523,17 @@ uintptr_t AllocDynarecMap(dynablock_t* db, size_t size)
        int ret;
        k = kh_put(dynablocks, blocks, (uintptr_t)p, &ret);
        kh_value(blocks, k) = db;
-        pthread_mutex_unlock(&mutex_mmap);
+        //pthread_mutex_unlock(&mutex_mmap);
        return (uintptr_t)p;
    }

-    pthread_mutex_lock(&mutex_mmap);
+    //pthread_mutex_lock(&mutex_mmap);

    uintptr_t ret = FindFreeDynarecMap(db, size);
    if(!ret)
        ret = AddNewDynarecMap(db, size);

-    pthread_mutex_unlock(&mutex_mmap);
+    //pthread_mutex_unlock(&mutex_mmap);

    return ret;
}
@@ -544,7 +543,7 @@ void FreeDynarecMap(dynablock_t* db, uintptr_t addr, size_t size)
    if(!addr || !size)
        return;
    if(size>MMAPSIZE-2*sizeof(blockmark_t)) {
-        pthread_mutex_lock(&mutex_mmap);
+        //pthread_mutex_lock(&mutex_mmap);
#ifndef USE_MMAP
        free((void*)addr);
#else
@@ -556,12 +555,12 @@ void FreeDynarecMap(dynablock_t* db, uintptr_t addr, size_t size)
            if(k!=kh_end(blocks))
                kh_del(dynablocks, blocks, k);
        }
-        pthread_mutex_unlock(&mutex_mmap);
+        //pthread_mutex_unlock(&mutex_mmap);
        return;
    }
-    pthread_mutex_lock(&mutex_mmap);
+    //pthread_mutex_lock(&mutex_mmap);
    ActuallyFreeDynarecMap(db, addr, size);
-    pthread_mutex_unlock(&mutex_mmap);
+    //pthread_mutex_unlock(&mutex_mmap);
}

dynablocklist_t* getDB(uintptr_t idx)
@@ -591,17 +590,17 @@ void addDBFromAddressRange(uintptr_t addr, size_t size)
        int idx1 = (i    )&((1<<DYNAMAP_SHIFT)-1);
        if(!dynmap123[idx3]) {
            dynablocklist_t*** p = (dynablocklist_t***)calloc(1<<DYNAMAP_SHIFT, sizeof(dynablocklist_t**));
-            if(arm64_lock_storeifnull(&dynmap123[idx3], p)!=p)
+            if(arm64_lock_storeifnull(&dynmap123[idx3], (uint64_t)p)!=(uint64_t)p)
                free(p);
        }
        if(!dynmap123[idx3][idx2]) {
            dynablocklist_t** p = (dynablocklist_t**)calloc(1<<DYNAMAP_SHIFT, sizeof(dynablocklist_t*));
-            if(arm64_lock_storeifnull(&dynmap123[idx3][idx2], p)!=p)
+            if(arm64_lock_storeifnull(&dynmap123[idx3][idx2], (uint64_t)p)!=(uint64_t)p)
                free(p);
        }
        if(!dynmap123[idx3][idx2][idx1]) {
            dynablocklist_t* p = NewDynablockList(i<<DYNAMAP_SHIFT, 1<<DYNAMAP_SHIFT, 0);
-            if(arm64_lock_storeifnull(&dynmap123[idx3][idx2][idx1], p)!=p)
+            if(arm64_lock_storeifnull(&dynmap123[idx3][idx2][idx1], (uint64_t)p)!=(uint64_t)p)
                FreeDynablockList(&p);
        }
    }
@@ -630,13 +629,13 @@ void cleanDBFromAddressRange(uintptr_t addr, size_t size, int destroy)
            if(destroy) {
                if(FreeRangeDynablock(dblist, addr, size) && 0) { // dblist is empty, check if we can delete more...
                    // disabling this for now. It seems to cause random crash in Terraria
-                    if(!arm64_lock_storeifref(&dynmap123[idx3][idx2][idx1], NULL, dblist)) {
+                    if(!arm64_lock_storeifref(&dynmap123[idx3][idx2][idx1], 0, (uint64_t)dblist)) {
                        dynablocklist_t** p = dynmap123[idx3][idx2];
                        if(dynmapempty((void**)p)) {
-                            if(!arm64_lock_storeifref(&dynmap123[idx3][idx2], NULL, p)) {
+                            if(!arm64_lock_storeifref(&dynmap123[idx3][idx2], 0, (uint64_t)p)) {
                                dynablocklist_t*** p2 = dynmap123[idx3];
                                if(dynmapempty((void**)p2)) {
-                                    if(!arm64_lock_storeifref(&dynmap123[idx3], NULL, p2)) {
+                                    if(!arm64_lock_storeifref(&dynmap123[idx3], 0, (uint64_t)p2)) {
                                        free(p2);
                                    }
                                }
@@ -668,25 +667,25 @@ void addJumpTableIfDefault64(void* addr, void* jmp)
        uintptr_t*** tbl = (uintptr_t***)malloc((1<<JMPTABL_SHIFT)*sizeof(uintptr_t**));
        for(int i=0; i<(1<<JMPTABL_SHIFT); ++i)
            tbl[i] = box64_jmptbldefault1;
-        if(arm64_lock_storeifref(&box64_jmptbl3[idx3], tbl, box64_jmptbldefault2)!=tbl)
+        if(arm64_lock_storeifref(&box64_jmptbl3[idx3], (uint64_t)tbl, (uint64_t)box64_jmptbldefault2)!=(uint64_t)tbl)
            free(tbl);
    }
    if(box64_jmptbl3[idx3][idx2] == box64_jmptbldefault1) {
        uintptr_t** tbl = (uintptr_t**)malloc((1<<JMPTABL_SHIFT)*sizeof(uintptr_t*));
        for(int i=0; i<(1<<JMPTABL_SHIFT); ++i)
            tbl[i] = box64_jmptbldefault0;
-        if(arm64_lock_storeifref(&box64_jmptbl3[idx3][idx2], tbl, box64_jmptbldefault1)!=tbl)
+        if(arm64_lock_storeifref(&box64_jmptbl3[idx3][idx2], (uint64_t)tbl, (uint64_t)box64_jmptbldefault1)!=(uint64_t)tbl)
            free(tbl);
    }
    if(box64_jmptbl3[idx3][idx2][idx1] == box64_jmptbldefault0) {
        uintptr_t* tbl = (uintptr_t*)malloc((1<<JMPTABL_SHIFT)*sizeof(uintptr_t));
        for(int i=0; i<(1<<JMPTABL_SHIFT); ++i)
            tbl[i] = (uintptr_t)arm64_next;
-        if(arm64_lock_storeifref(&box64_jmptbl3[idx3][idx2][idx1], tbl, box64_jmptbldefault0)!=tbl)
+        if(arm64_lock_storeifref(&box64_jmptbl3[idx3][idx2][idx1], (uint64_t)tbl, (uint64_t)box64_jmptbldefault0)!=(uint64_t)tbl)
            free(tbl);
    }

-    arm64_lock_storeifref(&box64_jmptbl3[idx3][idx2][idx1][idx0], jmp, arm64_next);
+    arm64_lock_storeifref(&box64_jmptbl3[idx3][idx2][idx1][idx0], (uint64_t)jmp, (uint64_t)arm64_next);
}
void setJumpTableDefault64(void* addr)
{
@@ -738,21 +737,21 @@ uintptr_t getJumpTableAddress64(uintptr_t addr)
        uintptr_t*** tbl = (uintptr_t***)malloc((1<<JMPTABL_SHIFT)*sizeof(uintptr_t**));
        for(int i=0; i<(1<<JMPTABL_SHIFT); ++i)
            tbl[i] = box64_jmptbldefault1;
-        if(arm64_lock_storeifref(&box64_jmptbl3[idx3], tbl, box64_jmptbldefault2)!=tbl)
+        if(arm64_lock_storeifref(&box64_jmptbl3[idx3], (uint64_t)tbl, (uint64_t)box64_jmptbldefault2)!=(uint64_t)tbl)
            free(tbl);
    }
    if(box64_jmptbl3[idx3][idx2] == box64_jmptbldefault1) {
        uintptr_t** tbl = (uintptr_t**)malloc((1<<JMPTABL_SHIFT)*sizeof(uintptr_t*));
        for(int i=0; i<(1<<JMPTABL_SHIFT); ++i)
            tbl[i] = box64_jmptbldefault0;
-        if(arm64_lock_storeifref(&box64_jmptbl3[idx3][idx2], tbl, box64_jmptbldefault1)!=tbl)
+        if(arm64_lock_storeifref(&box64_jmptbl3[idx3][idx2], (uint64_t)tbl, (uint64_t)box64_jmptbldefault1)!=(uint64_t)tbl)
            free(tbl);
    }
    if(box64_jmptbl3[idx3][idx2][idx1] == box64_jmptbldefault0) {
        uintptr_t* tbl = (uintptr_t*)malloc((1<<JMPTABL_SHIFT)*sizeof(uintptr_t));
        for(int i=0; i<(1<<JMPTABL_SHIFT); ++i)
            tbl[i] = (uintptr_t)arm64_next;
-        if(arm64_lock_storeifref(&box64_jmptbl3[idx3][idx2][idx1], tbl, box64_jmptbldefault0)!=tbl)
+        if(arm64_lock_storeifref(&box64_jmptbl3[idx3][idx2][idx1], (uint64_t)tbl, (uint64_t)box64_jmptbldefault0)!=(uint64_t)tbl)
            free(tbl);
    }

@@ -760,9 +759,9 @@ uintptr_t getJumpTableAddress64(uintptr_t addr)
}

// Remove the Write flag from an adress range, so DB can be executed safely
-void protectDBnolock(uintptr_t addr, uintptr_t size)
+void internalProtectDB(uintptr_t addr, uintptr_t size)
{
-    dynarec_log(LOG_DEBUG, "protectDB %p -> %p\n", (void*)addr, (void*)(addr+size-1));
+    dynarec_log(LOG_DEBUG, "internalProtectDB %p -> %p\n", (void*)addr, (void*)(addr+size-1));
    uintptr_t idx = (addr>>MEMPROT_SHIFT);
    uintptr_t end = ((addr+size-1LL)>>MEMPROT_SHIFT);
    int ret;
@@ -784,32 +783,35 @@ void protectDBnolock(uintptr_t addr, uintptr_t size)
    }
}

-void protectDB(uintptr_t addr, size_t size)
+int internalIsprotectedDB(uintptr_t addr, size_t size)
{
-    pthread_mutex_lock(&mutex_prot);
-    protectDBnolock(addr, size);
-    pthread_mutex_unlock(&mutex_prot);
-}
-
-void lockDB()
-{
-    pthread_mutex_lock(&mutex_prot);
-}
-
-void unlockDB()
-{
-    pthread_mutex_unlock(&mutex_prot);
+    uintptr_t idx = (addr>>MEMPROT_SHIFT);
+    uintptr_t end = ((addr+size-1LL)>>MEMPROT_SHIFT);
+    int ret;
+    for (uintptr_t i=idx; i<=end; ++i) {
+        const uint32_t key = (i>>MEMPROT_SHIFT2)&0xffffffff;
+        khint_t k = kh_put(memprot, memprot, key, &ret);
+        if(ret) {
+            uint8_t *m = (uint8_t*)calloc(1, MEMPROT_SIZE);
+            kh_value(memprot, k) = m;
+        }
+        const uintptr_t ii = i&(MEMPROT_SIZE-1);
+        uint8_t prot = kh_value(memprot, k)[ii];
+        if(!(prot&PROT_DYNAREC)) {
+            return 0;
+        }
+    }
+    return 1;
}

// Add the Write flag from an adress range, and mark all block as dirty
// no log, as it can be executed inside a signal handler
-void unprotectDB(uintptr_t addr, size_t size)
+void internalUnprotectDB(uintptr_t addr, size_t size)
{
-    dynarec_log(LOG_DEBUG, "unprotectDB %p -> %p\n", (void*)addr, (void*)(addr+size-1));
+    dynarec_log(LOG_DEBUG, "internalUnprotectDB %p -> %p\n", (void*)addr, (void*)(addr+size-1));
    uintptr_t idx = (addr>>MEMPROT_SHIFT);
    uintptr_t end = ((addr+size-1LL)>>MEMPROT_SHIFT);
    int ret;
-    pthread_mutex_lock(&mutex_prot);
    for (uintptr_t i=idx; i<=end; ++i) {
        const uint32_t key = (i>>MEMPROT_SHIFT2)&0xffffffff;
        khint_t k = kh_put(memprot, memprot, key, &ret);
@@ -825,20 +827,18 @@ void unprotectDB(uintptr_t addr, size_t size)
            cleanDBFromAddressRange((i<<MEMPROT_SHIFT), 1<<MEMPROT_SHIFT, 0);
        }
    }
-    pthread_mutex_unlock(&mutex_prot);
}

#endif

-void updateProtection(uintptr_t addr, size_t size, uint32_t prot)
+void internalUpdateProtection(uintptr_t addr, size_t size, uint32_t prot)
{
-    dynarec_log(LOG_DEBUG, "updateProtection %p:%p 0x%x\n", (void*)addr, (void*)(addr+size-1), prot);
+    dynarec_log(LOG_DEBUG, "internalUpdateProtection %p:%p 0x%x\n", (void*)addr, (void*)(addr+size-1), prot);
    uintptr_t idx = (addr>>MEMPROT_SHIFT);
    uintptr_t end = ((addr+size-1LL)>>MEMPROT_SHIFT);
    int ret;
    uintptr_t last = idx<<MEMPROT_SHIFT;
    uint8_t oldprot = 0xff;
-    pthread_mutex_lock(&mutex_prot);
    for (uintptr_t i=idx; i<=end; ++i) {
        const uint32_t key = (i>>MEMPROT_SHIFT2)&0xffffffff;
        khint_t k = kh_put(memprot, memprot, key, &ret);
@@ -869,16 +869,14 @@ void updateProtection(uintptr_t addr, size_t size, uint32_t prot)
    }
    if(oldprot!=0xff)
        mprotect((void*)last, (end<<MEMPROT_SHIFT)-last, oldprot&~PROT_CUSTOM); // need to optimize
-    pthread_mutex_unlock(&mutex_prot);
}

-void setProtection(uintptr_t addr, size_t size, uint32_t prot)
+void internalSetProtection(uintptr_t addr, size_t size, uint32_t prot)
{
-    dynarec_log(LOG_DEBUG, "setProtection %p:%p 0x%x\n", (void*)addr, (void*)(addr+size-1), prot);
+    dynarec_log(LOG_DEBUG, "internalSetProtection %p:%p 0x%x\n", (void*)addr, (void*)(addr+size-1), prot);
    uintptr_t idx = (addr>>MEMPROT_SHIFT);
    uintptr_t end = ((addr+size-1LL)>>MEMPROT_SHIFT);
    int ret;
-    pthread_mutex_lock(&mutex_prot);
    for (uintptr_t i=idx; i<=end; ++i) {
        const uint32_t key = (i>>MEMPROT_SHIFT2)&0xffffffff;
        khint_t k = kh_put(memprot, memprot, key, &ret);
@@ -891,16 +889,14 @@ void setProtection(uintptr_t addr, size_t size, uint32_t prot)
        memset(kh_value(memprot, k)+start, prot|PROT_ALLOC, finish-start+1);
        i+=finish-start; // +1 from the "for" loop
    }
-    pthread_mutex_unlock(&mutex_prot);
}

-void allocProtection(uintptr_t addr, size_t size, uint32_t prot)
+void internalAllocProtection(uintptr_t addr, size_t size, uint32_t prot)
{
-    dynarec_log(LOG_DEBUG, "allocProtection %p:%p 0x%x\n", (void*)addr, (void*)(addr+size-1), prot);
+    dynarec_log(LOG_DEBUG, "internalAllocProtection %p:%p 0x%x\n", (void*)addr, (void*)(addr+size-1), prot);
    uintptr_t idx = (addr>>MEMPROT_SHIFT);
    uintptr_t end = ((addr+size-1LL)>>MEMPROT_SHIFT);
    int ret;
-    pthread_mutex_lock(&mutex_prot);
    for (uintptr_t i=idx; i<=end; ++i) {
        const uint32_t key = (i>>MEMPROT_SHIFT2)&0xffffffff;
        khint_t k = kh_put(memprot, memprot, key, &ret);
@@ -917,10 +913,9 @@ void allocProtection(uintptr_t addr, size_t size, uint32_t prot)
        }
        i+=finish-start; // +1 from the "for" loop
    }
-    pthread_mutex_unlock(&mutex_prot);
}

-void loadProtectionFromMap()
+void internalLoadProtectionFromMap()
{
    char buf[500];
    FILE *f = fopen("/proc/self/maps", "r");
@@ -933,7 +928,7 @@ void loadProtectionFromMap()
        uintptr_t s, e;
        if(sscanf(buf, "%lx-%lx %c%c%c", &s, &e, &r, &w, &x)==5) {
            int prot = ((r=='r')?PROT_READ:0)|((w=='w')?PROT_WRITE:0)|((x=='x')?PROT_EXEC:0);
-            allocProtection(s, e-s, prot);
+            internalAllocProtection(s, e-s, prot);
        }
    }
    fclose(f);
@@ -947,12 +942,11 @@ static int blockempty(uint8_t* mem)
    return 1;
}

-void freeProtection(uintptr_t addr, size_t size)
+void internalFreeProtection(uintptr_t addr, size_t size)
{
-    dynarec_log(LOG_DEBUG, "freeProtection %p:%p\n", (void*)addr, (void*)(addr+size-1));
+    dynarec_log(LOG_DEBUG, "internalFreeProtection %p:%p\n", (void*)addr, (void*)(addr+size-1));
    uintptr_t idx = (addr>>MEMPROT_SHIFT);
    uintptr_t end = ((addr+size-1LL)>>MEMPROT_SHIFT);
-    pthread_mutex_lock(&mutex_prot);
    for (uintptr_t i=idx; i<=end; ++i) {
        const uint32_t key = (i>>MEMPROT_SHIFT2)&0xffffffff;
        khint_t k = kh_get(memprot, memprot, key);
@@ -968,21 +962,15 @@ void freeProtection(uintptr_t addr, size_t size)
            i+=finish-start; // +1 from the "for" loop
        }
    }
-    pthread_mutex_unlock(&mutex_prot);
}

-uint32_t getProtection(uintptr_t addr)
+uint32_t internalGetProtection(uintptr_t addr)
{
    const uint32_t key = (addr>>32)&0xffffffff;
-    pthread_mutex_lock(&mutex_prot);
    khint_t k = kh_get(memprot, memprot, key);
-    if(k==kh_end(memprot)) {
-        pthread_mutex_unlock(&mutex_prot);
-        return 0;
-    }
+    if (k == kh_end(memprot)) return 0;
    const uintptr_t idx = ((addr&0xffffffff)>>MEMPROT_SHIFT);
    uint32_t ret = kh_val(memprot, k)[idx];
-    pthread_mutex_unlock(&mutex_prot);
    return ret;
}

@@ -1034,20 +1022,14 @@ static uintptr_t maxFree(uintptr_t addr, uintptr_t sz)
        addr &= ~0xffffffffLL;
    } while(1);
}
-void* find32bitBlock(size_t size)
-{
-    return findBlockNearHint(LOWEST, size);
-}
-void* find47bitBlock(size_t size)
+void* internalFind47bitBlock(size_t size)
{
    // slow iterative search... Would need something better one day
    uintptr_t addr = 0x100000000LL;
-    pthread_mutex_lock(&mutex_prot);
    do {
        addr = nextFree(addr);
        uintptr_t sz = maxFree(addr, size);
        if(sz>=size) {
-            pthread_mutex_unlock(&mutex_prot);
            return (void*)addr;
        }
        addr += sz;
@@ -1058,51 +1040,47 @@ void* find47bitBlock(size_t size)
        addr = nextFree(addr);
        uintptr_t sz = maxFree(addr, size);
        if(sz>=size) {
-            pthread_mutex_unlock(&mutex_prot);
            return (void*)addr;
        }
        addr += sz;
    } while(addr<0x100000000LL);
-    pthread_mutex_unlock(&mutex_prot);
    printf_log(LOG_NONE, "Warning: cannot find a 0x%zx block in 47bits address space\n", size);
    return NULL;
}
-void* find47bitBlockNearHint(void* hint, size_t size)
+void* internalFind47bitBlockNearHint(void* hint, size_t size)
{
    // slow iterative search... Would need something better one day
    uintptr_t addr = (uintptr_t)hint;
-    pthread_mutex_lock(&mutex_prot);
    do {
        addr = nextFree(addr);
        uintptr_t sz = maxFree(addr, size);
        if(sz>=size) {
-            pthread_mutex_unlock(&mutex_prot);
            return (void*)addr;
        }
        addr += sz;
    } while(addr<0x800000000000LL);
-    pthread_mutex_unlock(&mutex_prot);
    printf_log(LOG_NONE, "Warning: cannot find a 0x%zx block in 32bits address space\n", size);
    return NULL;
}
-void* findBlockNearHint(void* hint, size_t size)
+void* internalFindBlockNearHint(void* hint, size_t size)
{
    // slow iterative search... Would need something better one day
    uintptr_t addr = (uintptr_t)hint;
-    pthread_mutex_lock(&mutex_prot);
    do {
        addr = nextFree(addr);
        uintptr_t sz = maxFree(addr, size);
        if(sz>=size) {
-            pthread_mutex_unlock(&mutex_prot);
            return (void*)addr;
        }
        addr += sz;
    } while(addr<0x100000000LL);
-    pthread_mutex_unlock(&mutex_prot);
    printf_log(LOG_NONE, "Warning: cannot find a 0x%zx block in 32bits address space\n", size);
    return NULL;
}
+void* internalFind32bitBlock(size_t size)
+{
+    return internalFindBlockNearHint(LOWEST, size);
+}
#undef LOWEST

int unlockCustommemMutex()
@@ -1115,7 +1093,6 @@ int unlockCustommemMutex()
        ret|=(1<<B);    \
    }
    GO(mutex_blocks, 0)
-    GO(mutex_prot, 1)
#ifdef DYNAREC
    GO(mutex_mmap, 2)
#endif
@@ -1130,7 +1107,6 @@ void relockCustommemMutex(int locks)
        pthread_mutex_lock(&A);    \

    GO(mutex_blocks, 0)
-    GO(mutex_prot, 1)
#ifdef DYNAREC
    GO(mutex_mmap, 2)
#endif
@@ -1143,7 +1119,6 @@ static void init_mutexes(void)
    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
    pthread_mutex_init(&mutex_blocks, &attr);
-    pthread_mutex_init(&mutex_prot, &attr);
#ifdef DYNAREC
    pthread_mutex_init(&mutex_mmap, &attr);
#endif
@@ -1254,6 +1229,5 @@ void fini_custommem_helper(box64context_t *ctx)
            free(p_blocks[i].block);
#endif
    free(p_blocks);
-    pthread_mutex_destroy(&mutex_prot);
    pthread_mutex_destroy(&mutex_blocks);
}
@@ -88,7 +88,7 @@ arm64_lock_xchg:
    ret

arm64_lock_storeifnull:
-    // address is x0, value is x1, x1 store to x0 only if [x0] is 0. return new [x0] value (so x1 or old value)
+    // address is x0, value is x1, x1 store to x0 only if [x0] is 0, return new [x0] value (so x1 or old value)
    ldaxr x2, [x0]
    cbnz x2, arm64_lock_storeifnull_exit
    mov x2, x1
@@ -99,7 +99,7 @@ arm64_lock_storeifnull_exit:
    ret

arm64_lock_storeifref:
-    // address is x0, value is x1, x1 store to x0 only if [x0] is x3. return new [x0] value (so x1 or old value)
+    // address is x0, value is x1, x1 store to x0 only if [x0] is x2, return new [x0] value (so x1 or old value)
    ldaxr x3, [x0]
    cmp x2, x3
    bne arm64_lock_storeifref_exit
@@ -27,13 +27,13 @@ extern void arm64_lock_read_dq(uint64_t * a, uint64_t* b, void* addr);
// STLXRD of ADDR, return 0 if ok, 1 if not
extern int arm64_lock_write_dq(uint64_t a, uint64_t b, void* addr);

-// Atomicaly exchange value at [p] with val, return old p
+// Atomically exchange value at [p] with val, return old p
extern uintptr_t arm64_lock_xchg(void* p, uintptr_t val);

-// Atomicaly store value to [p] only if [p] is NULL. Return new [p] value (so val or old)
-extern void* arm64_lock_storeifnull(void*p, void* val);
+// Atomically store val to [p] only if [p] is 0. Return new [p] value (so val or old)
+extern uint64_t arm64_lock_storeifnull(void* p, uint64_t val);

-// Atomicaly store value to [p] only if [p] is ref. Return new [p] value (so val or old)
-extern void* arm64_lock_storeifref(void*p, void* val, void* ref);
+// Atomically store val to [p] only if [p] is ref. Return new [p] value (so val or old)
+extern uint64_t arm64_lock_storeifref(void* p, uint64_t val, uint64_t ref);

#endif //__ARM64_LOCK__H__
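For reference, the contract described by these header comments is a compare-and-swap that returns the resulting value. A rough C sketch of the same semantics (illustrative only — the real implementations are the LDAXR/STLXR loops in arm64_lock.S above, and the helper names here are made up):

    #include <stdint.h>

    // Store val into *p only if *p currently holds ref; return the new value of *p
    // (val on success, the value already present otherwise).
    static uint64_t sketch_storeifref(void* p, uint64_t val, uint64_t ref)
    {
        uint64_t expected = ref;
        if (__atomic_compare_exchange_n((uint64_t*)p, &expected, val, 0,
                                        __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
            return val;        // the exchange happened
        return expected;       // someone else's value was already there
    }

    // storeifnull is the same contract with ref fixed to 0.
    static uint64_t sketch_storeifnull(void* p, uint64_t val)
    {
        return sketch_storeifref(p, val, 0);
    }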
@@ -82,7 +82,9 @@ void FreeDynablock(dynablock_t* db)
        // only the father free the DynarecMap
        if(!db->father) {
            dynarec_log(LOG_DEBUG, " -- FreeDyrecMap(%p, %d)\n", db->block, db->size);
+            pthread_mutex_lock(&my_context->mutex_dyndump);
            FreeDynarecMap(db, (uintptr_t)db->block, db->size);
+            pthread_mutex_unlock(&my_context->mutex_dyndump);
        }
        free(db->sons);
        free(db->instsize);
@@ -270,7 +272,7 @@ dynablock_t *AddNewDynablock(dynablocklist_t* dynablocks, uintptr_t addr, int* c

    if(!dynablocks->direct) {
        dynablock_t** p = (dynablock_t**)calloc(dynablocks->textsz, sizeof(dynablock_t*));
-        if(arm64_lock_storeifnull(&dynablocks->direct, p)!=p)
+        if(arm64_lock_storeifnull(&dynablocks->direct, (uint64_t)p) != (uint64_t)p)
            free(p); // someone already create the direct array, too late...
    }

@@ -279,7 +281,7 @@ dynablock_t *AddNewDynablock(dynablocklist_t* dynablocks, uintptr_t addr, int* c

    block = (dynablock_t*)calloc(1, sizeof(dynablock_t));
    block->parent = dynablocks;
-    dynablock_t* tmp = (dynablock_t*)arm64_lock_storeifnull(&dynablocks->direct[addr-dynablocks->text], block);
+    dynablock_t* tmp = (dynablock_t*)arm64_lock_storeifnull(&dynablocks->direct[addr-dynablocks->text], (uint64_t)block);
    if(tmp != block) {
        // a block appeard!
        free(block);
@@ -332,10 +334,6 @@ static dynablock_t* internalDBGetBlock(x64emu_t* emu, uintptr_t addr, uintptr_t
    if(!created)
        return block; // existing block...

-#if 0
-    if(box64_dynarec_dump)
-        pthread_mutex_lock(&my_context->mutex_dyndump);
-#endif
    // fill the block
    block->x64_addr = (void*)addr;
    pthread_mutex_lock(&my_context->mutex_dyndump);
@@ -347,18 +345,15 @@ static dynablock_t* internalDBGetBlock(x64emu_t* emu, uintptr_t addr, uintptr_t
    pthread_mutex_unlock(&my_context->mutex_dyndump);
    if(!ret) {
        dynarec_log(LOG_DEBUG, "Fillblock of block %p for %p returned an error\n", block, (void*)addr);
-        void* old = (void*)arm64_lock_storeifref(&dynablocks->direct[addr-dynablocks->text], 0, block);
-        if(old!=block && old) {// put it back in place, strange things are happening here!
+        void* old = (void*)arm64_lock_storeifref(&dynablocks->direct[addr-dynablocks->text], 0, (uint64_t)block);
+        if(old != block && old) {
+            // put it back in place, strange things are happening here!
            dynarec_log(LOG_INFO, "Warning, a wild block appeared at %p: %p\n", (void*)addr, old);
-            // doing nothing else, the block has not be writen
+            // doing nothing else, the block has not been written
        }
        free(block);
        block = NULL;
    }
-#if 0
-    if(box64_dynarec_dump)
-        pthread_mutex_unlock(&my_context->mutex_dyndump);
-#endif
    // check size
    if(block && block->x64_size) {
        int blocksz = block->x64_size;
@@ -371,8 +366,7 @@ static dynablock_t* internalDBGetBlock(x64emu_t* emu, uintptr_t addr, uintptr_t
                dblist->maxsz = blocksz;
            }
        }
-        lockDB();
-        protectDBnolock((uintptr_t)block->x64_addr, block->x64_size);
+        protectDB((uintptr_t)block->x64_addr, block->x64_size);
        // fill-in jumptable
        addJumpTableIfDefault64(block->x64_addr, block->block);
        for(int i=0; i<block->sons_size; ++i) {
@@ -380,7 +374,6 @@ static dynablock_t* internalDBGetBlock(x64emu_t* emu, uintptr_t addr, uintptr_t
            block->sons[i]->done = 1;
        }
        block->done = 1;
-        unlockDB();
    }

    dynarec_log(LOG_DEBUG, " --- DynaRec Block %s @%p:%p (%p, 0x%x bytes, with %d son(s))\n", created?"created":"recycled", (void*)addr, (void*)(addr+((block)?block->x64_size:0)), (block)?block->block:0, (block)?block->size:0, (block)?block->sons_size:0);
@@ -405,13 +398,11 @@ dynablock_t* DBGetBlock(x64emu_t* emu, uintptr_t addr, int create, dynablock_t**
            db = internalDBGetBlock(emu, addr, addr, create, *current);
        } else {
            father->need_test = 0;
-            lockDB();
-            protectDBnolock((uintptr_t)father->x64_addr, father->x64_size);
+            protectDB((uintptr_t)father->x64_addr, father->x64_size);
            // fill back jumptable
            addJumpTableIfDefault64(father->x64_addr, father->block);
            for(int i=0; i<father->sons_size; ++i)
                addJumpTableIfDefault64(father->sons[i]->x64_addr, father->sons[i]->block);
-            unlockDB();
        }
    }
    return db;
@@ -433,13 +424,11 @@ dynablock_t* DBAlternateBlock(x64emu_t* emu, uintptr_t addr, uintptr_t filladdr)
            db = internalDBGetBlock(emu, addr, filladdr, create, NULL);
        } else {
            father->need_test = 0;
-            lockDB();
-            protectDBnolock((uintptr_t)father->x64_addr, father->x64_size);
+            protectDB((uintptr_t)father->x64_addr, father->x64_size);
            // fill back jumptable
            addJumpTableIfDefault64(father->x64_addr, father->block);
            for(int i=0; i<father->sons_size; ++i)
                addJumpTableIfDefault64(father->sons[i]->x64_addr, father->sons[i]->block);
-            unlockDB();
        }
    }
    return db;
@@ -338,20 +338,23 @@ void* FillBlock64(dynablock_t* block, uintptr_t addr) {
        block->done = 1;
        return (void*)block;
    }
+    // protect the 1st page
+    protectDB(addr, 1);
    // init the helper
    dynarec_arm_t helper = {0};
    helper.start = addr;
    uintptr_t start = addr;
    uintptr_t end = arm_pass0(&helper, addr);
-    if(!helper.size) {
-        dynarec_log(LOG_DEBUG, "Warning, null-sized dynarec block (%p)\n", (void*)addr);
+    if(!helper.size || !isprotectedDB(addr, 1)) {
+        dynarec_log(LOG_DEBUG, "Warning, null-sized dynarec block (%p) or write on purge on pass0\n", (void*)addr);
        block->done = 1;
        return (void*)block;
    }
    helper.cap = helper.size+3; // needs epilog handling
    helper.insts = (instruction_arm64_t*)calloc(helper.cap, sizeof(instruction_arm64_t));
    // already protect the block and compute hash signature
-    protectDB(addr, end-addr); //end is 1byte after actual end
+    if((addr&~0xfff)!=(end&~0xfff)) // need to protect some other pages too
+        protectDB(addr, end-addr); //end is 1byte after actual end
    uint32_t hash = X31_hash_code((void*)addr, end-addr);
    // pass 1, addresses, x64 jump addresses, flags
    arm_pass1(&helper, addr);
@@ -422,7 +425,15 @@ void* FillBlock64(dynablock_t* block, uintptr_t addr) {
        }
        printf_log(LOG_NONE, "Table64 \t%d -> %d\n", oldtable64size*8, helper.table64size*8);
        printf_log(LOG_NONE, " ------------\n");
-        //TODO: Cancel block and return empty one
+
+        free(helper.insts);
+        free(helper.next);
+        free(helper.table64);
+        free(helper.sons_x64);
+        free(helper.sons_arm);
+        FreeDynarecMap(block, (uintptr_t)p, sz);
+        return NULL;
+
    }
    // add table64 if needed
    if(helper.table64size) {
@@ -453,7 +464,7 @@ void* FillBlock64(dynablock_t* block, uintptr_t addr) {
    block->x64_size = end-start;
    block->hash = X31_hash_code(block->x64_addr, block->x64_size);
    // Check if something changed, to abbort if it as
-    if(block->hash != hash) {
+    if(block->hash != hash || !isprotectedDB(addr, end-addr)) {
        dynarec_log(LOG_INFO, "Warning, a block changed while beeing processed hash(%p:%ld)=%x/%x\n", block->x64_addr, block->x64_size, block->hash, hash);
        free(helper.sons_x64);
        free(helper.sons_arm);
@@ -38,6 +38,7 @@ uintptr_t getJumpTableAddress64(uintptr_t addr);
#define PROT_ALLOC  0x40
#define PROT_CUSTOM (PROT_DYNAREC|PROT_ALLOC)

+// Implemented in mailbox.c
void updateProtection(uintptr_t addr, size_t size, uint32_t prot);
void setProtection(uintptr_t addr, size_t size, uint32_t prot);
void freeProtection(uintptr_t addr, size_t size);
@@ -45,10 +46,8 @@ uint32_t getProtection(uintptr_t addr);
void loadProtectionFromMap();
#ifdef DYNAREC
void protectDB(uintptr_t addr, size_t size);
-void protectDBnolock(uintptr_t addr, size_t size);
void unprotectDB(uintptr_t addr, size_t size);
-void lockDB();
-void unlockDB();
+int isprotectedDB(uintptr_t addr, size_t size);
#endif
void* find32bitBlock(size_t size);
void* findBlockNearHint(void* hint, size_t size);
@@ -1,6 +1,8 @@
-#ifndef __DEBUG_H_
-#define __DEBUG_H_
+#ifndef __DEBUG_H__
+#define __DEBUG_H__

#include <stdint.h>
#include <stdio.h>
+
+typedef struct box64context_s box64context_t;
extern int box64_log; // log level
@@ -61,4 +63,4 @@ extern FILE* ftrace;
#define EXPORTDYN
#endif

-#endif //__DEBUG_H_
+#endif //__DEBUG_H__
@@ -310,8 +310,8 @@ uint64_t RunFunctionHandler(int* exit, x64_ucontext_t* sigcontext, uintptr_t fnc
    int oldquitonlongjmp = emu->quitonlongjmp;
    emu->quitonlongjmp = 2;

-    //EmuCall(emu, fnc); // avoid DynaCall for now
-    DynaCall(emu, fnc);
+    EmuCall(emu, fnc); // avoid DynaCall for now
+    //DynaCall(emu, fnc);
    if(nargs>6)
        R_RSP+=((nargs-6)*sizeof(void*));

@@ -736,8 +736,8 @@ void my_box64signalhandler(int32_t sig, siginfo_t* info, void * ucntx)
    int Locks = unlockMutex();
    uint32_t prot = getProtection((uintptr_t)addr);
#ifdef DYNAREC
-    if((Locks & (1<<8)) && (sig==SIGSEGV)) //1<<8 is mutex_dyndump
-        cancelFillBlock(); // Segfault inside a Fillblock, just cancel it's creation, don't relock mutex
+    if((Locks & (1<<8)) && (sig==SIGSEGV)) //1<<8 is building_dynablock
+        cancelFillBlock(); // Segfault inside a Fillblock, just cancel it's creation, don't relock mutexes
    dynablock_t* db = NULL;
    int db_searched = 0;
    if ((sig==SIGSEGV) && (addr) && (info->si_code == SEGV_ACCERR) && (prot&PROT_DYNAREC)) {
@@ -807,6 +807,7 @@ void my_box64signalhandler(int32_t sig, siginfo_t* info, void * ucntx)
        glitch_pc = pc;
        glitch_addr = addr;
        glitch_prot = prot;
+        cleanDBFromAddressRange(((uintptr_t)addr)&~0xfff, 0x1000, 0);
        relockMutex(Locks);
        return; // try again
    }
328 src/mailbox.c (new file)
@@ -0,0 +1,328 @@
#include "mailbox.h"

#include <semaphore.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

#include "custommem.h"
#include "debug.h"

//#define DEBUG_MAILBOX

// === REQUESTS ===

typedef enum available_bits_s {
    AVAILABLE_BITS_32 = 32,
    AVAILABLE_BITS_47 = 47,
    AVAILABLE_BITS_MAX = 0,
} available_bits_e;

typedef struct request_s {
    enum request_type_e {
        REQUEST_HALT,
        REQUEST_UPD_PROTECT,
        REQUEST_SET_PROTECT,
        REQUEST_FRE_PROTECT,
        REQUEST_GET_PROTECT,
        REQUEST_LOD_PROTECT,
#ifdef DYNAREC
        REQUEST_PROT_DB,
        REQUEST_UNPROT_DB,
        REQUEST_ISPROT_DB,
#endif
        REQUEST_FIND,
        REQUEST_FIND_HINT,
    } type;
    sem_t *answered;
    void *ansBuf;
    union {
        struct {
            uintptr_t addr;
        } protect;
        struct {
            uintptr_t addr;
            size_t size;
        } size_protect;
        struct {
            uintptr_t addr;
            size_t size;
            uint32_t prot;
        } prot_protect;
        struct {
            available_bits_e availablebits;
            uintptr_t size;
        } find;
        struct {
            available_bits_e availablebits;
            uintptr_t size;
            void* hint;
        } find_near;
    } data;
#ifdef DEBUG_MAILBOX
    int reqno;
#endif
} request_t;

request_t curHdr;
sem_t empty;
sem_t full;
sem_t finished;

static void sendRequest(request_t *rhdr, void *ans) {
#ifdef DEBUG_MAILBOX
    static int reqno = 0;
    rhdr->reqno = ++reqno;
    printf_log(LOG_DEBUG, "[REQ%05u] Sending request %02X (ans=%p)\n", rhdr->reqno, rhdr->type, ans);
#endif
    rhdr->ansBuf = ans;
    if (ans) {
        sem_t answered;
        sem_init(&answered, 0, 0);
        rhdr->answered = &answered;
#ifdef DEBUG_MAILBOX
        printf_log(LOG_DEBUG, "[REQ%05u] Will wait for answer\n", rhdr->reqno);
#endif
        sem_wait(&empty);
        curHdr = *rhdr;
        sem_post(&full);
        sem_wait(&answered);
    } else {
        sem_wait(&empty);
        curHdr = *rhdr;
        sem_post(&full);
    }
#ifdef DEBUG_MAILBOX
    printf_log(LOG_DEBUG, "[REQ%05u] Done, leaving\n", rhdr->reqno);
#endif
}

// A function per request (avoids exposing dangerous internal functions)
void updateProtection(uintptr_t addr, size_t size, uint32_t prot) {
    request_t req;
    req.type = REQUEST_UPD_PROTECT;
    req.data.prot_protect.addr = addr;
    req.data.prot_protect.size = size;
    req.data.prot_protect.prot = prot;
    void *none = NULL;
    sendRequest(&req, &none); // Need to wait for completion
}
void setProtection(uintptr_t addr, size_t size, uint32_t prot) {
    request_t req;
    req.type = REQUEST_SET_PROTECT;
    req.data.prot_protect.addr = addr;
    req.data.prot_protect.size = size;
    req.data.prot_protect.prot = prot;
    sendRequest(&req, NULL);
}
void freeProtection(uintptr_t addr, size_t size) {
    request_t req;
    req.type = REQUEST_FRE_PROTECT;
    req.data.size_protect.addr = addr;
    req.data.size_protect.size = size;
    void *none = NULL;
    sendRequest(&req, &none); // Need to wait for completion
}
uint32_t getProtection(uintptr_t addr) {
    request_t req;
    req.type = REQUEST_GET_PROTECT;
    req.data.protect.addr = addr;
    uint32_t ret;
    sendRequest(&req, &ret);
    return ret;
}
void loadProtectionFromMap() {
    request_t req;
    req.type = REQUEST_LOD_PROTECT;
    void *none = NULL;
    sendRequest(&req, &none); // Need to wait for completion
}
#ifdef DYNAREC
void protectDB(uintptr_t addr, size_t size) {
    request_t req;
    req.type = REQUEST_PROT_DB;
    req.data.size_protect.addr = addr;
    req.data.size_protect.size = size;
    void *none = NULL;
    sendRequest(&req, &none); // Need to wait for completion
}
void unprotectDB(uintptr_t addr, size_t size) {
    request_t req;
    req.type = REQUEST_UNPROT_DB;
    req.data.size_protect.addr = addr;
    req.data.size_protect.size = size;
    void *none = NULL;
    sendRequest(&req, &none); // Need to wait for completion
}
int isprotectedDB(uintptr_t addr, size_t size) {
    request_t req;
    req.type = REQUEST_ISPROT_DB;
    req.data.size_protect.addr = addr;
    req.data.size_protect.size = size;
    int ret;
    sendRequest(&req, &ret);
    return ret;
}
#endif
void* find32bitBlock(size_t size) {
    request_t req;
    req.type = REQUEST_FIND;
    req.data.find.size = size;
    req.data.find.availablebits = AVAILABLE_BITS_32;
    void* ret;
    sendRequest(&req, &ret);
    return ret;
}
void* findBlockNearHint(void* hint, size_t size) {
    request_t req;
    req.type = REQUEST_FIND_HINT;
    req.data.find_near.size = size;
    req.data.find_near.availablebits = AVAILABLE_BITS_MAX;
    req.data.find_near.hint = hint;
    void* ret;
    sendRequest(&req, &ret);
    return ret;
}
void* find47bitBlock(size_t size) {
    request_t req;
    req.type = REQUEST_FIND;
    req.data.find.size = size;
    req.data.find.availablebits = AVAILABLE_BITS_47;
    void* ret;
    sendRequest(&req, &ret);
    return ret;
}
void* find47bitBlockNearHint(void* hint, size_t size) {
    request_t req;
    req.type = REQUEST_FIND_HINT;
    req.data.find_near.size = size;
    req.data.find_near.availablebits = AVAILABLE_BITS_47;
    req.data.find_near.hint = hint;
    void* ret;
    sendRequest(&req, &ret);
    return ret;
}

// === MAILBOX ===

void internalUpdateProtection(uintptr_t addr, size_t size, uint32_t prot);
void internalSetProtection(uintptr_t addr, size_t size, uint32_t prot);
void internalFreeProtection(uintptr_t addr, size_t size);
uint32_t internalGetProtection(uintptr_t addr);
void internalLoadProtectionFromMap();
#ifdef DYNAREC
void internalProtectDB(uintptr_t addr, size_t size);
void internalUnprotectDB(uintptr_t addr, size_t size);
int internalIsprotectedDB(uintptr_t addr, size_t size);
#endif
void* internalFind32bitBlock(size_t size);
void* internalFindBlockNearHint(void* hint, size_t size);
void* internalFind47bitBlock(size_t size);
void* internalFind47bitBlockNearHint(void* hint, size_t size);

void *parse_find_req(request_t *req) {
    switch (req->data.find.availablebits) {
    case AVAILABLE_BITS_32:
        return internalFind32bitBlock(req->data.find.size);
    case AVAILABLE_BITS_47:
        return internalFind47bitBlock(req->data.find.size);
    }
    __builtin_unreachable();
}
void *parse_find_near_req(request_t *req) {
    switch (req->data.find_near.availablebits) {
    case AVAILABLE_BITS_MAX:
        return internalFindBlockNearHint(req->data.find_near.hint, req->data.find_near.size);
    case AVAILABLE_BITS_47:
        return internalFind47bitBlockNearHint(req->data.find_near.hint, req->data.find_near.size);
    }
    __builtin_unreachable();
}

static void runMailbox() {
    sem_init(&empty, 0, 1);
    sem_init(&full, 0, 0);

    while (1) {
        sem_wait(&full);
#ifdef DEBUG_MAILBOX
        printf_log(LOG_DEBUG, "[ WORKER ] Received request %05u (%02X) (ans=%p)\n", curHdr.reqno, curHdr.type, curHdr.ansBuf);
#endif
        switch (curHdr.type) {
        case REQUEST_HALT:
            return;

        case REQUEST_UPD_PROTECT:
            internalUpdateProtection(curHdr.data.prot_protect.addr, curHdr.data.prot_protect.size, curHdr.data.prot_protect.prot);
            break;
        case REQUEST_SET_PROTECT:
            internalSetProtection(curHdr.data.prot_protect.addr, curHdr.data.prot_protect.size, curHdr.data.prot_protect.prot);
            break;
        case REQUEST_FRE_PROTECT:
            internalFreeProtection(curHdr.data.size_protect.addr, curHdr.data.size_protect.size);
            break;
        case REQUEST_GET_PROTECT:
            *(uintptr_t*)curHdr.ansBuf = internalGetProtection(curHdr.data.protect.addr);
            break;
        case REQUEST_LOD_PROTECT:
            internalLoadProtectionFromMap();
            break;
#ifdef DYNAREC
        case REQUEST_PROT_DB:
            internalProtectDB(curHdr.data.size_protect.addr, curHdr.data.size_protect.size);
            break;
        case REQUEST_UNPROT_DB:
            internalUnprotectDB(curHdr.data.size_protect.addr, curHdr.data.size_protect.size);
            break;
        case REQUEST_ISPROT_DB:
            *(intptr_t*)curHdr.ansBuf = internalIsprotectedDB(curHdr.data.size_protect.addr, curHdr.data.size_protect.size);
            break;
#endif
        case REQUEST_FIND:
            *(void**)curHdr.ansBuf = parse_find_req(&curHdr);
            break;
        case REQUEST_FIND_HINT:
            *(void**)curHdr.ansBuf = parse_find_near_req(&curHdr);
            break;
#ifdef DEBUG_MAILBOX
        default:
            printf_log(LOG_DEBUG, "[ WORKER ] AAAAAAA %X\n", curHdr.type);
#endif
        }
        if (curHdr.ansBuf) {
#ifdef DEBUG_MAILBOX
            printf_log(LOG_DEBUG, "[ WORKER ] Answering %X to REQ%05u\n", *(uintptr_t*)curHdr.ansBuf, curHdr.reqno);
#endif
            sem_post(curHdr.answered);
        }
        sem_post(&empty);
#ifdef DEBUG_MAILBOX
        printf_log(LOG_DEBUG, "[ WORKER ] Waiting for next request\n");
#endif
    }

    sem_destroy(&empty);
    sem_destroy(&full);
}

#include <pthread.h>
#include <signal.h>
pthread_t thread;
static void *mailboxRoutine(void *arg) {
    sigset_t sigset;
    sigfillset(&sigset);
    pthread_sigmask(SIG_BLOCK, &sigset, NULL);
    runMailbox();
    return NULL;
}

// Declared in main.c and threads.c
void startMailbox(void) {
    pthread_create(&thread, NULL, mailboxRoutine, NULL);
}
void haltMailbox(void) {
    request_t hltreq;
    hltreq.type = REQUEST_HALT;
    sendRequest(&hltreq, NULL);
    pthread_join(thread, NULL);
}
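Seen from a caller, the protection API keeps its old signatures; each call above simply becomes a round trip through the worker thread. A hypothetical caller-side view (illustration only, not code from the commit; some_addr is a placeholder):

    #include <stdint.h>
    #include <sys/mman.h>

    // getProtection() packs a REQUEST_GET_PROTECT, waits on its 'answered'
    // semaphore, and returns whatever the worker thread wrote back.
    uint32_t getProtection(uintptr_t addr);

    static int can_write(uintptr_t some_addr)   // some_addr is hypothetical
    {
        uint32_t prot = getProtection(some_addr);
        return (prot & PROT_WRITE) != 0;
    }

Since the mailbox has a single slot, senders are serialized by sem_wait(&empty), which is why no extra mutex is needed on the caller side either.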
23 src/mailbox.h (new file)
@@ -0,0 +1,23 @@
#ifndef __MAILBOX_H__
#define __MAILBOX_H__

#include <stddef.h>
#include <stdint.h>
typedef struct dynablock_s dynablock_t;

//void updateProtection(uintptr_t addr, size_t size, uint32_t prot);
//void setProtection(uintptr_t addr, size_t size, uint32_t prot);
//void freeProtection(uintptr_t addr, size_t size);
//uint32_t getProtection(uintptr_t addr);
//void loadProtectionFromMap();
//#ifdef DYNAREC
//void protectDB(uintptr_t addr, size_t size);
//void unprotectDB(uintptr_t addr, size_t size);
//int isprotectedDB(uintptr_t addr, size_t size);
//#endif
//void* find32bitBlock(size_t size);
//void* findBlockNearHint(void* hint, size_t size);
//void* find47bitBlock(size_t size);
//void* find47bitBlockNearHint(void* hint, size_t size);

#endif // __MAILBOX_H__
@@ -32,6 +32,9 @@
#include "x64run.h"
#include "symbols.h"

+void startMailbox(void);
+void haltMailbox(void);
+
box64context_t *my_context = NULL;
int box64_log = LOG_INFO; //LOG_NONE;
int box64_dump = 0;
@@ -711,6 +714,7 @@ void endBox64()
    int running = 1;
    int attempt = 0;
    printf_log(LOG_DEBUG, "Waiting for all threads to finish before unloading box64context\n");
+    haltMailbox();
    while(running) {
        DIR *proc_dir;
        char dirname[100];
@@ -743,7 +747,9 @@ void endBox64()
        }
    }
    // all done, free context
+    startMailbox(); // Need to restart mailbox (to use protections)
    FreeBox64Context(&my_context);
+    haltMailbox();
    if(libGL) {
        free(libGL);
        libGL = NULL;
@@ -802,6 +808,9 @@ int main(int argc, const char **argv, const char **env) {
    }
    if(!box64_nobanner)
        PrintBox64Version();
+
+    startMailbox();
+
    // precheck, for win-preload
    if(strstr(prog, "wine-preloader")==(prog+strlen(prog)-strlen("wine-preloader"))
        || strstr(prog, "wine64-preloader")==(prog+strlen(prog)-strlen("wine64-preloader"))) {