Mirror of https://github.com/ptitSeb/box64.git (synced 2025-02-17 04:49:17 +00:00)
Added a new option, BOX64_MMAP32, to use 32-bit mappings for external mmap calls (helps Snapdragon devices running Vulkan with Wine/WoW64; enabled by default on the SD845/SD888/SD8G2 profiles)
This commit is contained in:
parent: 89da530898
commit: bc852aebeb
@@ -104,6 +104,11 @@ Call XInitThreads when loading X11. (This is mostly for old Loki games with the
 * 0 : Don't force call XInitThreads. (Default.)
 * 1 : Call XInitThreads as soon as libX11 is loaded.
 
+#### BOX64_MMAP32 *
+Use 32-bit addresses in priority for external mmap calls (when a 32-bit process is detected).
+ * 0 : Use regular mmap. (Default, except for Snapdragon builds.)
+ * 1 : Use 32-bit address-space mmap in priority for external mmap as soon as a 32-bit process is detected. (Default for Snapdragon builds.)
+
 #### BOX64_X11GLX *
 Force libX11's GLX extension to be present.
 * 0 : Do not force libX11's GLX extension to be present.
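A minimal illustrative sketch (not box64 code; the helper and parameter names below are ours) of what "use 32-bit addresses in priority" means in practice: when the option is enabled and the running guest is 32-bit, anonymous mappings with no address hint are asked to stay below the 4GB boundary.

#include <stddef.h>
#include <sys/mman.h>

#ifndef MAP_32BIT
#define MAP_32BIT 0x40   /* x86_64 ABI value; libc does not define it on every architecture */
#endif

/* Hypothetical helper: computes the flags only, no mapping is performed. */
static int adjust_mmap_flags(int flags, const void* addr_hint,
                             int mmap32_enabled, int guest_is_32bit)
{
    if (mmap32_enabled && guest_is_32bit && addr_hint == NULL)
        flags |= MAP_32BIT;   /* keep the result addressable by 32-bit guest code */
    return flags;
}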
@@ -6,6 +6,7 @@
 #include <signal.h>
 #include <pthread.h>
 #include <errno.h>
+#include <syscall.h>
 
 #include "box64context.h"
 #include "elfloader.h"
@@ -374,7 +375,7 @@ void* customMalloc(size_t size)
     }
     size_t allocsize = (fullsize>MMAPSIZE)?fullsize:MMAPSIZE;
     #ifdef USE_MMAP
-    void* p = mmap(NULL, allocsize, PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+    void* p = internal_mmap(NULL, allocsize, PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
     memset(p, 0, allocsize);
     #else
     void* p = box_calloc(1, allocsize);
@@ -571,7 +572,7 @@ uintptr_t AllocDynarecMap(size_t size)
     }
     mprotect(p, allocsize, PROT_READ | PROT_WRITE | PROT_EXEC);
     #else
-    void* p = mmap(NULL, allocsize, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+    void* p = internal_mmap(NULL, allocsize, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
     if(p==(void*)-1) {
         dynarec_log(LOG_INFO, "Cannot create dynamic map of %zu bytes\n", allocsize);
         return 0;
@@ -1390,7 +1391,7 @@ void reserveHighMem()
     int prot;
     while (bend!=0xffffffffffffffffLL) {
         if(!rb_get_end(mapallmem, cur, &prot, &bend)) {
-            void* ret = mmap64((void*)cur, bend-cur, 0, MAP_ANONYMOUS|MAP_FIXED|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
+            void* ret = internal_mmap((void*)cur, bend-cur, 0, MAP_ANONYMOUS|MAP_FIXED|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
             printf_log(LOG_DEBUG, "Reserve %p-%p => %p (%s)\n", (void*)cur, bend, ret, strerror(errno));
             printf_log(LOG_DEBUG, "mmap %p-%p\n", cur, bend);
         if(ret!=(void*)-1) {
@@ -1485,7 +1486,7 @@ void fini_custommem_helper(box64context_t *ctx)
         for (int i=0; i<NCHUNK; ++i) {
             if(head->chunks[i].block)
                 #ifdef USE_MMAP
-                munmap(head->chunks[i].block, head->chunks[i].size);
+                internal_munmap(head->chunks[i].block, head->chunks[i].size);
                 #else
                 box_free(head->chunks[i].block);
                 #endif
@ -1531,7 +1532,7 @@ void fini_custommem_helper(box64context_t *ctx)
|
||||
|
||||
for(int i=0; i<n_blocks; ++i)
|
||||
#ifdef USE_MMAP
|
||||
munmap(p_blocks[i].block, p_blocks[i].size);
|
||||
internal_munmap(p_blocks[i].block, p_blocks[i].size);
|
||||
#else
|
||||
box_free(p_blocks[i].block);
|
||||
#endif
|
||||
@@ -1558,3 +1559,39 @@ int isLockAddress(uintptr_t addr)
 }
 
 #endif
+
+void* internal_mmap(void *addr, unsigned long length, int prot, int flags, int fd, ssize_t offset)
+{
+    void* ret = (void*)syscall(__NR_mmap, addr, length, prot, flags, fd, offset);
+    return ret;
+}
+int internal_munmap(void* addr, unsigned long length)
+{
+    int ret = syscall(__NR_munmap, addr, length);
+    return ret;
+}
+
+void* my_mmap64(x64emu_t* emu, void *addr, unsigned long length, int prot, int flags, int fd, ssize_t offset);
+
+extern int running32bits;
+EXPORT void* mmap64(void *addr, unsigned long length, int prot, int flags, int fd, ssize_t offset)
+{
+    void* ret;
+    if(running32bits && box64_mmap32 && !addr)
+        ret = my_mmap64(NULL, addr, length, prot, flags | 0x40, fd, offset);
+    else
+        ret = internal_mmap(addr, length, prot, flags, fd, offset);
+    if(ret!=MAP_FAILED && mapallmem)
+        setProtection((uintptr_t)ret, length, prot);
+    return ret;
+}
+EXPORT void* mmap(void *addr, unsigned long length, int prot, int flags, int fd, ssize_t offset) __attribute__((alias("mmap64")));
+
+EXPORT int munmap(void* addr, unsigned long length)
+{
+    int ret = internal_munmap(addr, length);
+    if(!ret && mapallmem) {
+        freeProtection((uintptr_t)addr, length);
+    }
+    return ret;
+}
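The new internal_mmap()/internal_munmap() helpers go through syscall() so that box64's own allocations can never re-enter the mmap64()/munmap() symbols this file now exports; the hard-coded flags | 0x40 in the exported mmap64() matches MAP_32BIT in the x86_64 ABI, which the my_mmap64() implementation further down already checks via flags&MAP_32BIT. A small standalone sketch of the same interposition-avoidance pattern, assuming 64-bit Linux (the function names are illustrative, not box64's):

#include <stdio.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Raw kernel calls: they cannot loop back into an mmap()/munmap() symbol
 * that this binary itself exports or interposes. */
static void* raw_mmap(void* addr, size_t len, int prot, int flags, int fd, long off)
{
    return (void*)syscall(SYS_mmap, addr, len, prot, flags, fd, off);
}

static int raw_munmap(void* addr, size_t len)
{
    return (int)syscall(SYS_munmap, addr, len);
}

int main(void)
{
    void* p = raw_mmap(NULL, 4096, PROT_READ|PROT_WRITE,
                       MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED)
        return 1;
    printf("mapped one page at %p\n", p);
    return raw_munmap(p, 4096);
}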
@@ -180,6 +180,9 @@ void DynaRun(x64emu_t* emu)
         dynarec_log(LOG_DEBUG, "%04d|Running DynaRec Block @%p (%p) of %d x64 insts (hash=0x%x) emu=%p\n", GetTID(), (void*)R_RIP, block->block, block->isize, block->hash, emu);
         // block is here, let's run it!
         native_prolog(emu, block->block);
+        extern int running32bits;
+        if(emu->segs[_CS]==0x23)
+            running32bits = 1;
     }
     if(emu->fork) {
         int forktype = emu->fork;
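The emu->segs[_CS]==0x23 test relies on the selector values the Linux x86_64 ABI hands to user space: 0x23 (__USER32_CS) for 32-bit compatibility-mode code and 0x33 (__USER_CS) for 64-bit code, so watching CS is a cheap way to notice that the guest has switched to 32-bit execution. A tiny sketch of the check, with constants named by us for clarity:

#include <stdint.h>

#define USER32_CS 0x23   /* Linux x86_64 selector for 32-bit user code */
#define USER64_CS 0x33   /* Linux x86_64 selector for 64-bit user code */

/* Hypothetical helper: is the guest currently executing 32-bit code? */
static inline int cs_is_32bit(uint16_t cs)
{
    return cs == USER32_CS;
}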
@@ -31,9 +31,6 @@
 #warning Architecture cannot follow SSE Flush to 0 flag
 #endif
 
-// from src/wrapped/wrappedlibc.c
-int my_munmap(x64emu_t* emu, void* addr, unsigned long length);
-
 typedef struct cleanup_s {
     void* f;
     int arg;
@@ -194,8 +191,10 @@ void CallAllCleanup(x64emu_t *emu)
 
 static void internalFreeX64(x64emu_t* emu)
 {
-    if(emu && emu->stack2free)
-        my_munmap(NULL, emu->stack2free, emu->size_stack);
+    if(emu && emu->stack2free) {
+        if(!internal_munmap(emu->stack2free, emu->size_stack))
+            freeProtection((uintptr_t)emu->stack2free, emu->size_stack);
+    }
 }
 
 EXPORTDYN
@@ -31,6 +31,7 @@ int my_setcontext(x64emu_t* emu, void* ucp);
 #ifdef TEST_INTERPRETER
 int RunTest(x64test_t *test)
 #else
+int running32bits = 0;
 int Run(x64emu_t *emu, int step)
 #endif
 {
@@ -440,6 +441,8 @@ x64emurun:
                 goto fini;
             }
             is32bits = (emu->segs[_CS]==0x23);
+            if(is32bits)
+                running32bits = 1;
             #endif
             break;
         case 0x65: /* GS: prefix */
@@ -456,6 +459,8 @@ x64emurun:
                 goto fini;
             }
             is32bits = (emu->segs[_CS]==0x23);
+            if(is32bits)
+                running32bits = 1;
             #endif
             break;
         case 0x66: /* 16bits prefix */
@@ -1426,6 +1431,10 @@ x64emurun:
             R_RIP = addr;
             STEP;
             is32bits = (emu->segs[_CS]==0x23);
+            #ifndef TEST_INTERPRETER
+            if(is32bits)
+                running32bits = 1;
+            #endif
             break;
         case 0xD0: /* GRP2 Eb,1 */
         case 0xD2: /* GRP2 Eb,CL */
@@ -1941,6 +1950,10 @@ x64emurun:
                 }
                 STEP2;
                 is32bits = (emu->segs[_CS]==0x23);
+                #ifndef TEST_INTERPRETER
+                if(is32bits)
+                    running32bits = 1;
+                #endif
             }
             break;
         case 4: /* JMP NEAR Ed */
@@ -1967,6 +1980,10 @@ x64emurun:
                 }
                 STEP2;
                 is32bits = (emu->segs[_CS]==0x23);
+                #ifndef TEST_INTERPRETER
+                if(is32bits)
+                    running32bits = 1;
+                #endif
             }
             break;
         case 6: /* Push Ed */
@@ -112,4 +112,7 @@ void addLockAddress(uintptr_t addr); // add an address to the list of "LOCK"a
 int isLockAddress(uintptr_t addr); // return 1 is the address is used as a LOCK, 0 else
 #endif
 
+void* internal_mmap(void *addr, unsigned long length, int prot, int flags, int fd, ssize_t offset);
+int internal_munmap(void* addr, unsigned long length);
+
 #endif //__CUSTOM_MEM__H_
@@ -11,6 +11,7 @@ extern uintptr_t box64_pagesize;
 extern uintptr_t box64_load_addr;
 extern int box64_dynarec_test;
 extern int box64_maxcpu;
+extern int box64_mmap32;
 #ifdef DYNAREC
 extern int box64_dynarec_dump;
 extern int box64_dynarec_trace;
@@ -46,9 +46,6 @@ void _pthread_cleanup_pop(void* buffer, int exec);
 // it will be pthread_kill@GLIBC_2.17 on aarch64, but it's GLIBC_2.2.5 on x86_64
 static iFli_t real_phtread_kill_old = NULL;
 
-// from src/wrapped/wrappedlibc.c
-void* my_mmap(x64emu_t* emu, void* addr, unsigned long length, int prot, int flags, int fd, int64_t offset);
-
 typedef struct threadstack_s {
     void* stack;
     size_t stacksize;
@@ -203,7 +200,9 @@ x64emu_t* thread_get_emu()
         stacksize = stack_size;
         pthread_attr_destroy(&attr);
     }
-    void* stack = my_mmap(NULL, NULL, stacksize, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_GROWSDOWN, -1, 0);
+    void* stack = internal_mmap(NULL, stacksize, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_GROWSDOWN, -1, 0);
+    if(stack!=MAP_FAILED)
+        setProtection((uintptr_t)stack, stacksize, PROT_READ|PROT_WRITE);
     x64emu_t *emu = NewX64Emu(my_context, 0, (uintptr_t)stack, stacksize, 1);
     SetupX64Emu(emu, NULL);
     thread_set_emu(emu);
@@ -484,7 +483,9 @@ EXPORT int my_pthread_create(x64emu_t *emu, void* t, void* attr, void* start_rou
         stacksize = attr_stacksize;
         own = 0;
     } else {
-        stack = my_mmap(NULL, NULL, stacksize, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_GROWSDOWN, -1, 0);
+        stack = internal_mmap(NULL, stacksize, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_GROWSDOWN, -1, 0);
+        if(stack!=MAP_FAILED)
+            setProtection((uintptr_t)stack, stacksize, PROT_READ|PROT_WRITE);
         own = 1;
     }
 
@@ -509,7 +510,9 @@
 void* my_prepare_thread(x64emu_t *emu, void* f, void* arg, int ssize, void** pet)
 {
     int stacksize = (ssize)?ssize:(2*1024*1024); //default stack size is 2Mo
-    void* stack = my_mmap(NULL, NULL, stacksize, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_GROWSDOWN, -1, 0);
+    void* stack = internal_mmap(NULL, stacksize, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_GROWSDOWN, -1, 0);
+    if(stack!=MAP_FAILED)
+        setProtection((uintptr_t)stack, stacksize, PROT_READ|PROT_WRITE);
     emuthread_t *et = (emuthread_t*)box_calloc(1, sizeof(emuthread_t));
     x64emu_t *emuthread = NewX64Emu(emu->context, (uintptr_t)f, (uintptr_t)stack, stacksize, 1);
     SetupX64Emu(emuthread, emu );
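Every path that used to obtain guest stacks through my_mmap() now pairs internal_mmap() with an explicit setProtection() call, and teardown mirrors it with internal_munmap() plus freeProtection(), since the raw syscall bypasses the bookkeeping the exported wrappers would otherwise perform. A sketch of that pairing, with prototypes approximated from how the functions are used in this diff (see the custommem.h hunk above); the helper names are ours:

#include <stdint.h>
#include <sys/mman.h>
#include <sys/types.h>

/* Prototypes approximated from this commit, not copied verbatim. */
void* internal_mmap(void* addr, unsigned long length, int prot, int flags, int fd, ssize_t offset);
int   internal_munmap(void* addr, unsigned long length);
void  setProtection(uintptr_t addr, unsigned long size, int prot);
void  freeProtection(uintptr_t addr, unsigned long size);

/* Hypothetical helpers showing the allocate/release pairing. */
static void* alloc_guest_stack(unsigned long size)
{
    void* stack = internal_mmap(NULL, size, PROT_READ|PROT_WRITE,
                                MAP_PRIVATE|MAP_ANONYMOUS|MAP_GROWSDOWN, -1, 0);
    if (stack != MAP_FAILED)
        setProtection((uintptr_t)stack, size, PROT_READ|PROT_WRITE);  /* record it by hand */
    return stack;
}

static void free_guest_stack(void* stack, unsigned long size)
{
    if (!internal_munmap(stack, size))            /* raw unmap succeeded */
        freeProtection((uintptr_t)stack, size);   /* drop the matching record */
}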
src/main.c (16 changed lines)
@@ -51,6 +51,11 @@ int box64_inprocessgpu = 0;
 int box64_malloc_hack = 0;
 int box64_dynarec_test = 0;
 int box64_maxcpu = 0;
+#if defined(SD845) || defined(SD888) || defined(SD8G2)
+int box64_mmap32 = 1;
+#else
+int box64_mmap32 = 0;
+#endif
 #ifdef DYNAREC
 int box64_dynarec = 1;
 int box64_dynarec_dump = 0;
@@ -964,6 +969,17 @@ void LoadLogEnv()
             printf_log(LOG_NONE, "Will not limit the number of cpu cores exposed\n");
         }
     }
+    p = getenv("BOX64_MMAP32");
+    if(p) {
+        if(strlen(p)==1) {
+            if(p[0]>='0' && p[0]<='0'+1)
+                box64_mmap32 = p[0]-'0';
+        }
+        if(box64_mmap32)
+            printf_log(LOG_INFO, "Will use 32bits address in priority for external MMAP (when 32bits process are detected)\n");
+        else
+            printf_log(LOG_INFO, "Will not use 32bits address in priority for external MMAP (when 32bits process are detected)\n");
+    }
     box64_pagesize = sysconf(_SC_PAGESIZE);
     if(!box64_pagesize)
         box64_pagesize = 4096;
@@ -11,9 +11,7 @@
 #include "emu/x64emu_private.h"
 #include "emu/x64run_private.h"
 #include "auxval.h"
-
-// from src/wrapped/wrappedlibc.c
-void* my_mmap(x64emu_t* emu, void* addr, unsigned long length, int prot, int flags, int fd, int64_t offset);
+#include "custommem.h"
 
 EXPORTDYN
 int CalcStackSize(box64context_t *context)
@@ -24,11 +22,12 @@ int CalcStackSize(box64context_t *context)
         CalcStack(context->elfs[i], &context->stacksz, &context->stackalign);
 
     //if (posix_memalign((void**)&context->stack, context->stackalign, context->stacksz)) {
-    context->stack = my_mmap(NULL, NULL, context->stacksz, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_GROWSDOWN, -1, 0);
+    context->stack = internal_mmap(NULL, context->stacksz, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_GROWSDOWN, -1, 0);
     if (context->stack==(void*)-1) {
         printf_log(LOG_NONE, "Cannot allocate aligned memory (0x%lx/0x%zx) for stack\n", context->stacksz, context->stackalign);
         return 1;
-    }
+    } else
+        setProtection((uintptr_t)context->stack, context->stacksz, PROT_READ|PROT_WRITE);
     //memset(context->stack, 0, context->stacksz);
     printf_log(LOG_DEBUG, "Stack is @%p size=0x%lx align=0x%zx\n", context->stack, context->stacksz, context->stackalign);
 
@@ -86,6 +86,7 @@ CENTRYBOOL(BOX64_NOSIGSEGV, no_sigsegv) \
 CENTRYBOOL(BOX64_NOSIGILL, no_sigill) \
 ENTRYBOOL(BOX64_SHOWSEGV, box64_showsegv) \
 ENTRYBOOL(BOX64_SHOWBT, box64_showbt) \
+ENTRYBOOL(BOX64_MMAP32, box64_mmap32) \
 ENTRYBOOL(BOX64_X11THREADS, box64_x11threads) \
 ENTRYBOOL(BOX64_X11GLX, box64_x11glx) \
 ENTRYDSTRING(BOX64_LIBGL, box64_libGL) \
@@ -2621,7 +2621,7 @@ EXPORT void* my_mmap64(x64emu_t* emu, void *addr, unsigned long length, int prot
         addr = find47bitBlock(length);
     }
     #endif
-    void* ret = mmap64(addr, length, prot, new_flags, fd, offset);
+    void* ret = internal_mmap(addr, length, prot, new_flags, fd, offset);
     #ifndef NOALIGN
     if((ret!=MAP_FAILED) && (flags&MAP_32BIT) &&
         (((uintptr_t)ret>0xffffffffLL) || (box64_wine && ((uintptr_t)ret&0xffff) && (ret!=addr)))) {
@@ -2655,7 +2655,7 @@ EXPORT void* my_mmap64(x64emu_t* emu, void *addr, unsigned long length, int prot
     }
     #endif
     if((ret!=MAP_FAILED) && (flags&MAP_FIXED_NOREPLACE) && (ret!=addr)) {
-        munmap(ret, length);
+        internal_munmap(ret, length);
         errno = EEXIST;
         return MAP_FAILED;
     }
@@ -2738,7 +2738,7 @@ EXPORT int my_munmap(x64emu_t* emu, void* addr, unsigned long length)
 {
     (void)emu;
     if(emu && (box64_log>=LOG_DEBUG || box64_dynarec_log>=LOG_DEBUG)) {printf_log(LOG_NONE, "munmap(%p, %lu)\n", addr, length);}
-    int ret = munmap(addr, length);
+    int ret = internal_munmap(addr, length);
     #ifdef DYNAREC
     if(!ret && box64_dynarec && length) {
         cleanDBFromAddressRange((uintptr_t)addr, length, 1);