Add SW64 base support (#382)
* Add SW64 base support
* Add a few syscalls

parent d90b6b7e51
commit 948205e6f9
.gitignore (vendored)
@@ -55,6 +55,7 @@ dkms.conf
build/
build*/
.vscode
.cache
.gdb_history
src/git_head.h
backup/
@@ -21,6 +21,8 @@ option(USE_CCACHE "Set to ON to use ccache if present in the system" ${USE_CCACHE})
option(HAVE_TRACE "Set to ON to have Trace ability (needs ZydisInfo library)" ${HAVE_TRACE})
option(NOLOADADDR "Set to ON to avoid fixing the load address of Box64" OFF)
option(NOGIT "Set to ON if not building from a git clone repo (like when building from a zip download from github)" ${NOGIT})
option(SW64 "Set ON if targeting an SW64 based device" ${SW64})

if(LARCH64)
    set(LD80BITS OFF CACHE BOOL "")
    set(NOALIGN OFF CACHE BOOL "")
@@ -49,9 +51,16 @@ endif()
if(M1 OR LARCH64)
    set(PAGE16K ON CACHE BOOL "")
endif()
if(SW64)
    set(LD80BITS OFF CACHE BOOL "")
    set(NOALIGN OFF CACHE BOOL "")
    set(PAGE8K ON CACHE BOOL "")
endif()

option(LD80BITS "Set to ON if host device have 80bits long double (i.e. i386)" ${LD80BITS})
option(NOALIGN "Set to ON if host device doesn't need re-align (i.e. i386)" ${NOALIGN})
option(ARM_DYNAREC "Set to ON to use ARM Dynamic Recompilation" ${ARM_DYNAREC})
option(PAGE8K "Set to ON if host device have PageSize of 8K (instead of 4K)" ${PAGE8K})
option(PAGE16K "Set to ON if host device have PageSize of 16K (instead of 4K)" ${PAGE16K})
option(PAGE64K "Set to ON if host device have PageSize of 64K (instead of 4K)" ${PAGE64K})
option(STATICBUILD "Set to ON to have a static build (Warning, not working)" ${STATICBUILD})
@@ -130,10 +139,16 @@ elseif(LX2160A)
    add_definitions(-DLX2160A)
    add_definitions(-pipe -march=armv8-a+crypto+crc -mcpu=cortex-a72+crypto)
    set(CMAKE_ASM_FLAGS "-pipe -march=armv8-a+crypto+crc -mcpu=cortex-a72+crypto")
elseif(SW64)
    add_definitions(-DSW64)
elseif(ARM_DYNAREC)
    set(CMAKE_ASM_FLAGS "-pipe -march=armv8-a+crc+simd+crypto")
endif()

if(PAGE8K)
    add_definitions(-DPAGE8K)
endif()

if(PAGE16K)
    add_definitions(-DPAGE16K)
endif()
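Taken together, the CMake changes make SW64 behave like the other 64-bit non-x86 hosts: LD80BITS and NOALIGN stay off, and PAGE8K is forced on, since SW64, like the Alpha it descends from, uses an 8 KiB base page. The -DSW64 and -DPAGE8K definitions then steer the C sources. As a minimal sketch of how such a page-size option typically resolves to a constant (BOX_PAGE_SIZE is a hypothetical name used for illustration, not something this commit defines):

    /* Hypothetical sketch: derive the host page size from the build options above. */
    #if defined(PAGE64K)
    #  define BOX_PAGE_SIZE 65536
    #elif defined(PAGE16K)
    #  define BOX_PAGE_SIZE 16384
    #elif defined(PAGE8K)
    #  define BOX_PAGE_SIZE 8192   /* what -DSW64 ends up selecting via PAGE8K */
    #else
    #  define BOX_PAGE_SIZE 4096
    #endif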
@@ -51,6 +51,9 @@ static pthread_mutex_t mutex_prot;
#elif defined(PAGE16K)
#define MEMPROT_SHIFT 14
#define MEMPROT_SHIFT2 (16+14)
#elif defined(PAGE8K)
#define MEMPROT_SHIFT 13
#define MEMPROT_SHIFT2 (16+13)
#else
#define MEMPROT_SHIFT 12
#define MEMPROT_SHIFT2 (16+12)
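This hunk (the mutex_prot context suggests box64's memory-protection tracking) keeps protection flags per host page, so the per-entry shift must match the real page size. With PAGE8K, MEMPROT_SHIFT is 13 because 2^13 = 8192 bytes, and MEMPROT_SHIFT2 = 16 + 13 = 29 groups 2^16 entries per top-level block, i.e. 512 MiB of address space per block. A hedged sketch of how an address splits across the two shifts (the helper name and exact indexing are assumptions, not box64's code):

    #include <stddef.h>
    #include <stdint.h>

    /* Hedged sketch: split an address using the shifts defined above. */
    static inline void memprot_split(uintptr_t addr, size_t *block, size_t *index)
    {
        *block = (size_t)(addr >> MEMPROT_SHIFT2);           /* 512 MiB block (with 8 KiB pages) */
        *index = (size_t)(addr >> MEMPROT_SHIFT) & 0xffff;   /* page slot inside that block      */
    }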
@@ -298,7 +298,7 @@ int LoadElfMemory(FILE* f, box64context_t* context, elfheader_t* head)
             void* p = (void*)-1;
             if(e->p_memsz==e->p_filesz && !(e->p_align&0xfff)) {
                 printf_log(LOG_DEBUG, "MMap block #%zu @%p offset=%p (0x%zx/0x%zx, flags:0x%x)\n", i, dest, (void*)e->p_offset, e->p_filesz, e->p_memsz, e->p_flags);
-                mmap(dest, e->p_filesz, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_FIXED | MAP_PRIVATE, fileno(f), e->p_offset);
+                p = mmap(dest, e->p_filesz, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_FIXED | MAP_PRIVATE, fileno(f), e->p_offset);
             }
             if(p!=dest) {
                 printf_log(LOG_DEBUG, "Loading block #%zu %p (0x%zx/0x%zx)\n",i, dest, e->p_filesz, e->p_memsz);
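The ELF loader change is a small bug fix rather than anything SW64-specific: the return value of the file-backed mmap() used to be discarded, so p kept its (void*)-1 sentinel and the fallback path guarded by if(p!=dest) ran even when the fast mapping had succeeded. Capturing the result makes that test meaningful. The general pattern, as a minimal self-contained sketch (not box64's exact code):

    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/types.h>

    /* Sketch: map one segment of an open file at a fixed address and report
     * whether the mapping actually landed there; the caller falls back to an
     * anonymous mapping plus an explicit read when it did not. */
    static int map_segment_at(FILE *f, void *dest, size_t filesz, off_t offset)
    {
        void *p = mmap(dest, filesz, PROT_READ | PROT_WRITE | PROT_EXEC,
                       MAP_FIXED | MAP_PRIVATE, fileno(f), offset);
        return (p != MAP_FAILED) && (p == dest);
    }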
@@ -162,6 +162,7 @@ scwrap_t syscallwrap[] = {
    { 213, __NR_epoll_create, 1},
#endif
    { 217, __NR_getdents64, 3},
    { 218, __NR_set_tid_address, 1},
    { 220, __NR_semtimedop, 4},
    { 228, __NR_clock_gettime, 2},
    { 229, __NR_clock_getres, 2},
@@ -180,6 +181,7 @@ scwrap_t syscallwrap[] = {
    { 257, __NR_openat, 4},
    { 270, __NR_pselect6, 6},
    { 272, __NR_unshare, 1},
    { 273, __NR_set_robust_list, 2},
    { 274, __NR_get_robust_list, 3},
    { 281, __NR_epoll_pwait, 6},
#ifdef _NR_eventfd
@@ -191,6 +193,7 @@ scwrap_t syscallwrap[] = {
    { 293, __NR_pipe2, 2},
    { 294, __NR_inotify_init1, 1},
    { 298, __NR_perf_event_open, 5},
    { 302, __NR_prlimit64, 4},
    { 309, __NR_getcpu, 3}, // need wrapping?
    { 315, __NR_sched_getattr, 4},
    { 317, __NR_seccomp, 3},
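These three table hunks are the "few syscalls" from the commit message: semtimedop (220), pselect6 (270) and perf_event_open (298) are now wrapped. Each scwrap_t entry maps an x86_64 syscall number to the host's __NR_* value plus an argument count, so the generic dispatcher can forward the call directly instead of needing a dedicated case. A hedged sketch of how such a table is typically consulted (the field names are assumptions; box64's actual scwrap_t layout may differ):

    #include <stddef.h>

    /* Hedged sketch of a wrap-table lookup; field names are illustrative only. */
    typedef struct { int x64nr; int nativenr; int nbpars; } scwrap_sketch_t;

    static int find_native_nr(const scwrap_sketch_t *tab, size_t count, int x64nr)
    {
        for (size_t i = 0; i < count; ++i)
            if (tab[i].x64nr == x64nr)
                return tab[i].nativenr;   /* forward with tab[i].nbpars arguments */
        return -1;                        /* not wrapped: falls through to the explicit cases */
    }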
@@ -514,6 +517,13 @@ void EXPORT x64Syscall(x64emu_t *emu)
            R_EAX = (int)syscall(__NR_inotify_init1, 0);
            break;
#endif
        case 262:
            R_EAX = (uint64_t)(int64_t)my_fstatat(emu, (int)R_RDI, (char*)R_RSI, (void*)R_RDX, (int)R_R10d);
            break;
        case 334: // It is helpful to run static binaries
            R_RAX = -1;
            errno = ENOSYS;
            break;
#ifndef __NR_fchmodat4
        case 434:
            *(int64_t*)R_RAX = fchmodat((int)R_EDI, (void*)R_RSI, (mode_t)R_RDX, (int)R_R10d);
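In the explicit switch of x64Syscall(), case 334 is new: on x86_64 that number is rseq, and it is stubbed to fail with ENOSYS. Recent glibc tries to register an rseq area during startup, and a statically linked guest cannot be intercepted at the library level, so a clean "not implemented" answer is what the terse comment about static binaries is getting at. A guest-side sketch of how the stub is observed (illustrative only; the exact error path depends on the guest libc):

    #include <errno.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    /* Sketch: a guest probing rseq (x86_64 syscall 334) sees ENOSYS and moves on. */
    int main(void)
    {
        long r = syscall(334, (void*)0, 0, 0, 0);
        if (r == -1 && errno == ENOSYS)
            printf("rseq not available, continuing without it\n");
        return 0;
    }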
@@ -717,6 +727,8 @@ uintptr_t EXPORT my_syscall(x64emu_t *emu)
        case 253:
            return (int)syscall(__NR_inotify_init1, 0);
#endif
        case 262:
            return (uint64_t)(int64_t)my_fstatat(emu, (int)R_RSI, (char*)R_RDX, (void*)R_RCX, (int)R_R8d);
#ifndef __NR_fchmodat4
        case 434:
            return (int)fchmodat((int)R_ESI, (void*)R_RDX, (mode_t)R_RCX, (int)R_R8d);
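The same newfstatat emulation (case 262) appears in both dispatchers, but the registers differ on purpose: x64Syscall() models the raw syscall instruction, where the number sits in RAX and arguments follow in RDI, RSI, RDX, R10, R8, R9, while my_syscall() wraps the guest's libc syscall(3), where the number arrives as the first C argument in RDI and every real argument shifts one register to the right. Hence R_RDI/R_RSI/R_RDX/R_R10d in one place and R_RSI/R_RDX/R_RCX/R_R8d in the other. For reference (standard System V x86_64 conventions, not box64 internals):

                            nr     arg1   arg2   arg3   arg4   arg5   arg6
    syscall instruction     RAX    RDI    RSI    RDX    R10    R8     R9
    libc syscall(3)         RDI    RSI    RDX    RCX    R8     R9     stack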
@@ -92,6 +92,43 @@ typedef struct va_list {
    memcpy(&p[6], emu->xmm, 8*16); \
  }

#elif defined(__sw_64__) /* or Alpha */
/*
typedef struct {
  char* __base;
  int __offset;
} va_list;
*/

// The following three macros are not fully compatible with SW64/Alpha,
// so don't expect variadic argument handling to work perfectly.
#define CREATE_SYSV_VALIST(A) \
  va_list sysv_varargs; \
  sysv_varargs.__offset=0; \
  sysv_varargs.__base=(A)

#define CREATE_VALIST_FROM_VALIST(VA, SCRATCH) \
  va_list sysv_varargs; \
  { \
    uintptr_t *p = (uintptr_t*)(SCRATCH); \
    int n = (X64_VA_MAX_REG - (VA)->gp_offset)/8; \
    if(n) memcpy(&p[0], (VA)->reg_save_area, n*8); \
    memcpy(&p[n], (VA)->overflow_arg_area, 100*8); \
    sysv_varargs.__offset = (VA)->gp_offset; \
    sysv_varargs.__base = (char*)p; \
  }

#define CREATE_VALIST_FROM_VAARG(STACK, SCRATCH, N) \
  va_list sysv_varargs; \
  { \
    uintptr_t *p = (uintptr_t*)(SCRATCH); \
    p[0]=R_RDI; p[1]=R_RSI; p[2]=R_RDX; \
    p[3]=R_RCX; p[4]=R_R8; p[5]=R_R9; \
    memcpy(&p[8+N], STACK, 100*8 - (8+N)*8); \
    sysv_varargs.__offset = N*8; \
    sysv_varargs.__base = (char*)p; \
  }

#elif defined(__loongarch64) || defined(__powerpc64__) || defined(__riscv)
#define CREATE_SYSV_VALIST(A) \
  va_list sysv_varargs = (va_list)A
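This header change is the heart of the port. On SW64, as on Alpha, the native va_list is just a base pointer plus a byte offset (the commented-out struct above), while the x86_64 guest hands box64 the four-field System V structure. The macros therefore rebuild a flat argument area in SCRATCH, copy the guest's integer argument registers (or the remaining register-save area) and the overflow/stack arguments into it, and point __base/__offset at the result; the SSE registers appear not to be copied in this first version, which is why the comment warns that variadic calls are only approximated on SW64. For reference, the guest-side structure the conversion starts from (the standard x86_64 ABI definition, not something this commit adds):

    /* x86_64 System V va_list element (one __va_list_tag). */
    typedef struct {
        unsigned int gp_offset;     /* bytes already consumed in the GP part of reg_save_area */
        unsigned int fp_offset;     /* bytes already consumed in the SSE part                 */
        void *overflow_arg_area;    /* arguments that were passed on the guest stack          */
        void *reg_save_area;        /* RDI,RSI,RDX,RCX,R8,R9 then XMM0..XMM7                  */
    } x64_va_list_sketch;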
@@ -776,6 +776,8 @@ void my_box64signalhandler(int32_t sig, siginfo_t* info, void * ucntx)
    void * pc = (void*)p->uc_mcontext.gp_regs[PT_NIP];
#elif defined(LA464)
    void * pc = (void*)p->uc_mcontext.__pc;
#elif defined(SW64)
    void * pc = (void*)p->uc_mcontext.sc_pc;
#else
    void * pc = NULL; // unknown arch...
#warning Unhandled architecture
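Finally, the crash handler learns where the faulting program counter lives on SW64: the kernel there exposes an Alpha-style sigcontext, so the PC is read from uc_mcontext.sc_pc rather than from a general-purpose register array. A minimal self-contained sketch of the same extraction (only the SW64/Alpha branch is taken from the diff; everything else is illustrative):

    #include <signal.h>
    #include <stdio.h>
    #include <ucontext.h>
    #include <unistd.h>

    /* Sketch: report the faulting PC from a SA_SIGINFO handler. */
    static void segv_report(int sig, siginfo_t *info, void *ucntx)
    {
        ucontext_t *uc = (ucontext_t *)ucntx;
    #if defined(SW64) || defined(__sw_64__) || defined(__alpha__)
        void *pc = (void *)uc->uc_mcontext.sc_pc;   /* Alpha-style sigcontext */
    #else
        void *pc = NULL;                            /* other architectures handled elsewhere */
    #endif
        fprintf(stderr, "signal %d at pc=%p, addr=%p\n", sig, pc, info->si_addr);
        _exit(1);
    }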