Mirror of https://github.com/ptitSeb/box64.git (synced 2025-02-16 04:20:55 +00:00)

Commit 51fa96c44b ("Second pass"), parent 48d6135106
@@ -294,7 +294,7 @@ void customFree(void* p)
 #ifdef DYNAREC
 typedef struct mmaplist_s {
     void*            block;
-    int              maxfree;
+    size_t           maxfree;
     size_t           size;
     kh_dynablocks_t* dblist;
     uint8_t*         helper;
@@ -304,9 +304,9 @@ uintptr_t FindFreeDynarecMap(dynablock_t* db, size_t size)
 {
     // look for free space
     void* sub = NULL;
-    for(int i=0; i<mmapsize; ++i) {
+    for(size_t i=0; i<mmapsize; ++i) {
         if(mmaplist[i].maxfree>=size) {
-            int rsize = 0;
+            size_t rsize = 0;
             sub = getFirstBlock(mmaplist[i].block, size, &rsize);
             if(sub) {
                 uintptr_t ret = (uintptr_t)allocBlock(mmaplist[i].block, sub, size);
@@ -321,7 +321,7 @@ uintptr_t FindFreeDynarecMap(dynablock_t* db, size_t size)
                     int r;
                     k = kh_put(dynablocks, blocks, (uintptr_t)ret, &r);
                     kh_value(blocks, k) = db;
-                    for(int j=0; j<size; ++j)
+                    for(size_t j=0; j<size; ++j)
                         mmaplist[i].helper[(uintptr_t)ret-(uintptr_t)mmaplist[i].block+j] = (j<256)?j:255;
                 return ret;
             }
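Most of this commit is exactly the pattern visible above: loop indices and byte counts that used to be int become size_t. As a standalone sketch (not box64 code) of what that buys: an int index compared against size_t quantities draws -Wsign-compare, and a signed 32-bit index cannot address more than 2^31-1 entries.

#include <stddef.h>
#include <stdio.h>

/* Count how many free-space entries can hold 'want' bytes.
 * Using size_t for both the index and the sizes keeps the
 * comparisons sign-correct and still works past the 2 GiB mark. */
static size_t count_candidates(const size_t *maxfree, size_t n, size_t want)
{
    size_t hits = 0;
    for (size_t i = 0; i < n; ++i)
        if (maxfree[i] >= want)
            ++hits;
    return hits;
}

int main(void)
{
    size_t maxfree[] = { 64, 4096, 128 };
    printf("%zu entries can hold 100 bytes\n",
           count_candidates(maxfree, sizeof(maxfree)/sizeof(maxfree[0]), 100));
    return 0;
}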
@@ -332,13 +332,13 @@ uintptr_t FindFreeDynarecMap(dynablock_t* db, size_t size)
 
 uintptr_t AddNewDynarecMap(dynablock_t* db, size_t size)
 {
-    int i = mmapsize++;
-    dynarec_log(LOG_DEBUG, "Ask for DynaRec Block Alloc #%d\n", mmapsize);
+    size_t i = mmapsize++;
+    dynarec_log(LOG_DEBUG, "Ask for DynaRec Block Alloc #%zu\n", mmapsize);
     mmaplist = (mmaplist_t*)realloc(mmaplist, mmapsize*sizeof(mmaplist_t));
 #ifndef USE_MMAP
     void *p = NULL;
     if(posix_memalign(&p, box64_pagesize, MMAPSIZE)) {
-        dynarec_log(LOG_INFO, "Cannot create memory map of %d byte for dynarec block #%d\n", MMAPSIZE, i);
+        dynarec_log(LOG_INFO, "Cannot create memory map of %d byte for dynarec block #%zu\n", MMAPSIZE, i);
         --mmapsize;
         return 0;
     }
@@ -346,7 +346,7 @@ uintptr_t AddNewDynarecMap(dynablock_t* db, size_t size)
 #else
     void* p = mmap(NULL, MMAPSIZE, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
     if(p==(void*)-1) {
-        dynarec_log(LOG_INFO, "Cannot create memory map of %d byte for dynarec block #%d\n", MMAPSIZE, i);
+        dynarec_log(LOG_INFO, "Cannot create memory map of %d byte for dynarec block #%zu\n", MMAPSIZE, i);
         --mmapsize;
         return 0;
     }
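AddNewDynarecMap above needs memory that is both writable (to emit translated code into) and executable. A standalone sketch of the two allocation paths it switches between with USE_MMAP; JIT_BLOCK_SIZE and the error handling here are illustrative, not box64's values.

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

#define JIT_BLOCK_SIZE (128*1024)   /* illustrative block size */

static void* alloc_exec_block(size_t sz, size_t pagesize)
{
#ifndef USE_MMAP
    void *p = NULL;
    if (posix_memalign(&p, pagesize, sz))
        return NULL;
    /* posix_memalign hands back RW memory; it still has to be made executable. */
    if (mprotect(p, sz, PROT_READ | PROT_WRITE | PROT_EXEC)) {
        free(p);
        return NULL;
    }
#else
    /* One call gives page-aligned RWX memory directly (may be refused on
     * hardened kernels that forbid writable+executable mappings). */
    void *p = mmap(NULL, sz, PROT_READ | PROT_WRITE | PROT_EXEC,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED)
        return NULL;
#endif
    return p;
}

int main(void)
{
    void *blk = alloc_exec_block(JIT_BLOCK_SIZE, (size_t)sysconf(_SC_PAGESIZE));
    printf("executable block at %p\n", blk);
    return 0;
}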
@@ -374,16 +374,17 @@ uintptr_t AddNewDynarecMap(dynablock_t* db, size_t size)
         int ret;
         k = kh_put(dynablocks, blocks, (uintptr_t)sub, &ret);
         kh_value(blocks, k) = db;
-        for(int j=0; j<size; ++j)
+        for(size_t j=0; j<size; ++j)
             mmaplist[i].helper[(uintptr_t)sub-(uintptr_t)mmaplist[i].block + j] = (j<256)?j:255;
     return sub;
 }
 
-void ActuallyFreeDynarecMap(dynablock_t* db, uintptr_t addr, int size)
+void ActuallyFreeDynarecMap(dynablock_t* db, uintptr_t addr, size_t size)
 {
+    (void)db;
     if(!addr || !size)
         return;
-    for(int i=0; i<mmapsize; ++i) {
+    for(size_t i=0; i<mmapsize; ++i) {
         if ((addr>(uintptr_t)mmaplist[i].block)
          && (addr<((uintptr_t)mmaplist[i].block+mmaplist[i].size))) {
             void* sub = (void*)(addr-sizeof(blockmark_t));
@@ -394,20 +395,20 @@ void ActuallyFreeDynarecMap(dynablock_t* db, uintptr_t addr, int size)
             khint_t k = kh_get(dynablocks, blocks, (uintptr_t)sub);
             if(k!=kh_end(blocks))
                 kh_del(dynablocks, blocks, k);
-            for(int j=0; j<size; ++j)
+            for(size_t j=0; j<size; ++j)
                 mmaplist[i].helper[(uintptr_t)sub-(uintptr_t)mmaplist[i].block+j] = 0;
             }
             return;
         }
     }
     if(mmapsize)
-        dynarec_log(LOG_NONE, "Warning, block %p (size %d) not found in mmaplist for Free\n", (void*)addr, size);
+        dynarec_log(LOG_NONE, "Warning, block %p (size %zu) not found in mmaplist for Free\n", (void*)addr, size);
 }
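The lookups above (kh_get, kh_del, and the earlier kh_put/kh_value) are klib's khash, used here as a map from a native block address to its dynablock. A standalone sketch of that pattern, assuming khash.h is available; the map name and value type are illustrative.

#include <stdint.h>
#include <stdio.h>
#include "khash.h"

typedef struct myblock_s { int id; } myblock_t;

/* Declares kh_addrmap_t plus kh_init/kh_put/kh_get/kh_del/... for 64-bit keys. */
KHASH_MAP_INIT_INT64(addrmap, myblock_t*)

int main(void)
{
    kh_addrmap_t *h = kh_init(addrmap);
    myblock_t blk = { .id = 42 };
    int absent;

    khint_t k = kh_put(addrmap, h, (uintptr_t)&blk, &absent);  /* insert key   */
    kh_value(h, k) = &blk;                                     /* attach value */

    k = kh_get(addrmap, h, (uintptr_t)&blk);                   /* look it up   */
    if (k != kh_end(h)) {
        printf("found block id=%d\n", kh_value(h, k)->id);
        kh_del(addrmap, h, k);                                 /* drop entry   */
    }
    kh_destroy(addrmap, h);
    return 0;
}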
|
||||
dynablock_t* FindDynablockFromNativeAddress(void* addr)
|
||||
{
|
||||
// look in actual list
|
||||
for(int i=0; i<mmapsize; ++i) {
|
||||
for(size_t i=0; i<mmapsize; ++i) {
|
||||
if ((uintptr_t)addr>=(uintptr_t)mmaplist[i].block
|
||||
&& ((uintptr_t)addr<(uintptr_t)mmaplist[i].block+mmaplist[i].size)) {
|
||||
if(!mmaplist[i].helper)
|
||||
@ -426,7 +427,7 @@ dynablock_t* FindDynablockFromNativeAddress(void* addr)
|
||||
return FindDynablockDynablocklist(addr, dblist_oversized);
|
||||
}
|
||||
|
||||
uintptr_t AllocDynarecMap(dynablock_t* db, int size)
|
||||
uintptr_t AllocDynarecMap(dynablock_t* db, size_t size)
|
||||
{
|
||||
if(!size)
|
||||
return 0;
|
||||
@ -435,14 +436,14 @@ uintptr_t AllocDynarecMap(dynablock_t* db, int size)
|
||||
pthread_mutex_lock(&mutex_mmap);
|
||||
void *p = NULL;
|
||||
if(posix_memalign(&p, box64_pagesize, size)) {
|
||||
dynarec_log(LOG_INFO, "Cannot create dynamic map of %d bytes\n", size);
|
||||
dynarec_log(LOG_INFO, "Cannot create dynamic map of %zu bytes\n", size);
|
||||
return 0;
|
||||
}
|
||||
mprotect(p, size, PROT_READ | PROT_WRITE | PROT_EXEC);
|
||||
#else
|
||||
void* p = mmap(NULL, size, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
|
||||
if(p==(void*)-1) {
|
||||
dynarec_log(LOG_INFO, "Cannot create dynamic map of %d bytes\n", size);
|
||||
dynarec_log(LOG_INFO, "Cannot create dynamic map of %zu bytes\n", size);
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
@ -471,7 +472,7 @@ uintptr_t AllocDynarecMap(dynablock_t* db, int size)
|
||||
return ret;
|
||||
}
|
||||
|
||||
void FreeDynarecMap(dynablock_t* db, uintptr_t addr, uint32_t size)
|
||||
void FreeDynarecMap(dynablock_t* db, uintptr_t addr, size_t size)
|
||||
{
|
||||
if(!addr || !size)
|
||||
return;
|
||||
@ -512,7 +513,7 @@ dynablocklist_t* getDB(uintptr_t idx)
|
||||
|
||||
// each dynmap is 64k of size
|
||||
|
||||
void addDBFromAddressRange(uintptr_t addr, uintptr_t size)
|
||||
void addDBFromAddressRange(uintptr_t addr, size_t size)
|
||||
{
|
||||
dynarec_log(LOG_DEBUG, "addDBFromAddressRange %p -> %p\n", (void*)addr, (void*)(addr+size-1));
|
||||
uintptr_t idx = (addr>>DYNAMAP_SHIFT);
|
||||
@ -539,7 +540,7 @@ void addDBFromAddressRange(uintptr_t addr, uintptr_t size)
|
||||
}
|
||||
}
|
||||
|
||||
void cleanDBFromAddressRange(uintptr_t addr, uintptr_t size, int destroy)
|
||||
void cleanDBFromAddressRange(uintptr_t addr, size_t size, int destroy)
|
||||
{
|
||||
dynarec_log(LOG_DEBUG, "cleanDBFromAddressRange %p -> %p %s\n", (void*)addr, (void*)(addr+size-1), destroy?"destroy":"mark");
|
||||
uintptr_t idx = (addr>>DYNAMAP_SHIFT);
|
||||
@ -668,7 +669,7 @@ uintptr_t getJumpTableAddress64(uintptr_t addr)
|
||||
|
||||
// Remove the Write flag from an adress range, so DB can be executed
|
||||
// no log, as it can be executed inside a signal handler
|
||||
void protectDB(uintptr_t addr, uintptr_t size)
|
||||
void protectDB(uintptr_t addr, size_t size)
|
||||
{
|
||||
dynarec_log(LOG_DEBUG, "protectDB %p -> %p\n", (void*)addr, (void*)(addr+size-1));
|
||||
uintptr_t idx = (addr>>MEMPROT_SHIFT);
|
||||
@@ -697,7 +698,7 @@ void protectDBnolock(uintptr_t addr, uintptr_t size)
 {
     dynarec_log(LOG_DEBUG, "protectDB %p -> %p\n", (void*)addr, (void*)(addr+size-1));
     uintptr_t idx = (addr>>MEMPROT_SHIFT);
-    uintptr_t end = ((addr+size-1)>>MEMPROT_SHIFT);
+    uintptr_t end = ((addr+size-1LL)>>MEMPROT_SHIFT);
     int ret;
     for (uintptr_t i=idx; i<=end; ++i) {
         const uint32_t key = (i>>MEMPROT_SHIFT2)&0xffffffff;
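The idx/end pair above turns a byte range into an inclusive range of fixed-size chunks of 2^MEMPROT_SHIFT bytes. A standalone illustration of that calculation, with an assumed 4 KiB chunk size (box64's actual shift value may differ).

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12   /* assumed 4 KiB chunks, not necessarily MEMPROT_SHIFT */

/* Inclusive first/last chunk index touched by [addr, addr+size); size must be > 0. */
static void chunk_range(uintptr_t addr, size_t size, uintptr_t *first, uintptr_t *last)
{
    *first = addr >> PAGE_SHIFT;
    *last  = (addr + size - 1) >> PAGE_SHIFT;  /* -1 keeps an exact chunk end inclusive */
}

int main(void)
{
    uintptr_t first, last;
    chunk_range(0x1ff0, 0x20, &first, &last);
    printf("range touches chunks %lu..%lu\n",
           (unsigned long)first, (unsigned long)last);    /* prints 1..2 */
    return 0;
}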
@ -728,7 +729,7 @@ void unlockDB()
|
||||
|
||||
// Add the Write flag from an adress range, and mark all block as dirty
|
||||
// no log, as it can be executed inside a signal handler
|
||||
void unprotectDB(uintptr_t addr, uintptr_t size)
|
||||
void unprotectDB(uintptr_t addr, size_t size)
|
||||
{
|
||||
dynarec_log(LOG_DEBUG, "unprotectDB %p -> %p\n", (void*)addr, (void*)(addr+size-1));
|
||||
uintptr_t idx = (addr>>MEMPROT_SHIFT);
|
||||
@ -755,7 +756,7 @@ void unprotectDB(uintptr_t addr, uintptr_t size)
|
||||
|
||||
#endif
|
||||
|
||||
void updateProtection(uintptr_t addr, uintptr_t size, uint32_t prot)
|
||||
void updateProtection(uintptr_t addr, size_t size, uint32_t prot)
|
||||
{
|
||||
dynarec_log(LOG_DEBUG, "updateProtection %p:%p 0x%x\n", (void*)addr, (void*)(addr+size-1), prot);
|
||||
uintptr_t idx = (addr>>MEMPROT_SHIFT);
|
||||
@ -778,7 +779,7 @@ void updateProtection(uintptr_t addr, uintptr_t size, uint32_t prot)
|
||||
pthread_mutex_unlock(&mutex_prot);
|
||||
}
|
||||
|
||||
void setProtection(uintptr_t addr, uintptr_t size, uint32_t prot)
|
||||
void setProtection(uintptr_t addr, size_t size, uint32_t prot)
|
||||
{
|
||||
dynarec_log(LOG_DEBUG, "setProtection %p:%p 0x%x\n", (void*)addr, (void*)(addr+size-1), prot);
|
||||
uintptr_t idx = (addr>>MEMPROT_SHIFT);
|
||||
@ -798,7 +799,7 @@ void setProtection(uintptr_t addr, uintptr_t size, uint32_t prot)
|
||||
pthread_mutex_unlock(&mutex_prot);
|
||||
}
|
||||
|
||||
void freeProtection(uintptr_t addr, uintptr_t size)
|
||||
void freeProtection(uintptr_t addr, size_t size)
|
||||
{
|
||||
dynarec_log(LOG_DEBUG, "freeProtection %p:%p\n", (void*)addr, (void*)(addr+size-1));
|
||||
uintptr_t idx = (addr>>MEMPROT_SHIFT);
|
||||
@@ -861,7 +862,7 @@ void* find32bitBlock(size_t size)
             return p;
         p += 0x10000;
     } while(p!=(void*)0xffff0000);
-    printf_log(LOG_NONE, "Warning: cannot find a 0x%lx block in 32bits address space\n", size);
+    printf_log(LOG_NONE, "Warning: cannot find a 0x%zx block in 32bits address space\n", size);
     return NULL;
 }
 void* findBlockNearHint(void* hint, size_t size)
@@ -887,7 +888,7 @@ void* findBlockNearHint(void* hint, size_t size)
             return p;
         p += step;
     } while(p!=end);
-    printf_log(LOG_NONE, "Warning: cannot find a 0x%lx block in 32bits address space\n", size);
+    printf_log(LOG_NONE, "Warning: cannot find a 0x%zx block in 32bits address space\n", size);
     return NULL;
 }
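The %lx to %zx change above (and the many %d to %zu changes elsewhere in this commit) match the printf conversion to the widened argument types. A quick standalone reminder of the pairings:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    size_t    sz   = 123456;
    uintptr_t addr = 0xdeadbeef;
    int64_t   off  = -42;

    printf("size_t    : %zu (hex 0x%zx)\n", sz, sz);   /* z modifier for size_t   */
    printf("uintptr_t : 0x%" PRIxPTR "\n", addr);      /* PRIxPTR from inttypes.h */
    printf("int64_t   : %" PRId64 "\n", off);          /* PRId64 likewise         */
    return 0;
}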
#undef LOWEST
|
||||
@ -978,7 +979,7 @@ void fini_custommem_helper(box64context_t *ctx)
|
||||
#ifdef DYNAREC
|
||||
if(box64_dynarec) {
|
||||
dynarec_log(LOG_DEBUG, "Free global Dynarecblocks\n");
|
||||
for (int i=0; i<mmapsize; ++i) {
|
||||
for (size_t i=0; i<mmapsize; ++i) {
|
||||
if(mmaplist[i].block)
|
||||
#ifdef USE_MMAP
|
||||
munmap(mmaplist[i].block, mmaplist[i].size);
|
||||
|
@@ -110,8 +110,9 @@
 #define MOVKw(Rd, imm16) EMIT(MOVK_gen(0, 0, ((uint16_t)imm16)&0xffff, Rd))
 #define MOVKw_LSL(Rd, imm16, shift) EMIT(MOVK_gen(0, (shift)/16, ((uint16_t)imm16)&0xffff, Rd))
 
+// This macro will give a -Wsign-compare warning, probably bug #38341
 #define MOV32w(Rd, imm32) \
-    if(~((uint32_t)(imm32))<0xffff) { \
+    if(~((uint32_t)(imm32))<0xffffu) { \
         MOVNw(Rd, (~(uint32_t)(imm32))&0xffff); \
     } else { \
         MOVZw(Rd, (imm32)&0xffff); \
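The 0xffff to 0xffffu change above keeps both sides of the comparison unsigned. A standalone illustration of the warning class involved (values are arbitrary):

/* Compile with: cc -Wall -Wextra -Wsign-compare demo.c */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t u = 0x1234u;
    int      limit = 0xffff;

    if (u < limit)            /* unsigned vs signed variable: -Wsign-compare fires */
        puts("compares, but warns");
    if (u < (unsigned)limit)  /* explicit conversion documents the intent          */
        puts("quiet");
    if (u < 0xffffu)          /* unsigned literal, the approach taken in MOV32w    */
        puts("quiet too");
    return 0;
}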
@ -214,7 +214,7 @@ void MarkRangeDynablock(dynablocklist_t* dynablocks, uintptr_t addr, uintptr_t s
|
||||
uintptr_t new_size = size + dynablocks->maxsz;
|
||||
MarkDirectDynablock(dynablocks, new_addr, new_size);
|
||||
// the blocks check before
|
||||
for(int idx=(new_addr)>>DYNAMAP_SHIFT; idx<(addr>>DYNAMAP_SHIFT); ++idx)
|
||||
for(unsigned idx=(new_addr)>>DYNAMAP_SHIFT; idx<(addr>>DYNAMAP_SHIFT); ++idx)
|
||||
MarkDirectDynablock(getDB(idx), new_addr, new_size);
|
||||
}
|
||||
}
|
||||
@ -347,7 +347,7 @@ static dynablock_t* internalDBGetBlock(x64emu_t* emu, uintptr_t addr, uintptr_t
|
||||
int blocksz = block->x64_size;
|
||||
if(dynablocks->maxsz<blocksz) {
|
||||
dynablocks->maxsz = blocksz;
|
||||
for(int idx=(addr>>DYNAMAP_SHIFT)+1; idx<=((addr+blocksz)>>DYNAMAP_SHIFT); ++idx) {
|
||||
for(unsigned idx=(addr>>DYNAMAP_SHIFT)+1; idx<=((addr+blocksz)>>DYNAMAP_SHIFT); ++idx) {
|
||||
dynablocklist_t* dblist;
|
||||
if((dblist = getDB(idx)))
|
||||
if(dblist->maxsz<blocksz)
|
||||
|
@@ -383,10 +383,10 @@ void* FillBlock64(dynablock_t* block, uintptr_t addr) {
     // pass 2, instruction size
     arm_pass2(&helper, addr);
     // ok, now allocate mapped memory, with executable flag on
-    int sz = helper.arm_size + helper.table64size*sizeof(uint64_t);
+    size_t sz = helper.arm_size + helper.table64size*sizeof(uint64_t);
     void* p = (void*)AllocDynarecMap(block, sz);
     if(p==NULL) {
-        dynarec_log(LOG_DEBUG, "AllocDynarecMap(%p, %d) failed, cancelling block\n", block, sz);
+        dynarec_log(LOG_DEBUG, "AllocDynarecMap(%p, %zu) failed, cancelling block\n", block, sz);
         free(helper.insts);
         free(helper.next);
         free(helper.table64);
@@ -401,17 +401,17 @@ void* FillBlock64(dynablock_t* block, uintptr_t addr) {
     }
     // pass 3, emit (log emit arm opcode)
     if(box64_dynarec_dump) {
-        dynarec_log(LOG_NONE, "%s%04d|Emitting %d bytes for %d x64 bytes", (box64_dynarec_dump>1)?"\e[01;36m":"", GetTID(), helper.arm_size, helper.isize);
+        dynarec_log(LOG_NONE, "%s%04d|Emitting %zu bytes for %u x64 bytes", (box64_dynarec_dump>1)?"\e[01;36m":"", GetTID(), helper.arm_size, helper.isize);
         printFunctionAddr(helper.start, " => ");
         dynarec_log(LOG_NONE, "%s\n", (box64_dynarec_dump>1)?"\e[m":"");
     }
     int oldtable64size = helper.table64size;
-    int oldarmsize = helper.arm_size;
+    size_t oldarmsize = helper.arm_size;
     helper.arm_size = 0;
     helper.table64size = 0; // reset table64 (but not the cap)
     arm_pass3(&helper, addr);
     if((oldarmsize!=helper.arm_size) || (oldtable64size<helper.table64size)) {
-        printf_log(LOG_NONE, "BOX64: Warning, size difference in block between pass2 (%d) & pass3 (%d)!\n", sz, helper.arm_size+helper.table64size*8);
+        printf_log(LOG_NONE, "BOX64: Warning, size difference in block between pass2 (%zu) & pass3 (%zu)!\n", sz, helper.arm_size+helper.table64size*8);
         uint8_t *dump = (uint8_t*)helper.start;
         printf_log(LOG_NONE, "Dump of %d x64 opcodes:\n", helper.size);
         for(int i=0; i<helper.size; ++i) {
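FillBlock64 runs the translator over the same x86 block more than once: an earlier sizing pass fills in helper.arm_size, the buffer is allocated from that, and the emitting pass above must reproduce exactly the same size, hence the pass2/pass3 warning. A schematic, standalone sketch of that measure-then-emit pattern; the emitter type and the two AArch64 opcodes are illustrative, not box64's real pass functions.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
    uint8_t *buf;     /* NULL during the sizing pass      */
    size_t   size;    /* bytes produced so far            */
} emitter_t;

static void emit32(emitter_t *e, uint32_t opcode)
{
    if (e->buf)                          /* only write when a buffer exists  */
        memcpy(e->buf + e->size, &opcode, 4);
    e->size += 4;                        /* both passes count identically    */
}

static void translate_block(emitter_t *e)
{
    /* Stand-in for the real per-instruction translation loop. */
    emit32(e, 0xd503201f);               /* AArch64 NOP */
    emit32(e, 0xd65f03c0);               /* AArch64 RET */
}

int main(void)
{
    emitter_t e = {0};
    translate_block(&e);                 /* pass 1: size only                 */
    size_t sz = e.size;

    e.buf = malloc(sz);                  /* allocate exactly what was measured */
    e.size = 0;
    translate_block(&e);                 /* pass 2: emit for real              */

    if (e.size != sz)                    /* mirrors the pass2/pass3 warning    */
        fprintf(stderr, "size mismatch: %zu vs %zu\n", sz, e.size);
    printf("emitted %zu bytes\n", e.size);
    free(e.buf);
    return 0;
}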
@ -451,8 +451,6 @@ void* FillBlock64(dynablock_t* block, uintptr_t addr) {
|
||||
block->need_test = 0;
|
||||
//block->x64_addr = (void*)start;
|
||||
block->x64_size = end-start+1;
|
||||
if(box64_dynarec_largest<block->x64_size)
|
||||
box64_dynarec_largest = block->x64_size;
|
||||
block->hash = X31_hash_code(block->x64_addr, block->x64_size);
|
||||
// Check if something changed, to abbort if it as
|
||||
if(block->hash != hash) {
|
||||
@ -493,4 +491,4 @@ void* FillBlock64(dynablock_t* block, uintptr_t addr) {
|
||||
free(helper.sons_arm);
|
||||
block->done = 1;
|
||||
return (void*)block;
|
||||
}
|
||||
}
|
||||
|
@ -29,21 +29,19 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
|
||||
uint8_t nextop, opcode;
|
||||
uint8_t gd, ed;
|
||||
int8_t i8;
|
||||
int32_t i32, j32, tmp;
|
||||
int64_t i64;
|
||||
int32_t i32, tmp;
|
||||
int64_t i64, j64;
|
||||
uint8_t u8;
|
||||
uint8_t gb1, gb2, eb1, eb2;
|
||||
uint32_t u32;
|
||||
uint64_t u64;
|
||||
uint8_t wback, wb1, wb2;
|
||||
int fixedaddress;
|
||||
int64_t fixedaddress;
|
||||
|
||||
opcode = F8;
|
||||
MAYUSE(eb1);
|
||||
MAYUSE(eb2);
|
||||
MAYUSE(wb2);
|
||||
MAYUSE(tmp);
|
||||
MAYUSE(j32);
|
||||
MAYUSE(j64);
|
||||
|
||||
switch(opcode) {
|
||||
case 0x00:
|
||||
@ -2212,4 +2210,3 @@ uintptr_t dynarec64_00(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
|
||||
|
||||
return addr;
|
||||
}
|
||||
|
||||
|
@ -64,33 +64,32 @@
|
||||
|
||||
uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int ninst, rex_t rex, int rep, int* ok, int* need_epilog)
|
||||
{
|
||||
(void)ip; (void)rep; (void)need_epilog;
|
||||
|
||||
uint8_t opcode = F8;
|
||||
uint8_t nextop, u8;
|
||||
int32_t i32, i32_, j32;
|
||||
uint8_t gd, ed;
|
||||
uint8_t wback, wb2;
|
||||
uint8_t eb1, eb2;
|
||||
uint64_t tmp64u;
|
||||
int32_t i32, i32_;
|
||||
int v0, v1;
|
||||
int q0, q1;
|
||||
int d0, d1;
|
||||
int s0;
|
||||
int fixedaddress;
|
||||
MAYUSE(s0);
|
||||
uint64_t tmp64u;
|
||||
int64_t j64;
|
||||
int64_t fixedaddress;
|
||||
MAYUSE(wb2);
|
||||
MAYUSE(eb1);
|
||||
MAYUSE(eb2);
|
||||
MAYUSE(q0);
|
||||
MAYUSE(q1);
|
||||
MAYUSE(v0);
|
||||
MAYUSE(v1);
|
||||
MAYUSE(d0);
|
||||
MAYUSE(d1);
|
||||
MAYUSE(eb2);
|
||||
MAYUSE(eb1);
|
||||
MAYUSE(wb2);
|
||||
MAYUSE(j32);
|
||||
MAYUSE(i32);
|
||||
MAYUSE(u8);
|
||||
#if STEP == 3
|
||||
//static const int8_t mask_shift8[] = { -7, -6, -5, -4, -3, -2, -1, 0 };
|
||||
MAYUSE(s0);
|
||||
MAYUSE(j64);
|
||||
#if 0//STEP == 3
|
||||
static const int8_t mask_shift8[] = { -7, -6, -5, -4, -3, -2, -1, 0 };
|
||||
#endif
|
||||
|
||||
switch(opcode) {
|
||||
@ -1564,4 +1563,3 @@ uintptr_t dynarec64_0F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
|
||||
}
|
||||
return addr;
|
||||
}
|
||||
|
||||
|
@ -25,17 +25,18 @@
|
||||
|
||||
uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int ninst, rex_t rex, int rep, int* ok, int* need_epilog)
|
||||
{
|
||||
(void)ip; (void)rep; (void)need_epilog;
|
||||
|
||||
uint8_t opcode = F8;
|
||||
uint8_t nextop;
|
||||
uint8_t u8;
|
||||
int32_t j32;
|
||||
uint8_t gd, ed, eb1, eb2;
|
||||
uint8_t wback;
|
||||
int64_t i64;
|
||||
int fixedaddress;
|
||||
MAYUSE(j32);
|
||||
int64_t i64, j64;
|
||||
int64_t fixedaddress;
|
||||
MAYUSE(eb1);
|
||||
MAYUSE(eb2);
|
||||
MAYUSE(j64);
|
||||
|
||||
while((opcode==0xF2) || (opcode==0xF3)) {
|
||||
rep = opcode-0xF1;
|
||||
@ -219,4 +220,3 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
|
||||
}
|
||||
return addr;
|
||||
}
|
||||
|
||||
|
@ -27,15 +27,16 @@ uintptr_t dynarec64_66(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
|
||||
{
|
||||
uint8_t opcode = F8;
|
||||
uint8_t nextop, u8;
|
||||
int32_t i32, j32;
|
||||
int16_t i16;
|
||||
uint16_t u16;
|
||||
int32_t i32;
|
||||
int64_t j64;
|
||||
uint8_t gd, ed;
|
||||
uint8_t wback, wb1;
|
||||
int fixedaddress;
|
||||
MAYUSE(u16);
|
||||
int64_t fixedaddress;
|
||||
MAYUSE(u8);
|
||||
MAYUSE(j32);
|
||||
MAYUSE(u16);
|
||||
MAYUSE(j64);
|
||||
|
||||
while((opcode==0x2E) || (opcode==0x66)) // ignoring CS: or multiple 0x66
|
||||
opcode = F8;
|
||||
@ -814,4 +815,3 @@ uintptr_t dynarec64_66(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
|
||||
}
|
||||
return addr;
|
||||
}
|
||||
|
||||
|
@ -44,17 +44,20 @@
|
||||
|
||||
uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int ninst, rex_t rex, int rep, int* ok, int* need_epilog)
|
||||
{
|
||||
(void)ip; (void)rep; (void)need_epilog;
|
||||
|
||||
uint8_t opcode = F8;
|
||||
uint8_t nextop, u8;
|
||||
int32_t i32, j32;
|
||||
int32_t i32;
|
||||
uint8_t gd, ed;
|
||||
uint8_t wback, wb1;
|
||||
uint8_t eb1, eb2;
|
||||
int64_t j64;
|
||||
uint64_t tmp64u;
|
||||
int v0, v1;
|
||||
int q0, q1;
|
||||
int d0, d1;
|
||||
int fixedaddress;
|
||||
int64_t fixedaddress;
|
||||
|
||||
MAYUSE(d0);
|
||||
MAYUSE(d1);
|
||||
@ -62,7 +65,7 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
|
||||
MAYUSE(q1);
|
||||
MAYUSE(eb1);
|
||||
MAYUSE(eb2);
|
||||
MAYUSE(j32);
|
||||
MAYUSE(j64);
|
||||
#if 0//STEP > 1
|
||||
static const int8_t mask_shift8[] = { -7, -6, -5, -4, -3, -2, -1, 0 };
|
||||
#endif
|
||||
@ -1401,4 +1404,3 @@ uintptr_t dynarec64_660F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
|
||||
}
|
||||
return addr;
|
||||
}
|
||||
|
||||
|
@ -25,10 +25,12 @@
|
||||
|
||||
uintptr_t dynarec64_6664(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int ninst, rex_t rex, int rep, int* ok, int* need_epilog)
|
||||
{
|
||||
(void)ip; (void)rep; (void)need_epilog;
|
||||
|
||||
uint8_t opcode = F8;
|
||||
uint8_t nextop;
|
||||
uint8_t gd, ed;
|
||||
int fixedaddress;
|
||||
int64_t fixedaddress;
|
||||
|
||||
// REX prefix before the 66 are ignored
|
||||
rex.rex = 0;
|
||||
@ -76,4 +78,3 @@ uintptr_t dynarec64_6664(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
|
||||
}
|
||||
return addr;
|
||||
}
|
||||
|
||||
|
@ -25,14 +25,17 @@
|
||||
|
||||
uintptr_t dynarec64_67(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int ninst, rex_t rex, int rep, int* ok, int* need_epilog)
|
||||
{
|
||||
(void)ip; (void)rep; (void)need_epilog;
|
||||
|
||||
uint8_t opcode = F8;
|
||||
uint8_t nextop;
|
||||
uint8_t gd, ed;
|
||||
int fixedaddress;
|
||||
int64_t fixedaddress;
|
||||
int8_t i8;
|
||||
int32_t i32, j32;
|
||||
MAYUSE(j32);
|
||||
int32_t i32;
|
||||
int64_t j64;
|
||||
MAYUSE(i32);
|
||||
MAYUSE(j64);
|
||||
|
||||
// REX prefix before the 67 are ignored
|
||||
rex.rex = 0;
|
||||
@ -117,4 +120,3 @@ uintptr_t dynarec64_67(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
|
||||
}
|
||||
return addr;
|
||||
}
|
||||
|
||||
|
@ -26,9 +26,11 @@
|
||||
|
||||
uintptr_t dynarec64_D8(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int ninst, rex_t rex, int rep, int* ok, int* need_epilog)
|
||||
{
|
||||
(void)ip; (void)rep; (void)need_epilog;
|
||||
|
||||
uint8_t nextop = F8;
|
||||
uint8_t ed;
|
||||
int fixedaddress;
|
||||
int64_t fixedaddress;
|
||||
int v1, v2;
|
||||
int s0;
|
||||
|
||||
@ -228,4 +230,3 @@ uintptr_t dynarec64_D8(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
|
||||
}
|
||||
return addr;
|
||||
}
|
||||
|
||||
|
@ -26,10 +26,12 @@
|
||||
|
||||
uintptr_t dynarec64_D9(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int ninst, rex_t rex, int rep, int* ok, int* need_epilog)
|
||||
{
|
||||
(void)ip; (void)rep; (void)need_epilog;
|
||||
|
||||
uint8_t nextop = F8;
|
||||
uint8_t ed;
|
||||
uint8_t wback, wb1;
|
||||
int fixedaddress;
|
||||
int64_t fixedaddress;
|
||||
int v1, v2;
|
||||
int s0;
|
||||
int i1, i2, i3;
|
||||
@ -336,4 +338,3 @@ uintptr_t dynarec64_D9(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
|
||||
}
|
||||
return addr;
|
||||
}
|
||||
|
||||
|
@ -26,19 +26,21 @@
|
||||
|
||||
uintptr_t dynarec64_DB(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int ninst, rex_t rex, int rep, int* ok, int* need_epilog)
|
||||
{
|
||||
(void)ip; (void)rep; (void)need_epilog;
|
||||
|
||||
uint8_t nextop = F8;
|
||||
uint8_t ed;
|
||||
uint8_t wback;
|
||||
uint8_t u8;
|
||||
int fixedaddress;
|
||||
int64_t fixedaddress;
|
||||
int v1, v2;
|
||||
int s0;
|
||||
int j32;
|
||||
int64_t j64;
|
||||
|
||||
MAYUSE(s0);
|
||||
MAYUSE(v2);
|
||||
MAYUSE(v1);
|
||||
MAYUSE(j32);
|
||||
MAYUSE(j64);
|
||||
|
||||
switch(nextop) {
|
||||
case 0xC0:
|
||||
@ -286,4 +288,3 @@ uintptr_t dynarec64_DB(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
|
||||
}
|
||||
return addr;
|
||||
}
|
||||
|
||||
|
@ -26,9 +26,11 @@
|
||||
|
||||
uintptr_t dynarec64_DD(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int ninst, rex_t rex, int rep, int* ok, int* need_epilog)
|
||||
{
|
||||
(void)ip; (void)rep; (void)need_epilog;
|
||||
|
||||
uint8_t nextop = F8;
|
||||
uint8_t ed;
|
||||
int fixedaddress;
|
||||
int64_t fixedaddress;
|
||||
int v1, v2;
|
||||
int s0;
|
||||
|
||||
@ -195,4 +197,3 @@ uintptr_t dynarec64_DD(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
|
||||
}
|
||||
return addr;
|
||||
}
|
||||
|
||||
|
@ -26,17 +26,19 @@
|
||||
|
||||
uintptr_t dynarec64_DF(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int ninst, rex_t rex, int rep, int* ok, int* need_epilog)
|
||||
{
|
||||
(void)ip; (void)rep; (void)need_epilog;
|
||||
|
||||
uint8_t nextop = F8;
|
||||
uint8_t ed, wback, u8;
|
||||
int fixedaddress;
|
||||
int v1, v2;
|
||||
int j32;
|
||||
int s0;
|
||||
int64_t j64;
|
||||
int64_t fixedaddress;
|
||||
|
||||
MAYUSE(s0);
|
||||
MAYUSE(v2);
|
||||
MAYUSE(v1);
|
||||
MAYUSE(j32);
|
||||
MAYUSE(j64);
|
||||
|
||||
switch(nextop) {
|
||||
case 0xC0:
|
||||
@ -286,4 +288,3 @@ uintptr_t dynarec64_DF(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
|
||||
}
|
||||
return addr;
|
||||
}
|
||||
|
||||
|
@ -23,9 +23,10 @@
|
||||
#include "dynarec_arm64_functions.h"
|
||||
#include "dynarec_arm64_helper.h"
|
||||
|
||||
// emit OR32 instruction, from s1 , s2, store result in s1 using s3 and s4 as scratch
|
||||
// emit OR32 instruction, from s1, s2, store result in s1 using s3 and s4 as scratch
|
||||
void emit_or32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3, int s4)
|
||||
{
|
||||
MAYUSE(s2);
|
||||
IFX(X_PEND) {
|
||||
STRxw_U12(s1, xEmu, offsetof(x64emu_t, op1));
|
||||
STRxw_U12(s2, xEmu, offsetof(x64emu_t, op2));
|
||||
@ -89,9 +90,10 @@ void emit_or32c(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int64_t c, int
|
||||
}
|
||||
}
|
||||
|
||||
// emit XOR32 instruction, from s1 , s2, store result in s1 using s3 and s4 as scratch
|
||||
// emit XOR32 instruction, from s1, s2, store result in s1 using s3 and s4 as scratch
|
||||
void emit_xor32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3, int s4)
|
||||
{
|
||||
MAYUSE(s2);
|
||||
IFX(X_PEND) {
|
||||
STRxw_U12(s1, xEmu, offsetof(x64emu_t, op1));
|
||||
STRxw_U12(s2, xEmu, offsetof(x64emu_t, op2));
|
||||
@ -155,9 +157,10 @@ void emit_xor32c(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int64_t c, in
|
||||
}
|
||||
}
|
||||
|
||||
// emit AND32 instruction, from s1 , s2, store result in s1 using s3 and s4 as scratch
|
||||
// emit AND32 instruction, from s1, s2, store result in s1 using s3 and s4 as scratch
|
||||
void emit_and32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3, int s4)
|
||||
{
|
||||
MAYUSE(s2);
|
||||
IFX(X_PEND) {
|
||||
STRxw_U12(s1, xEmu, offsetof(x64emu_t, op1));
|
||||
STRxw_U12(s2, xEmu, offsetof(x64emu_t, op2));
|
||||
@ -227,9 +230,10 @@ void emit_and32c(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int64_t c, in
|
||||
}
|
||||
}
|
||||
|
||||
// emit OR8 instruction, from s1 , s2, store result in s1 using s3 and s4 as scratch, s4 can be same as s2 (and so s2 destroyed)
|
||||
// emit OR8 instruction, from s1, s2, store result in s1 using s3 and s4 as scratch, s4 can be same as s2 (and so s2 destroyed)
|
||||
void emit_or8(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
|
||||
{
|
||||
MAYUSE(s2);
|
||||
IFX(X_PEND) {
|
||||
STRB_U12(s1, xEmu, offsetof(x64emu_t, op1));
|
||||
STRB_U12(s2, xEmu, offsetof(x64emu_t, op2));
|
||||
@ -292,9 +296,10 @@ void emit_or8c(dynarec_arm_t* dyn, int ninst, int s1, int32_t c, int s3, int s4)
|
||||
}
|
||||
}
|
||||
|
||||
// emit XOR8 instruction, from s1 , s2, store result in s1 using s3 and s4 as scratch, s4 can be same as s2 (and so s2 destroyed)
|
||||
// emit XOR8 instruction, from s1, s2, store result in s1 using s3 and s4 as scratch, s4 can be same as s2 (and so s2 destroyed)
|
||||
void emit_xor8(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
|
||||
{
|
||||
MAYUSE(s2);
|
||||
IFX(X_PEND) {
|
||||
STRB_U12(s1, xEmu, offsetof(x64emu_t, op1));
|
||||
STRB_U12(s2, xEmu, offsetof(x64emu_t, op2));
|
||||
@ -357,9 +362,10 @@ void emit_xor8c(dynarec_arm_t* dyn, int ninst, int s1, int32_t c, int s3, int s4
|
||||
}
|
||||
}
|
||||
|
||||
// emit AND8 instruction, from s1 , s2, store result in s1 using s3 and s4 as scratch, s4 can be same as s2 (and so s2 destroyed)
|
||||
// emit AND8 instruction, from s1, s2, store result in s1 using s3 and s4 as scratch, s4 can be same as s2 (and so s2 destroyed)
|
||||
void emit_and8(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
|
||||
{
|
||||
MAYUSE(s2);
|
||||
IFX(X_PEND) {
|
||||
STRB_U12(s1, xEmu, offsetof(x64emu_t, op1));
|
||||
STRB_U12(s2, xEmu, offsetof(x64emu_t, op2));
|
||||
@ -429,9 +435,10 @@ void emit_and8c(dynarec_arm_t* dyn, int ninst, int s1, int32_t c, int s3, int s4
|
||||
}
|
||||
|
||||
|
||||
// emit OR16 instruction, from s1 , s2, store result in s1 using s3 and s4 as scratch, s4 can be same as s2 (and so s2 destroyed)
|
||||
// emit OR16 instruction, from s1, s2, store result in s1 using s3 and s4 as scratch, s4 can be same as s2 (and so s2 destroyed)
|
||||
void emit_or16(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
|
||||
{
|
||||
MAYUSE(s2);
|
||||
IFX(X_PEND) {
|
||||
STRH_U12(s1, xEmu, offsetof(x64emu_t, op1));
|
||||
STRH_U12(s2, xEmu, offsetof(x64emu_t, op2));
|
||||
@ -507,9 +514,10 @@ void emit_or16(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
|
||||
// }
|
||||
//}
|
||||
|
||||
// emit XOR16 instruction, from s1 , s2, store result in s1 using s3 and s4 as scratch, s4 can be same as s2 (and so s2 destroyed)
|
||||
// emit XOR16 instruction, from s1, s2, store result in s1 using s3 and s4 as scratch, s4 can be same as s2 (and so s2 destroyed)
|
||||
void emit_xor16(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
|
||||
{
|
||||
MAYUSE(s2);
|
||||
IFX(X_PEND) {
|
||||
STRH_U12(s1, xEmu, offsetof(x64emu_t, op1));
|
||||
STRH_U12(s2, xEmu, offsetof(x64emu_t, op2));
|
||||
@ -585,9 +593,10 @@ void emit_xor16(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
|
||||
// }
|
||||
//}
|
||||
|
||||
// emit AND16 instruction, from s1 , s2, store result in s1 using s3 and s4 as scratch, s4 can be same as s2 (and so s2 destroyed)
|
||||
// emit AND16 instruction, from s1, s2, store result in s1 using s3 and s4 as scratch, s4 can be same as s2 (and so s2 destroyed)
|
||||
void emit_and16(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
|
||||
{
|
||||
MAYUSE(s2);
|
||||
IFX(X_PEND) {
|
||||
STRH_U12(s1, xEmu, offsetof(x64emu_t, op1));
|
||||
STRH_U12(s2, xEmu, offsetof(x64emu_t, op2));
|
||||
@ -667,4 +676,4 @@ void emit_and16(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
|
||||
// IFX(X_PF) {
|
||||
// emit_pf(dyn, ninst, s1, s3, s4);
|
||||
// }
|
||||
//}
|
||||
//}
|
||||
|
@@ -23,9 +23,10 @@
 #include "dynarec_arm64_functions.h"
 #include "dynarec_arm64_helper.h"
 
-// emit ADD32 instruction, from s1 , s2, store result in s1 using s3 and s4 as scratch
+// emit ADD32 instruction, from s1, s2, store result in s1 using s3 and s4 as scratch
 void emit_add32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3, int s4)
 {
+    MAYUSE(s2);
     IFX(X_PEND) {
         STRxw_U12(s1, xEmu, offsetof(x64emu_t, op1));
         STRxw_U12(s2, xEmu, offsetof(x64emu_t, op2));
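The IFX(X_PEND) block above stores the raw operands into the emulator context instead of computing x86 flags on the spot; the flags are only reconstructed if a later instruction actually reads them. A heavily simplified standalone sketch of that deferred-flags idea; the struct and function names here are illustrative, not box64's.

#include <stdint.h>
#include <stdio.h>

/* Record enough state to rebuild flags later, instead of computing
 * ZF/SF/CF/... after every arithmetic operation. */
typedef struct {
    uint32_t op1, op2, res;
    enum { DF_NONE, DF_ADD32 } kind;
} deferred_t;

static uint32_t do_add32(deferred_t *df, uint32_t a, uint32_t b)
{
    uint32_t r = a + b;
    df->op1 = a; df->op2 = b; df->res = r; df->kind = DF_ADD32;  /* "pending" state */
    return r;                                                    /* no flag work here */
}

/* Only called when some instruction actually needs a flag. */
static int zero_flag(const deferred_t *df)
{
    switch (df->kind) {
    case DF_ADD32: return df->res == 0;
    default:       return 0;
    }
}

int main(void)
{
    deferred_t df = { .kind = DF_NONE };
    uint32_t r = do_add32(&df, 0xffffffffu, 1u);   /* wraps to 0 */
    printf("result=%u ZF=%d\n", r, zero_flag(&df));
    return 0;
}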
@ -76,9 +77,10 @@ void emit_add32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3
|
||||
}
|
||||
}
|
||||
|
||||
// emit ADD32 instruction, from s1 , constant c, store result in s1 using s3 and s4 as scratch
|
||||
// emit ADD32 instruction, from s1, constant c, store result in s1 using s3 and s4 as scratch
|
||||
void emit_add32c(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int64_t c, int s3, int s4, int s5)
|
||||
{
|
||||
MAYUSE(s5);
|
||||
if(s1==xRSP && (!dyn->insts || dyn->insts[ninst].x64.need_flags==X_PEND))
|
||||
{
|
||||
// special case when doing math on ESP and only PEND is needed: ignoring it!
|
||||
@ -151,9 +153,10 @@ void emit_add32c(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int64_t c, in
|
||||
}
|
||||
}
|
||||
|
||||
// emit SUB32 instruction, from s1 , s2, store result in s1 using s3 and s4 as scratch
|
||||
// emit SUB32 instruction, from s1, s2, store result in s1 using s3 and s4 as scratch
|
||||
void emit_sub32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3, int s4)
|
||||
{
|
||||
MAYUSE(s2);
|
||||
IFX(X_PEND) {
|
||||
STRxw_U12(s1, xEmu, offsetof(x64emu_t, op1));
|
||||
STRxw_U12(s2, xEmu, offsetof(x64emu_t, op2));
|
||||
@ -206,9 +209,10 @@ void emit_sub32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3
|
||||
}
|
||||
}
|
||||
|
||||
// emit SUB32 instruction, from s1 , constant c, store result in s1 using s3 and s4 as scratch
|
||||
// emit SUB32 instruction, from s1, constant c, store result in s1 using s3 and s4 as scratch
|
||||
void emit_sub32c(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int64_t c, int s3, int s4, int s5)
|
||||
{
|
||||
MAYUSE(s5);
|
||||
if(s1==xRSP && (!dyn->insts || dyn->insts[ninst].x64.need_flags==X_PEND))
|
||||
{
|
||||
// special case when doing math on RSP and only PEND is needed: ignoring it!
|
||||
@ -282,9 +286,10 @@ void emit_sub32c(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int64_t c, in
|
||||
}
|
||||
}
|
||||
|
||||
// emit ADD8 instruction, from s1 , s2, store result in s1 using s3 and s4 as scratch, with save_s4 is s4 need to be saved
|
||||
// emit ADD8 instruction, from s1, s2, store result in s1 using s3 and s4 as scratch
|
||||
void emit_add8(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
|
||||
{
|
||||
MAYUSE(s2);
|
||||
IFX(X_PEND) {
|
||||
STRB_U12(s1, xEmu, offsetof(x64emu_t, op1));
|
||||
STRB_U12(s2, xEmu, offsetof(x64emu_t, op2));
|
||||
@ -331,7 +336,7 @@ void emit_add8(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
|
||||
}
|
||||
}
|
||||
|
||||
// emit ADD8 instruction, from s1 , const c, store result in s1 using s3 and s4 as scratch, with save_s4 is s4 need to be saved
|
||||
// emit ADD8 instruction, from s1, const c, store result in s1 using s3 and s4 as scratch
|
||||
void emit_add8c(dynarec_arm_t* dyn, int ninst, int s1, int c, int s3, int s4)
|
||||
{
|
||||
IFX(X_PEND) {
|
||||
@ -383,9 +388,10 @@ void emit_add8c(dynarec_arm_t* dyn, int ninst, int s1, int c, int s3, int s4)
|
||||
}
|
||||
}
|
||||
|
||||
// emit SUB8 instruction, from s1 , s2, store result in s1 using s3 and s4 as scratch, with save_s4 is s4 need to be saved
|
||||
// emit SUB8 instruction, from s1, s2, store result in s1 using s3 and s4 as scratch
|
||||
void emit_sub8(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
|
||||
{
|
||||
MAYUSE(s2);
|
||||
IFX(X_PEND) {
|
||||
STRB_U12(s1, xEmu, offsetof(x64emu_t, op1));
|
||||
STRB_U12(s2, xEmu, offsetof(x64emu_t, op2));
|
||||
@ -434,9 +440,10 @@ void emit_sub8(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
|
||||
}
|
||||
}
|
||||
|
||||
// emit SUB8 instruction, from s1 , constant c, store result in s1 using s3 and s4 as scratch
|
||||
// emit SUB8 instruction, from s1, constant c, store result in s1 using s3 and s4 as scratch
|
||||
void emit_sub8c(dynarec_arm_t* dyn, int ninst, int s1, int c, int s3, int s4, int s5)
|
||||
{
|
||||
MAYUSE(s5);
|
||||
IFX(X_ALL|X_PEND) {
|
||||
MOV32w(s5, c&0xff);
|
||||
}
|
||||
@ -491,9 +498,10 @@ void emit_sub8c(dynarec_arm_t* dyn, int ninst, int s1, int c, int s3, int s4, in
|
||||
}
|
||||
}
|
||||
|
||||
// emit ADD16 instruction, from s1 , s2, store result in s1 using s3 and s4 as scratch, with save_s4 is s4 need to be saved
|
||||
// emit ADD16 instruction, from s1, s2, store result in s1 using s3 and s4 as scratch
|
||||
void emit_add16(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
|
||||
{
|
||||
MAYUSE(s2);
|
||||
IFX(X_PEND) {
|
||||
STRH_U12(s1, xEmu, offsetof(x64emu_t, op1));
|
||||
STRH_U12(s2, xEmu, offsetof(x64emu_t, op2));
|
||||
@ -541,7 +549,7 @@ void emit_add16(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
|
||||
}
|
||||
}
|
||||
|
||||
// emit ADD16 instruction, from s1 , const c, store result in s1 using s3 and s4 as scratch, with save_s4 is s4 need to be saved
|
||||
// emit ADD16 instruction, from s1, const c, store result in s1 using s3 and s4 as scratch
|
||||
//void emit_add16c(dynarec_arm_t* dyn, int ninst, int s1, int c, int s3, int s4)
|
||||
//{
|
||||
// IFX(X_PEND) {
|
||||
@ -608,9 +616,10 @@ void emit_add16(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
|
||||
// }
|
||||
//}
|
||||
|
||||
// emit SUB16 instruction, from s1 , s2, store result in s1 using s3 and s4 as scratch, with save_s4 is s4 need to be saved
|
||||
// emit SUB16 instruction, from s1, s2, store result in s1 using s3 and s4 as scratch
|
||||
void emit_sub16(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
|
||||
{
|
||||
MAYUSE(s2);
|
||||
IFX(X_PEND) {
|
||||
STRH_U12(s1, xEmu, offsetof(x64emu_t, op1));
|
||||
STRH_U12(s2, xEmu, offsetof(x64emu_t, op2));
|
||||
@ -659,7 +668,7 @@ void emit_sub16(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
|
||||
}
|
||||
}
|
||||
|
||||
// emit SUB16 instruction, from s1 , constant c, store result in s1 using s3 and s4 as scratch
|
||||
// emit SUB16 instruction, from s1, constant c, store result in s1 using s3 and s4 as scratch
|
||||
//void emit_sub16c(dynarec_arm_t* dyn, int ninst, int s1, int c, int s3, int s4)
|
||||
//{
|
||||
// IFX(X_PEND) {
|
||||
@ -777,7 +786,7 @@ void emit_inc32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s3, int s4
|
||||
}
|
||||
}
|
||||
|
||||
// emit INC8 instruction, from s1, store result in s1 using s3 and s4 as scratch, with save_s4 is s4 need to be saved
|
||||
// emit INC8 instruction, from s1, store result in s1 using s3 and s4 as scratch
|
||||
void emit_inc8(dynarec_arm_t* dyn, int ninst, int s1, int s3, int s4)
|
||||
{
|
||||
IFX(X_PEND) {
|
||||
@ -822,7 +831,7 @@ void emit_inc8(dynarec_arm_t* dyn, int ninst, int s1, int s3, int s4)
|
||||
}
|
||||
}
|
||||
|
||||
// emit INC16 instruction, from s1 , store result in s1 using s3 and s4 as scratch, with save_s4 is s4 need to be saved
|
||||
// emit INC16 instruction, from s1, store result in s1 using s3 and s4 as scratch
|
||||
void emit_inc16(dynarec_arm_t* dyn, int ninst, int s1, int s3, int s4)
|
||||
{
|
||||
IFX(X_PEND) {
|
||||
@ -1009,9 +1018,10 @@ void emit_dec16(dynarec_arm_t* dyn, int ninst, int s1, int s3, int s4)
|
||||
}
|
||||
}
|
||||
|
||||
// emit ADC32 instruction, from s1 , s2, store result in s1 using s3 and s4 as scratch
|
||||
// emit ADC32 instruction, from s1, s2, store result in s1 using s3 and s4 as scratch
|
||||
void emit_adc32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3, int s4)
|
||||
{
|
||||
MAYUSE(s2);
|
||||
IFX(X_PEND) {
|
||||
STRxw_U12(s1, xEmu, offsetof(x64emu_t, op1));
|
||||
STRxw_U12(s2, xEmu, offsetof(x64emu_t, op2));
|
||||
@ -1062,7 +1072,7 @@ void emit_adc32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3
|
||||
}
|
||||
}
|
||||
|
||||
// emit ADC32 instruction, from s1 , constant c, store result in s1 using s3 and s4 as scratch
|
||||
// emit ADC32 instruction, from s1, constant c, store result in s1 using s3 and s4 as scratch
|
||||
//void emit_adc32c(dynarec_arm_t* dyn, int ninst, int s1, int32_t c, int s3, int s4)
|
||||
//{
|
||||
// IFX(X_PEND) {
|
||||
@ -1132,9 +1142,10 @@ void emit_adc32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3
|
||||
// }
|
||||
//}
|
||||
|
||||
// emit ADC8 instruction, from s1 , s2, store result in s1 using s3 and s4 as scratch, with save_s4 is s4 need to be saved
|
||||
// emit ADC8 instruction, from s1, s2, store result in s1 using s3 and s4 as scratch
|
||||
void emit_adc8(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
|
||||
{
|
||||
MAYUSE(s2);
|
||||
IFX(X_PEND) {
|
||||
STRB_U12(s1, xEmu, offsetof(x64emu_t, op1));
|
||||
STRB_U12(s2, xEmu, offsetof(x64emu_t, op2));
|
||||
@ -1185,9 +1196,10 @@ void emit_adc8(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
|
||||
}
|
||||
}
|
||||
|
||||
// emit ADC8 instruction, from s1 , const c, store result in s1 using s3 and s4 as scratch, with save_s4 is s4 need to be saved
|
||||
// emit ADC8 instruction, from s1, const c, store result in s1 using s3 and s4 as scratch
|
||||
void emit_adc8c(dynarec_arm_t* dyn, int ninst, int s1, int c, int s3, int s4, int s5)
|
||||
{
|
||||
MAYUSE(s5);
|
||||
MOV32w(s5, c&0xff);
|
||||
IFX(X_PEND) {
|
||||
STRB_U12(s1, xEmu, offsetof(x64emu_t, op1));
|
||||
@ -1239,9 +1251,10 @@ void emit_adc8c(dynarec_arm_t* dyn, int ninst, int s1, int c, int s3, int s4, in
|
||||
}
|
||||
}
|
||||
|
||||
// emit ADC16 instruction, from s1 , s2, store result in s1 using s3 and s4 as scratch, with save_s4 is s4 need to be saved
|
||||
// emit ADC16 instruction, from s1, s2, store result in s1 using s3 and s4 as scratch
|
||||
void emit_adc16(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
|
||||
{
|
||||
MAYUSE(s2);
|
||||
IFX(X_PEND) {
|
||||
STRH_U12(s1, xEmu, offsetof(x64emu_t, op1));
|
||||
STRH_U12(s2, xEmu, offsetof(x64emu_t, op2));
|
||||
@ -1292,7 +1305,7 @@ void emit_adc16(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
|
||||
}
|
||||
}
|
||||
|
||||
// emit ADC16 instruction, from s1 , const c, store result in s1 using s3 and s4 as scratch, with save_s4 is s4 need to be saved
|
||||
// emit ADC16 instruction, from s1, const c, store result in s1 using s3 and s4 as scratch
|
||||
//void emit_adc16c(dynarec_arm_t* dyn, int ninst, int s1, int c, int s3, int s4)
|
||||
//{
|
||||
// IFX(X_PEND) {
|
||||
@ -1359,9 +1372,10 @@ void emit_adc16(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
|
||||
// }
|
||||
//}
|
||||
|
||||
// emit SBB32 instruction, from s1 , s2, store result in s1 using s3 and s4 as scratch
|
||||
// emit SBB32 instruction, from s1, s2, store result in s1 using s3 and s4 as scratch
|
||||
void emit_sbb32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3, int s4)
|
||||
{
|
||||
MAYUSE(s2);
|
||||
IFX(X_PEND) {
|
||||
STRxw_U12(s1, xEmu, offsetof(x64emu_t, op1));
|
||||
STRxw_U12(s2, xEmu, offsetof(x64emu_t, op2));
|
||||
@ -1414,7 +1428,7 @@ void emit_sbb32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3
|
||||
}
|
||||
}
|
||||
|
||||
// emit SBB32 instruction, from s1 , constant c, store result in s1 using s3 and s4 as scratch
|
||||
// emit SBB32 instruction, from s1, constant c, store result in s1 using s3 and s4 as scratch
|
||||
//void emit_sbb32c(dynarec_arm_t* dyn, int ninst, int s1, int32_t c, int s3, int s4)
|
||||
//{
|
||||
// IFX(X_PEND) {
|
||||
@ -1486,9 +1500,10 @@ void emit_sbb32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3
|
||||
// }
|
||||
//}
|
||||
|
||||
// emit SBB8 instruction, from s1 , s2, store result in s1 using s3 and s4 as scratch, with save_s4 is s4 need to be saved
|
||||
// emit SBB8 instruction, from s1, s2, store result in s1 using s3 and s4 as scratch
|
||||
void emit_sbb8(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
|
||||
{
|
||||
MAYUSE(s2);
|
||||
IFX(X_PEND) {
|
||||
STRB_U12(s1, xEmu, offsetof(x64emu_t, op1));
|
||||
STRB_U12(s2, xEmu, offsetof(x64emu_t, op2));
|
||||
@ -1540,9 +1555,10 @@ void emit_sbb8(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
|
||||
}
|
||||
}
|
||||
|
||||
// emit SBB8 instruction, from s1 , constant c, store result in s1 using s3 and s4 as scratch
|
||||
// emit SBB8 instruction, from s1, constant c, store result in s1 using s3 and s4 as scratch
|
||||
void emit_sbb8c(dynarec_arm_t* dyn, int ninst, int s1, int c, int s3, int s4, int s5)
|
||||
{
|
||||
MAYUSE(s5);
|
||||
MOV32w(s5, c&0xff);
|
||||
IFX(X_PEND) {
|
||||
STRB_U12(s1, xEmu, offsetof(x64emu_t, op1));
|
||||
@ -1595,9 +1611,10 @@ void emit_sbb8c(dynarec_arm_t* dyn, int ninst, int s1, int c, int s3, int s4, in
|
||||
}
|
||||
}
|
||||
|
||||
// emit SBB16 instruction, from s1 , s2, store result in s1 using s3 and s4 as scratch, with save_s4 is s4 need to be saved
|
||||
// emit SBB16 instruction, from s1, s2, store result in s1 using s3 and s4 as scratch
|
||||
void emit_sbb16(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
|
||||
{
|
||||
MAYUSE(s2);
|
||||
IFX(X_PEND) {
|
||||
STRH_U12(s1, xEmu, offsetof(x64emu_t, op1));
|
||||
STRH_U12(s2, xEmu, offsetof(x64emu_t, op2));
|
||||
@ -1649,7 +1666,7 @@ void emit_sbb16(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4)
|
||||
}
|
||||
}
|
||||
|
||||
// emit SBB16 instruction, from s1 , constant c, store result in s1 using s3 and s4 as scratch
|
||||
// emit SBB16 instruction, from s1, constant c, store result in s1 using s3 and s4 as scratch
|
||||
//void emit_sbb16c(dynarec_arm_t* dyn, int ninst, int s1, int c, int s3, int s4)
|
||||
//{
|
||||
// IFX(X_PEND) {
|
||||
@ -1853,4 +1870,4 @@ void emit_neg8(dynarec_arm_t* dyn, int ninst, int s1, int s3, int s4)
|
||||
IFX(X_PF) {
|
||||
emit_pf(dyn, ninst, s1, s3, s4);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -26,8 +26,9 @@
|
||||
// emit SHL32 instruction, from s1 , shift s2, store result in s1 using s3 and s4 as scratch. s3 can be same as s2
|
||||
void emit_shl32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3, int s4)
|
||||
{
|
||||
int32_t j32;
|
||||
MAYUSE(j32);
|
||||
MAYUSE(s2);
|
||||
int64_t j64;
|
||||
MAYUSE(j64);
|
||||
|
||||
IFX(X_PEND) {
|
||||
STRxw_U12(s1, xEmu, offsetof(x64emu_t, op1));
|
||||
@ -138,8 +139,9 @@ void emit_shl32c(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int32_t c, in
|
||||
// emit SHR32 instruction, from s1 , s2, store result in s1 using s3 and s4 as scratch, s2 can be same as s3
|
||||
void emit_shr32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3, int s4)
|
||||
{
|
||||
int32_t j32;
|
||||
MAYUSE(j32);
|
||||
MAYUSE(s2);
|
||||
int64_t j64;
|
||||
MAYUSE(j64);
|
||||
|
||||
IFX(X_PEND) {
|
||||
STRxw_U12(s1, xEmu, offsetof(x64emu_t, op1));
|
||||
@ -281,6 +283,7 @@ void emit_sar32c(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int32_t c, in
|
||||
// emit ROL32 instruction, from s1 , constant c, store result in s1 using s3 and s4 as scratch
|
||||
void emit_rol32c(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int32_t c, int s3, int s4)
|
||||
{
|
||||
MAYUSE(rex); MAYUSE(s1); MAYUSE(s3); MAYUSE(s4);
|
||||
IFX(X_PEND) {
|
||||
MOV32w(s3, c);
|
||||
STRxw_U12(s3, xEmu, offsetof(x64emu_t, op2));
|
||||
@ -312,6 +315,7 @@ void emit_rol32c(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int32_t c, in
|
||||
// emit ROR32 instruction, from s1 , constant c, store result in s1 using s3 and s4 as scratch
|
||||
void emit_ror32c(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int32_t c, int s3, int s4)
|
||||
{
|
||||
MAYUSE(s1); MAYUSE(s3); MAYUSE(s4);
|
||||
IFX(X_PEND) {
|
||||
MOV32w(s3, c);
|
||||
STRxw_U12(s3, xEmu, offsetof(x64emu_t, op2));
|
||||
|
@ -23,9 +23,10 @@
|
||||
#include "dynarec_arm64_functions.h"
|
||||
#include "dynarec_arm64_helper.h"
|
||||
|
||||
// emit CMP32 instruction, from cmp s1 , s2, using s3 and s4 as scratch
|
||||
// emit CMP32 instruction, from cmp s1, s2, using s3 and s4 as scratch
|
||||
void emit_cmp32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3, int s4, int s5)
|
||||
{
|
||||
MAYUSE(s1); MAYUSE(s2);
|
||||
IFX_PENDOR0 {
|
||||
STRxw_U12(s1, xEmu, offsetof(x64emu_t, op1));
|
||||
STRxw_U12(s2, xEmu, offsetof(x64emu_t, op2));
|
||||
@ -110,9 +111,10 @@ void emit_cmp32_0(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s3, int
|
||||
}
|
||||
}
|
||||
|
||||
// emit CMP16 instruction, from cmp s1 , s2, using s3 and s4 as scratch
|
||||
// emit CMP16 instruction, from cmp s1, s2, using s3 and s4 as scratch
|
||||
void emit_cmp16(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4, int s5)
|
||||
{
|
||||
MAYUSE(s1); MAYUSE(s2);
|
||||
IFX_PENDOR0 {
|
||||
STRH_U12(s1, xEmu, offsetof(x64emu_t, op1));
|
||||
STRH_U12(s2, xEmu, offsetof(x64emu_t, op2));
|
||||
@ -189,9 +191,10 @@ void emit_cmp16_0(dynarec_arm_t* dyn, int ninst, int s1, int s3, int s4)
|
||||
emit_pf(dyn, ninst, s1, s3, s4);
|
||||
}
|
||||
}
|
||||
// emit CMP8 instruction, from cmp s1 , s2, using s3 and s4 as scratch
|
||||
// emit CMP8 instruction, from cmp s1, s2, using s3 and s4 as scratch
|
||||
void emit_cmp8(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4, int s5)
|
||||
{
|
||||
MAYUSE(s1); MAYUSE(s2);
|
||||
IFX_PENDOR0 {
|
||||
STRB_U12(s1, xEmu, offsetof(x64emu_t, op1));
|
||||
STRB_U12(s2, xEmu, offsetof(x64emu_t, op2));
|
||||
@ -270,9 +273,10 @@ void emit_cmp8_0(dynarec_arm_t* dyn, int ninst, int s1, int s3, int s4)
|
||||
}
|
||||
}
|
||||
|
||||
// emit TEST32 instruction, from test s1 , s2, using s3 and s4 as scratch
|
||||
// emit TEST32 instruction, from test s1, s2, using s3 and s4 as scratch
|
||||
void emit_test32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s3, int s4)
|
||||
{
|
||||
MAYUSE(s1); MAYUSE(s2); MAYUSE(s3); MAYUSE(s4);
|
||||
IFX_PENDOR0 {
|
||||
SET_DF(s3, rex.w?d_tst64:d_tst32);
|
||||
} else {
|
||||
@ -307,9 +311,10 @@ void emit_test32(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, int s
|
||||
}
|
||||
}
|
||||
|
||||
// emit TEST16 instruction, from test s1 , s2, using s3 and s4 as scratch
|
||||
// emit TEST16 instruction, from test s1, s2, using s3 and s4 as scratch
|
||||
void emit_test16(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4, int s5)
|
||||
{
|
||||
MAYUSE(s1); MAYUSE(s2);
|
||||
IFX_PENDOR0 {
|
||||
SET_DF(s3, d_tst16);
|
||||
} else {
|
||||
@ -339,9 +344,10 @@ void emit_test16(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4,
|
||||
}
|
||||
}
|
||||
|
||||
// emit TEST8 instruction, from test s1 , s2, using s3 and s4 as scratch
|
||||
// emit TEST8 instruction, from test s1, s2, using s3 and s4 as scratch
|
||||
void emit_test8(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3, int s4, int s5)
|
||||
{
|
||||
MAYUSE(s1); MAYUSE(s2);
|
||||
IFX_PENDOR0 {
|
||||
SET_DF(s3, d_tst8);
|
||||
} else {
|
||||
|
@ -25,17 +25,18 @@
|
||||
|
||||
uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int ninst, rex_t rex, int rep, int* ok, int* need_epilog)
|
||||
{
|
||||
(void)ip; (void)rep; (void)need_epilog;
|
||||
|
||||
uint8_t opcode = F8;
|
||||
uint8_t nextop;
|
||||
int32_t j32;
|
||||
uint8_t gd, ed;
|
||||
uint8_t wback, wb2, gb1, gb2;
|
||||
int64_t i64;
|
||||
int fixedaddress;
|
||||
int64_t i64, j64;
|
||||
int64_t fixedaddress;
|
||||
MAYUSE(gb1);
|
||||
MAYUSE(gb2);
|
||||
MAYUSE(wb2);
|
||||
MAYUSE(j32);
|
||||
MAYUSE(j64);
|
||||
|
||||
while((opcode==0xF2) || (opcode==0xF3)) {
|
||||
rep = opcode-0xF1;
|
||||
@ -441,4 +442,3 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
|
||||
}
|
||||
return addr;
|
||||
}
|
||||
|
||||
|
@ -42,6 +42,8 @@
|
||||
|
||||
uintptr_t dynarec64_F20F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int ninst, rex_t rex, int* ok, int* need_epilog)
|
||||
{
|
||||
(void)ip; (void)need_epilog;
|
||||
|
||||
uint8_t opcode = F8;
|
||||
uint8_t nextop;
|
||||
uint8_t gd, ed;
|
||||
@ -51,7 +53,7 @@ uintptr_t dynarec64_F20F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
|
||||
int v0, v1;
|
||||
int q0;
|
||||
int d0, d1;
|
||||
int fixedaddress;
|
||||
int64_t fixedaddress;
|
||||
|
||||
#ifdef PRECISE_CVT
|
||||
int j32;
|
||||
@ -316,4 +318,3 @@ uintptr_t dynarec64_F20F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
|
||||
}
|
||||
return addr;
|
||||
}
|
||||
|
||||
|
@ -42,6 +42,8 @@
|
||||
|
||||
uintptr_t dynarec64_F30F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int ninst, rex_t rex, int* ok, int* need_epilog)
|
||||
{
|
||||
(void)ip; (void)need_epilog;
|
||||
|
||||
uint8_t opcode = F8;
|
||||
uint8_t nextop, u8;
|
||||
uint8_t gd, ed;
|
||||
@ -50,8 +52,8 @@ uintptr_t dynarec64_F30F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
|
||||
int v0, v1;
|
||||
int q0, q1;
|
||||
int d0, d1;
|
||||
int fixedaddress;
|
||||
int j32;
|
||||
int64_t fixedaddress;
|
||||
int64_t j64;
|
||||
|
||||
MAYUSE(d0);
|
||||
MAYUSE(d1);
|
||||
@ -59,7 +61,7 @@ uintptr_t dynarec64_F30F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
|
||||
MAYUSE(q1);
|
||||
MAYUSE(v0);
|
||||
MAYUSE(v1);
|
||||
MAYUSE(j32);
|
||||
MAYUSE(j64);
|
||||
|
||||
switch(opcode) {
|
||||
|
||||
@ -396,4 +398,3 @@ uintptr_t dynarec64_F30F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
|
||||
}
|
||||
return addr;
|
||||
}
|
||||
|
||||
|
@ -36,7 +36,8 @@ void arm_fstp(x64emu_t* emu, void* p)
|
||||
|
||||
void arm_print_armreg(x64emu_t* emu, uintptr_t reg, uintptr_t n)
|
||||
{
|
||||
dynarec_log(LOG_DEBUG, "R%ld=0x%lx (%ld)\n", n, reg, reg);
|
||||
(void)emu;
|
||||
dynarec_log(LOG_DEBUG, "R%lu=0x%lx (%lu)\n", n, reg, reg);
|
||||
}
|
||||
|
||||
void arm_f2xm1(x64emu_t* emu)
|
||||
@@ -118,7 +119,7 @@ void arm_fbstp(x64emu_t* emu, uint8_t* ed)
 void arm_fistp64(x64emu_t* emu, int64_t* ed)
 {
     // used of memcpy to avoid aligments issues
-    if(STll(0).ref==ST(0).q) {
+    if((uint64_t)STll(0).ref==ST(0).q) {
         memcpy(ed, &STll(0).ll, sizeof(int64_t));
     } else {
         int64_t tmp;
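The comment above ("used of memcpy to avoid aligments issues") refers to a standard C idiom: copy through memcpy rather than dereference a pointer that may be misaligned or alias another type. A standalone sketch:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Store a 64-bit value at a destination that may not be 8-byte aligned.
 * A direct *(int64_t*)dst = v would be undefined behaviour on strict
 * targets; memcpy is well defined, and compilers lower it to plain
 * stores when the alignment is known. */
static void store_i64(void *dst, int64_t v)
{
    memcpy(dst, &v, sizeof v);
}

int main(void)
{
    unsigned char buf[12];
    store_i64(buf + 1, INT64_C(0x1122334455667788));  /* deliberately misaligned */
    int64_t back;
    memcpy(&back, buf + 1, sizeof back);
    printf("round-tripped: 0x%llx\n", (unsigned long long)back);
    return 0;
}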
@@ -244,6 +245,7 @@ void fpu_reset_reg(dynarec_arm_t* dyn)
 // Get if ED will have the correct parity. Not emiting anything. Parity is 2 for DWORD or 3 for QWORD
 int getedparity(dynarec_arm_t* dyn, int ninst, uintptr_t addr, uint8_t nextop, int parity, int delta)
 {
+    (void)dyn; (void)ninst;
 
     uint32_t tested = (1<<parity)-1;
     if((nextop&0xC0)==0xC0)
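(1<<parity)-1 above builds a mask of the low address bits that must be clear for an access aligned to 2^parity bytes (parity 2 for DWORD, 3 for QWORD, as the comment says). A standalone illustration:

#include <stdint.h>
#include <stdio.h>

/* True if addr is aligned to a 2^parity byte boundary. */
static int has_parity(uintptr_t addr, int parity)
{
    uintptr_t mask = ((uintptr_t)1 << parity) - 1;  /* e.g. parity 3 -> 0x7 */
    return (addr & mask) == 0;
}

int main(void)
{
    printf("0x1000 8-byte aligned? %d\n", has_parity(0x1000, 3));  /* 1 */
    printf("0x1004 8-byte aligned? %d\n", has_parity(0x1004, 3));  /* 0 */
    printf("0x1004 4-byte aligned? %d\n", has_parity(0x1004, 2));  /* 1 */
    return 0;
}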
@ -282,6 +284,8 @@ int getedparity(dynarec_arm_t* dyn, int ninst, uintptr_t addr, uint8_t nextop, i
|
||||
// Do the GETED, but don't emit anything...
|
||||
uintptr_t fakeed(dynarec_arm_t* dyn, uintptr_t addr, int ninst, uint8_t nextop)
|
||||
{
|
||||
(void)dyn; (void)addr; (void)ninst;
|
||||
|
||||
if((nextop&0xC0)==0xC0)
|
||||
return addr;
|
||||
if(!(nextop&0xC0)) {
|
||||
@ -310,6 +314,8 @@ uintptr_t fakeed(dynarec_arm_t* dyn, uintptr_t addr, int ninst, uint8_t nextop)
|
||||
|
||||
int isNativeCall(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t* calladdress, int* retn)
|
||||
{
|
||||
(void)dyn;
|
||||
|
||||
#define PK(a) *(uint8_t*)(addr+a)
|
||||
#define PK32(a) *(int32_t*)(addr+a)
|
||||
|
||||
@ -332,4 +338,3 @@ int isNativeCall(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t* calladdress, int
|
||||
#undef PK32
|
||||
#undef PK
|
||||
}
|
||||
|
||||
|
@ -26,8 +26,10 @@
|
||||
#include "dynarec_arm64_helper.h"
|
||||
|
||||
/* setup r2 to address pointed by ED, also fixaddress is an optionnal delta in the range [-absmax, +absmax], with delta&mask==0 to be added to ed for LDR/STR */
|
||||
uintptr_t geted(dynarec_arm_t* dyn, uintptr_t addr, int ninst, uint8_t nextop, uint8_t* ed, uint8_t hint, int* fixaddress, int absmax, uint32_t mask, rex_t rex, int s, int delta)
|
||||
uintptr_t geted(dynarec_arm_t* dyn, uintptr_t addr, int ninst, uint8_t nextop, uint8_t* ed, uint8_t hint, int64_t* fixaddress, int absmax, uint32_t mask, rex_t rex, int s, int delta)
|
||||
{
|
||||
MAYUSE(dyn); MAYUSE(ninst); MAYUSE(delta);
|
||||
|
||||
uint8_t ret = x2;
|
||||
uint8_t scratch = x2;
|
||||
*fixaddress = 0;
|
||||
@ -41,7 +43,7 @@ uintptr_t geted(dynarec_arm_t* dyn, uintptr_t addr, int ninst, uint8_t nextop, u
|
||||
uint8_t sib = F8;
|
||||
int sib_reg = ((sib>>3)&7)+(rex.x<<3);
|
||||
if((sib&0x7)==5) {
|
||||
uint64_t tmp = F32S64;
|
||||
int64_t tmp = F32S;
|
||||
if (sib_reg!=4) {
|
||||
if(tmp && ((tmp<absmin) || (tmp>absmax) || (tmp&mask))) {
|
||||
MOV64x(scratch, tmp);
|
||||
@ -141,8 +143,10 @@ uintptr_t geted(dynarec_arm_t* dyn, uintptr_t addr, int ninst, uint8_t nextop, u
|
||||
}
|
||||
|
||||
/* setup r2 to address pointed by ED, also fixaddress is an optionnal delta in the range [-absmax, +absmax], with delta&mask==0 to be added to ed for LDR/STR */
|
||||
uintptr_t geted32(dynarec_arm_t* dyn, uintptr_t addr, int ninst, uint8_t nextop, uint8_t* ed, uint8_t hint, int* fixaddress, int absmax, uint32_t mask, rex_t rex, int s, int delta)
|
||||
uintptr_t geted32(dynarec_arm_t* dyn, uintptr_t addr, int ninst, uint8_t nextop, uint8_t* ed, uint8_t hint, int64_t* fixaddress, int absmax, uint32_t mask, rex_t rex, int s, int delta)
|
||||
{
|
||||
MAYUSE(dyn); MAYUSE(ninst); MAYUSE(delta);
|
||||
|
||||
uint8_t ret = x2;
|
||||
uint8_t scratch = x2;
|
||||
*fixaddress = 0;
|
||||
@@ -156,17 +160,17 @@ uintptr_t geted32(dynarec_arm_t* dyn, uintptr_t addr, int ninst, uint8_t nextop,
     uint8_t sib = F8;
     int sib_reg = ((sib>>3)&7)+(rex.x<<3);
     if((sib&0x7)==5) {
-        uint32_t tmp = F32;
+        int64_t tmp = F32S;
         if (sib_reg!=4) {
             if(tmp && ((tmp<absmin) || (tmp>absmax) || (tmp&mask))) {
-                MOV32w(scratch, tmp);
+                MOV64x(scratch, tmp);
                 ADDw_REG_LSL(ret, scratch, xRAX+sib_reg, (sib>>6));
             } else {
                 LSLw(ret, xRAX+sib_reg, (sib>>6));
                 *fixedaddress = tmp;
             }
         } else {
-            MOV32w(ret, tmp);
+            MOV64x(ret, tmp);
         }
     } else {
         if (sib_reg!=4) {
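The F32 to F32S switch above changes how the 32-bit displacement is fetched: x86-64 sign-extends 32-bit displacements to 64 bits, so reading them into a uint32_t and zero-extending loses negative offsets. The fetch macros themselves are box64-specific; the underlying difference is just this:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* A 32-bit displacement of -16 as it appears in the instruction stream. */
    uint32_t raw = 0xfffffff0u;

    uint64_t zero_extended = (uint64_t)raw;          /* what a plain 32-bit fetch gives     */
    int64_t  sign_extended = (int64_t)(int32_t)raw;  /* what a sign-extending fetch gives   */

    printf("zero-extended: 0x%016llx\n", (unsigned long long)zero_extended);  /* 0x00000000fffffff0 */
    printf("sign-extended: %lld\n", (long long)sign_extended);                /* -16 */
    return 0;
}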
@ -260,8 +264,10 @@ uintptr_t geted32(dynarec_arm_t* dyn, uintptr_t addr, int ninst, uint8_t nextop,
}

/* setup r2 to address pointed by ED, r3 as scratch also fixaddress is an optionnal delta in the range [-absmax, +absmax], with delta&mask==0 to be added to ed for LDR/STR */
uintptr_t geted16(dynarec_arm_t* dyn, uintptr_t addr, int ninst, uint8_t nextop, uint8_t* ed, uint8_t hint, int* fixaddress, int absmax, uint32_t mask, int s)
uintptr_t geted16(dynarec_arm_t* dyn, uintptr_t addr, int ninst, uint8_t nextop, uint8_t* ed, uint8_t hint, int64_t* fixaddress, int absmax, uint32_t mask, int s)
{
MAYUSE(dyn); MAYUSE(ninst);

uint8_t ret = x2;
uint8_t scratch = x3;
*fixaddress = 0;
@ -270,7 +276,7 @@ uintptr_t geted16(dynarec_arm_t* dyn, uintptr_t addr, int ninst, uint8_t nextop,
MAYUSE(scratch);
uint32_t m = nextop&0xC7;
uint32_t n = (m>>6)&3;
int32_t offset = 0;
int64_t offset = 0;
int absmin = 0;
if(s) absmin = -absmax;
if(!n && m==6) {
@ -338,7 +344,9 @@ uintptr_t geted16(dynarec_arm_t* dyn, uintptr_t addr, int ninst, uint8_t nextop,

void jump_to_epilog(dynarec_arm_t* dyn, uintptr_t ip, int reg, int ninst)
{
MAYUSE(dyn); MAYUSE(ip); MAYUSE(ninst);
MESSAGE(LOG_DUMP, "Jump to epilog\n");

if(reg) {
if(reg!=xRIP) {
MOVx_REG(xRIP, reg);
@ -352,6 +360,7 @@ void jump_to_epilog(dynarec_arm_t* dyn, uintptr_t ip, int reg, int ninst)

void jump_to_next(dynarec_arm_t* dyn, uintptr_t ip, int reg, int ninst)
{
MAYUSE(dyn); MAYUSE(ninst);
MESSAGE(LOG_DUMP, "Jump to next\n");

if(reg) {
@ -387,7 +396,8 @@ void jump_to_next(dynarec_arm_t* dyn, uintptr_t ip, int reg, int ninst)

void ret_to_epilog(dynarec_arm_t* dyn, int ninst)
{
MESSAGE(LOG_DUMP, "Ret next\n");
MAYUSE(dyn); MAYUSE(ninst);
MESSAGE(LOG_DUMP, "Ret to epilog\n");
POP1(xRIP);
uintptr_t tbl = getJumpTable64();
MOV64x(x2, tbl);
@ -405,7 +415,8 @@ void ret_to_epilog(dynarec_arm_t* dyn, int ninst)

void retn_to_epilog(dynarec_arm_t* dyn, int ninst, int n)
{
MESSAGE(LOG_DUMP, "Retn epilog\n");
MAYUSE(dyn); MAYUSE(ninst);
MESSAGE(LOG_DUMP, "Retn to epilog\n");
POP1(xRIP);
if(n>0xfff) {
MOV32w(w1, n);
@ -429,7 +440,8 @@ void retn_to_epilog(dynarec_arm_t* dyn, int ninst, int n)

void iret_to_epilog(dynarec_arm_t* dyn, int ninst)
{
MESSAGE(LOG_DUMP, "IRet epilog\n");
MAYUSE(ninst);
MESSAGE(LOG_DUMP, "IRet to epilog\n");
// POP IP
POP1(xRIP);
// POP CS
@ -450,6 +462,7 @@ void iret_to_epilog(dynarec_arm_t* dyn, int ninst)

void call_c(dynarec_arm_t* dyn, int ninst, void* fnc, int reg, int ret, int saveflags, int savereg)
{
MAYUSE(fnc);
if(savereg==0)
savereg = 7;
if(saveflags) {
@ -494,8 +507,9 @@ void call_c(dynarec_arm_t* dyn, int ninst, void* fnc, int reg, int ret, int save

void grab_segdata(dynarec_arm_t* dyn, uintptr_t addr, int ninst, int reg, int segment)
{
int32_t j32;
MAYUSE(j32);
(void)addr;
int64_t j64;
MAYUSE(j64);
MESSAGE(LOG_DUMP, "Get %s Offset\n", (segment==_FS)?"FS":"GS");
int t1 = x1, t2 = x4;
if(reg==t1) ++t1;
@ -519,16 +533,20 @@ void grab_segdata(dynarec_arm_t* dyn, uintptr_t addr, int ninst, int reg, int se
// x87 stuffs
static void x87_reset(dynarec_arm_t* dyn, int ninst)
{
(void)ninst;
#if STEP > 1
for (int i=0; i<8; ++i)
dyn->x87cache[i] = -1;
dyn->x87stack = 0;
#else
(void)dyn;
#endif
}

void x87_stackcount(dynarec_arm_t* dyn, int ninst, int scratch)
{
#if STEP > 1
MAYUSE(scratch);
if(!dyn->x87stack)
return;
MESSAGE(LOG_DUMP, "\tSynch x87 Stackcount (%d)\n", dyn->x87stack);
@ -553,11 +571,14 @@ void x87_stackcount(dynarec_arm_t* dyn, int ninst, int scratch)
// reset x87stack
dyn->x87stack = 0;
MESSAGE(LOG_DUMP, "\t------x87 Stackcount\n");
#else
(void)dyn; (void)ninst; (void)scratch;
#endif
}

int x87_do_push(dynarec_arm_t* dyn, int ninst)
{
(void)ninst;
#if STEP > 1
dyn->x87stack+=1;
// move all regs in cache, and find a free one
@ -571,6 +592,7 @@ int x87_do_push(dynarec_arm_t* dyn, int ninst)
}
return ret;
#else
(void)dyn;
return 0;
#endif
}
@ -584,10 +606,13 @@ void x87_do_push_empty(dynarec_arm_t* dyn, int ninst, int s1)
++dyn->x87cache[i];
if(s1)
x87_stackcount(dyn, ninst, s1);
#else
(void)dyn; (void)ninst; (void)s1;
#endif
}
void x87_do_pop(dynarec_arm_t* dyn, int ninst)
{
(void)ninst;
#if STEP > 1
dyn->x87stack-=1;
// move all regs in cache, poping ST0
@ -599,12 +624,16 @@ void x87_do_pop(dynarec_arm_t* dyn, int ninst)
dyn->x87reg[i] = -1;
}
}
#else
(void)dyn;
#endif
}

void x87_purgecache(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3)
{
(void)ninst;
#if STEP > 1
MAYUSE(s1); MAYUSE(s2); MAYUSE(s3);
int ret = 0;
for (int i=0; i<8 && !ret; ++i)
if(dyn->x87cache[i] != -1)
@ -666,6 +695,8 @@ void x87_purgecache(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3)
dyn->x87cache[i] = -1;
}
}
#else
(void)dyn; (void)s1; (void)s2; (void)s3;
#endif
}

@ -673,6 +704,7 @@ void x87_purgecache(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3)
static void x87_reflectcache(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3)
{
#if STEP > 1
MAYUSE(s2); MAYUSE(s3);
x87_stackcount(dyn, ninst, s1);
int ret = 0;
for (int i=0; (i<8) && (!ret); ++i)
@ -691,13 +723,17 @@ static void x87_reflectcache(dynarec_arm_t* dyn, int ninst, int s1, int s2, int
ANDw_mask(s3, s3, 0, 2); // mask=7 // (emu->top + i)&7
VSTR64_REG_LSL3(dyn->x87reg[i], s1, s3);
}
#else
(void)dyn; (void)ninst; (void)s1; (void)s2; (void)s3;
#endif
}
#endif

int x87_get_cache(dynarec_arm_t* dyn, int ninst, int s1, int s2, int st)
{
#if STEP > 1
(void)ninst;
#if STEP > 1
MAYUSE(s1); MAYUSE(s2);
// search in cache first
for (int i=0; i<8; ++i)
if(dyn->x87cache[i]==st)
@ -727,6 +763,7 @@ int x87_get_cache(dynarec_arm_t* dyn, int ninst, int s1, int s2, int st)

return ret;
#else
(void)dyn; (void)s1; (void)s2; (void)st;
return 0;
#endif
}
@ -736,6 +773,7 @@ int x87_get_st(dynarec_arm_t* dyn, int ninst, int s1, int s2, int a)
#if STEP > 1
return dyn->x87reg[x87_get_cache(dyn, ninst, s1, s2, a)];
#else
(void)dyn; (void)ninst; (void)s1; (void)s2; (void)a;
return 0;
#endif
}
@ -744,6 +782,7 @@ int x87_get_st(dynarec_arm_t* dyn, int ninst, int s1, int s2, int a)
void x87_refresh(dynarec_arm_t* dyn, int ninst, int s1, int s2, int st)
{
#if STEP > 1
MAYUSE(s2);
x87_stackcount(dyn, ninst, s1);
int ret = -1;
for (int i=0; (i<8) && (ret==-1); ++i)
@ -763,12 +802,15 @@ void x87_refresh(dynarec_arm_t* dyn, int ninst, int s1, int s2, int st)
}
VLDR64_REG_LSL3(dyn->x87reg[ret], s1, s2);
MESSAGE(LOG_DUMP, "\t--------x87 Cache for ST%d\n", st);
#else
(void)dyn; (void)ninst; (void)s1; (void)s2; (void)st;
#endif
}

void x87_forget(dynarec_arm_t* dyn, int ninst, int s1, int s2, int st)
{
#if STEP > 1
MAYUSE(s2);
x87_stackcount(dyn, ninst, s1);
int ret = -1;
for (int i=0; (i<8) && (ret==-1); ++i)
@ -792,12 +834,16 @@ void x87_forget(dynarec_arm_t* dyn, int ninst, int s1, int s2, int st)
fpu_free_reg(dyn, dyn->x87reg[ret]);
dyn->x87cache[ret] = -1;
dyn->x87reg[ret] = -1;
#else
(void)dyn; (void)ninst; (void)s1; (void)s2; (void)st;
#endif
}

void x87_reget_st(dynarec_arm_t* dyn, int ninst, int s1, int s2, int st)
{
(void)ninst;
#if STEP > 1
MAYUSE(s1); MAYUSE(s2);
// search in cache first
for (int i=0; i<8; ++i)
if(dyn->x87cache[i]==st) {
@ -838,6 +884,8 @@ void x87_reget_st(dynarec_arm_t* dyn, int ninst, int s1, int s2, int st)
ANDw_mask(s2, s2, 0, 2); //mask=7 // (emu->top + i)&7
VLDR64_REG_LSL3(dyn->x87reg[ret], s1, s2);
MESSAGE(LOG_DUMP, "\t-------x87 Cache for ST%d\n", st);
#else
(void)dyn; (void)s1; (void)s2; (void)st;
#endif
}

@ -846,6 +894,8 @@ static int round_map[] = {0, 2, 1, 3}; // map x64 -> arm round flag
// Set rounding according to cw flags, return reg to restore flags
int x87_setround(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3)
{
MAYUSE(dyn); MAYUSE(ninst);
MAYUSE(s1); MAYUSE(s2);
LDRH_U12(s1, xEmu, offsetof(x64emu_t, cw));
UBFXx(s2, s1, 10, 2); // extract round...
MOV64x(s1, (uintptr_t)round_map);
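For context, round_map[] = {0, 2, 1, 3} converts the two x86 rounding-control bits (extracted above with UBFXx(s2, s1, 10, 2) from the FPU control word) into the AArch64 FPCR.RMode encoding. The sketch below is a reading aid only, with the bit meanings quoted from memory rather than from this commit:

#include <stdio.h>

/* Assumed encodings (verify against the manuals):
 *   x86 RC:         0 = nearest, 1 = toward -inf, 2 = toward +inf, 3 = toward zero
 *   AArch64 RMode:  0 = RN,      1 = RP (+inf),   2 = RM (-inf),   3 = RZ
 * The two directed modes swap places, giving {0, 2, 1, 3}. */
static const int round_map_sketch[4] = {0, 2, 1, 3};

int main(void) {
    const char *x86[4] = {"nearest", "down (-inf)", "up (+inf)", "truncate"};
    const char *arm[4] = {"RN", "RP (+inf)", "RM (-inf)", "RZ"};
    for (int rc = 0; rc < 4; ++rc)
        printf("x86 RC=%d (%s) -> FPCR.RMode=%d (%s)\n",
               rc, x86[rc], round_map_sketch[rc], arm[round_map_sketch[rc]]);
    return 0;
}
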
@ -860,6 +910,8 @@ int x87_setround(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3)
// Set rounding according to mxcsr flags, return reg to restore flags
int sse_setround(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3)
{
MAYUSE(dyn); MAYUSE(ninst);
MAYUSE(s1); MAYUSE(s2);
LDRH_U12(s1, xEmu, offsetof(x64emu_t, mxcsr));
UBFXx(s2, s1, 13, 2); // extract round...
MOV64x(s1, (uintptr_t)round_map);
@ -874,20 +926,27 @@ int sse_setround(dynarec_arm_t* dyn, int ninst, int s1, int s2, int s3)
// Restore round flag
void x87_restoreround(dynarec_arm_t* dyn, int ninst, int s1)
{
MAYUSE(dyn); MAYUSE(ninst);
MAYUSE(s1);
MSR_fpcr(s1); // put back fpscr
}

// MMX helpers
static void mmx_reset(dynarec_arm_t* dyn, int ninst)
{
(void)ninst;
#if STEP > 1
MAYUSE(dyn);
for (int i=0; i<8; ++i)
dyn->mmxcache[i] = -1;
#else
(void)dyn;
#endif
}
// get neon register for a MMX reg, create the entry if needed
int mmx_get_reg(dynarec_arm_t* dyn, int ninst, int s1, int a)
{
(void)ninst; (void)s1;
#if STEP > 1
if(dyn->mmxcache[a]!=-1)
return dyn->mmxcache[a];
@ -895,24 +954,28 @@ int mmx_get_reg(dynarec_arm_t* dyn, int ninst, int s1, int a)
VLDR64_U12(ret, xEmu, offsetof(x64emu_t, mmx87[a]));
return ret;
#else
(void)dyn; (void)a;
return 0;
#endif
}
// get neon register for a MMX reg, but don't try to synch it if it needed to be created
int mmx_get_reg_empty(dynarec_arm_t* dyn, int ninst, int s1, int a)
{
(void)ninst; (void)s1;
#if STEP > 1
if(dyn->mmxcache[a]!=-1)
return dyn->mmxcache[a];
int ret = dyn->mmxcache[a] = fpu_get_reg_emm(dyn, a);
return ret;
#else
(void)dyn; (void)a;
return 0;
#endif
}
// purge the MMX cache only(needs 3 scratch registers)
void mmx_purgecache(dynarec_arm_t* dyn, int ninst, int s1)
{
(void)ninst; (void)s1;
#if STEP > 1
int old = -1;
for (int i=0; i<8; ++i)
@ -928,16 +991,21 @@ void mmx_purgecache(dynarec_arm_t* dyn, int ninst, int s1)
if(old!=-1) {
MESSAGE(LOG_DUMP, "\t------ Purge MMX Cache\n");
}
#else
(void)dyn;
#endif
}
#ifdef HAVE_TRACE
static void mmx_reflectcache(dynarec_arm_t* dyn, int ninst, int s1)
{
(void) ninst; (void)s1;
#if STEP > 1
for (int i=0; i<8; ++i)
if(dyn->mmxcache[i]!=-1) {
VLDR64_U12(dyn->mmxcache[i], xEmu, offsetof(x64emu_t, mmx87[i]));
}
#else
(void)dyn;
#endif
}
#endif
@ -946,14 +1014,18 @@ static void mmx_reflectcache(dynarec_arm_t* dyn, int ninst, int s1)
// SSE / SSE2 helpers
static void sse_reset(dynarec_arm_t* dyn, int ninst)
{
(void)ninst;
#if STEP > 1
for (int i=0; i<16; ++i)
dyn->ssecache[i] = -1;
#else
(void)dyn;
#endif
}
// get neon register for a SSE reg, create the entry if needed
int sse_get_reg(dynarec_arm_t* dyn, int ninst, int s1, int a)
{
(void) ninst; (void)s1;
#if STEP > 1
if(dyn->ssecache[a]!=-1)
return dyn->ssecache[a];
@ -961,24 +1033,28 @@ int sse_get_reg(dynarec_arm_t* dyn, int ninst, int s1, int a)
VLDR128_U12(ret, xEmu, offsetof(x64emu_t, xmm[a]));
return ret;
#else
(void)dyn; (void)a;
return 0;
#endif
}
// get neon register for a SSE reg, but don't try to synch it if it needed to be created
int sse_get_reg_empty(dynarec_arm_t* dyn, int ninst, int s1, int a)
{
(void) ninst; (void)s1;
#if STEP > 1
if(dyn->ssecache[a]!=-1)
return dyn->ssecache[a];
int ret = dyn->ssecache[a] = fpu_get_reg_xmm(dyn, a);
return ret;
#else
(void)dyn; (void)a;
return 0;
#endif
}
// purge the SSE cache for XMM0..XMM7 (to use before function native call)
void sse_purge07cache(dynarec_arm_t* dyn, int ninst, int s1)
{
(void) ninst; (void)s1;
#if STEP > 1
int old = -1;
for (int i=0; i<8; ++i)
@ -994,12 +1070,15 @@ void sse_purge07cache(dynarec_arm_t* dyn, int ninst, int s1)
if(old!=-1) {
MESSAGE(LOG_DUMP, "\t------ Purge XMM0..7 Cache\n");
}
#else
(void)dyn;
#endif
}

// purge the SSE cache only
static void sse_purgecache(dynarec_arm_t* dyn, int ninst, int s1)
{
(void) ninst; (void)s1;
#if STEP > 1
int old = -1;
for (int i=0; i<16; ++i)
@ -1015,22 +1094,28 @@ static void sse_purgecache(dynarec_arm_t* dyn, int ninst, int s1)
if(old!=-1) {
MESSAGE(LOG_DUMP, "\t------ Purge SSE Cache\n");
}
#else
(void)dyn;
#endif
}
#ifdef HAVE_TRACE
static void sse_reflectcache(dynarec_arm_t* dyn, int ninst, int s1)
{
(void) ninst; (void)s1;
#if STEP > 1
for (int i=0; i<16; ++i)
if(dyn->ssecache[i]!=-1) {
VSTR128_U12(dyn->ssecache[i], xEmu, offsetof(x64emu_t, xmm[i]));
}
#else
(void)dyn;
#endif
}
#endif

void fpu_pushcache(dynarec_arm_t* dyn, int ninst, int s1)
{
(void) ninst; (void)s1;
#if STEP > 1
// only SSE regs needs to be push back to xEmu
int n=0;
@ -1045,11 +1130,14 @@ void fpu_pushcache(dynarec_arm_t* dyn, int ninst, int s1)
VSTR128_U12(dyn->ssecache[i], xEmu, offsetof(x64emu_t, xmm[i]));
}
MESSAGE(LOG_DUMP, "\t------- Push XMM Cache (%d)\n", n);
#else
(void)dyn;
#endif
}

void fpu_popcache(dynarec_arm_t* dyn, int ninst, int s1)
{
(void) ninst; (void)s1;
#if STEP > 1
// only SSE regs needs to be pop back from xEmu
int n=0;
@ -1064,6 +1152,8 @@ void fpu_popcache(dynarec_arm_t* dyn, int ninst, int s1)
VLDR128_U12(dyn->ssecache[i], xEmu, offsetof(x64emu_t, xmm[i]));
}
MESSAGE(LOG_DUMP, "\t------- Pop XMM Cache (%d)\n", n);
#else
(void)dyn;
#endif
}

@ -1096,6 +1186,8 @@ void fpu_reset(dynarec_arm_t* dyn, int ninst)

void emit_pf(dynarec_arm_t* dyn, int ninst, int s1, int s3, int s4)
{
MAYUSE(dyn); MAYUSE(ninst);
MAYUSE(s1); MAYUSE(s3); MAYUSE(s4);
// PF: (((emu->x64emu_parity_tab[(res) / 32] >> ((res) % 32)) & 1) == 0)
ANDw_mask(s3, s1, 0b011011, 0b000010); // mask=0xE0
LSRw(s3, s3, 5);
@ -1105,4 +1197,4 @@ void emit_pf(dynarec_arm_t* dyn, int ninst, int s1, int s3, int s4)
LSRw_REG(s4, s4, s3);
MVNw_REG(s4, s4);
BFIw(xFlags, s4, F_PF, 1);
}
}

@ -243,115 +243,115 @@
// R0 will not be pushed/popd if ret is -2. Flags are not save/restored
#define CALL_S(F, ret) call_c(dyn, ninst, F, x7, ret, 0, 0)

#define MARK if(dyn->insts) {dyn->insts[ninst].mark = (uintptr_t)dyn->arm_size;}
#define MARK if(dyn->insts) {dyn->insts[ninst].mark = dyn->arm_size;}
#define GETMARK ((dyn->insts)?dyn->insts[ninst].mark:(dyn->arm_size+4))
#define MARK2 if(dyn->insts) {dyn->insts[ninst].mark2 = (uintptr_t)dyn->arm_size;}
#define MARK2 if(dyn->insts) {dyn->insts[ninst].mark2 = dyn->arm_size;}
#define GETMARK2 ((dyn->insts)?dyn->insts[ninst].mark2:(dyn->arm_size+4))
#define MARK3 if(dyn->insts) {dyn->insts[ninst].mark3 = (uintptr_t)dyn->arm_size;}
#define MARK3 if(dyn->insts) {dyn->insts[ninst].mark3 = dyn->arm_size;}
#define GETMARK3 ((dyn->insts)?dyn->insts[ninst].mark3:(dyn->arm_size+4))
#define MARKF if(dyn->insts) {dyn->insts[ninst].markf = (uintptr_t)dyn->arm_size;}
#define MARKF if(dyn->insts) {dyn->insts[ninst].markf = dyn->arm_size;}
#define GETMARKF ((dyn->insts)?dyn->insts[ninst].markf:(dyn->arm_size+4))
#define MARKSEG if(dyn->insts) {dyn->insts[ninst].markseg = (uintptr_t)dyn->arm_size;}
#define MARKSEG if(dyn->insts) {dyn->insts[ninst].markseg = dyn->arm_size;}
#define GETMARKSEG ((dyn->insts)?dyn->insts[ninst].markseg:(dyn->arm_size+4))
#define MARKLOCK if(dyn->insts) {dyn->insts[ninst].marklock = (uintptr_t)dyn->arm_size;}
#define MARKLOCK if(dyn->insts) {dyn->insts[ninst].marklock = dyn->arm_size;}
#define GETMARKLOCK ((dyn->insts)?dyn->insts[ninst].marklock:(dyn->arm_size+4))

// Branch to MARK if cond (use j32)
// Branch to MARK if cond (use j64)
#define B_MARK(cond) \
j32 = GETMARK-(dyn->arm_size); \
Bcond(cond, j32)
// Branch to MARK unconditionnal (use j32)
j64 = GETMARK-(dyn->arm_size); \
Bcond(cond, j64)
// Branch to MARK unconditionnal (use j64)
#define B_MARK_nocond \
j32 = GETMARK-(dyn->arm_size); \
B(j32)
// Branch to MARK if reg is 0 (use j32)
j64 = GETMARK-(dyn->arm_size); \
B(j64)
// Branch to MARK if reg is 0 (use j64)
#define CBZxw_MARK(reg) \
j32 = GETMARK-(dyn->arm_size); \
CBZxw(reg, j32)
// Branch to MARK if reg is not 0 (use j32)
j64 = GETMARK-(dyn->arm_size); \
CBZxw(reg, j64)
// Branch to MARK if reg is not 0 (use j64)
#define CBNZx_MARK(reg) \
j32 = GETMARK-(dyn->arm_size); \
CBNZx(reg, j32)
// Branch to MARK if reg is not 0 (use j32)
j64 = GETMARK-(dyn->arm_size); \
CBNZx(reg, j64)
// Branch to MARK if reg is not 0 (use j64)
#define CBNZw_MARK(reg) \
j32 = GETMARK-(dyn->arm_size); \
CBNZw(reg, j32)
// Branch to MARK2 if cond (use j32)
j64 = GETMARK-(dyn->arm_size); \
CBNZw(reg, j64)
// Branch to MARK2 if cond (use j64)
#define B_MARK2(cond) \
j32 = GETMARK2-(dyn->arm_size); \
Bcond(cond, j32)
// Branch to MARK2 unconditionnal (use j32)
j64 = GETMARK2-(dyn->arm_size); \
Bcond(cond, j64)
// Branch to MARK2 unconditionnal (use j64)
#define B_MARK2_nocond \
j32 = GETMARK2-(dyn->arm_size); \
B(j32)
// Branch to MARK2 if reg is not 0 (use j32)
j64 = GETMARK2-(dyn->arm_size); \
B(j64)
// Branch to MARK2 if reg is not 0 (use j64)
#define CBNZx_MARK2(reg) \
j32 = GETMARK2-(dyn->arm_size); \
CBNZx(reg, j32)
j64 = GETMARK2-(dyn->arm_size); \
CBNZx(reg, j64)
// Test bit N of A and branch to MARK2 if set
#define TBNZ_MARK2(A, N) \
j32 = GETMARK2-(dyn->arm_size); \
TBNZ(A, N, j32)
// Branch to MARK3 if cond (use j32)
j64 = GETMARK2-(dyn->arm_size); \
TBNZ(A, N, j64)
// Branch to MARK3 if cond (use j64)
#define B_MARK3(cond) \
j32 = GETMARK3-(dyn->arm_size); \
Bcond(cond, j32)
// Branch to MARK3 unconditionnal (use j32)
j64 = GETMARK3-(dyn->arm_size); \
Bcond(cond, j64)
// Branch to MARK3 unconditionnal (use j64)
#define B_MARK3_nocond \
j32 = GETMARK3-(dyn->arm_size); \
B(j32)
// Branch to MARK3 if reg is not 0 (use j32)
j64 = GETMARK3-(dyn->arm_size); \
B(j64)
// Branch to MARK3 if reg is not 0 (use j64)
#define CBNZx_MARK3(reg) \
j32 = GETMARK3-(dyn->arm_size); \
CBNZx(reg, j32)
j64 = GETMARK3-(dyn->arm_size); \
CBNZx(reg, j64)
// Test bit N of A and branch to MARK3 if set
#define TBNZ_MARK3(A, N) \
j32 = GETMARK3-(dyn->arm_size); \
TBNZ(A, N, j32)
j64 = GETMARK3-(dyn->arm_size); \
TBNZ(A, N, j64)
// Test bit N of A and branch to MARK3 if not set
#define TBZ_MARK3(A, N) \
j32 = GETMARK3-(dyn->arm_size); \
TBZ(A, N, j32)
// Branch to next instruction if cond (use j32)
j64 = GETMARK3-(dyn->arm_size); \
TBZ(A, N, j64)
// Branch to next instruction if cond (use j64)
#define B_NEXT(cond) \
j32 = (dyn->insts)?(dyn->insts[ninst].epilog-(dyn->arm_size)):0; \
Bcond(cond, j32)
// Branch to next instruction unconditionnal (use j32)
j64 = (dyn->insts)?(dyn->insts[ninst].epilog-(dyn->arm_size)):0; \
Bcond(cond, j64)
// Branch to next instruction unconditionnal (use j64)
#define B_NEXT_nocond \
j32 = (dyn->insts)?(dyn->insts[ninst].epilog-(dyn->arm_size)):0;\
B(j32)
// Branch to next instruction if reg is 0 (use j32)
j64 = (dyn->insts)?(dyn->insts[ninst].epilog-(dyn->arm_size)):0;\
B(j64)
// Branch to next instruction if reg is 0 (use j64)
#define CBZw_NEXT(reg) \
j32 = (dyn->insts)?(dyn->insts[ninst].epilog-(dyn->arm_size)):0; \
CBZw(reg, j32)
// Branch to next instruction if reg is 0 (use j32)
j64 = (dyn->insts)?(dyn->insts[ninst].epilog-(dyn->arm_size)):0; \
CBZw(reg, j64)
// Branch to next instruction if reg is 0 (use j64)
#define CBZx_NEXT(reg) \
j32 = (dyn->insts)?(dyn->insts[ninst].epilog-(dyn->arm_size)):0; \
CBZx(reg, j32)
j64 = (dyn->insts)?(dyn->insts[ninst].epilog-(dyn->arm_size)):0; \
CBZx(reg, j64)
// Test bit N of A and branch to next instruction if not set
#define TBZ_NEXT(A, N) \
j32 = (dyn->insts)?(dyn->insts[ninst].epilog-(dyn->arm_size)):0; \
TBZ(A, N, j32)
// Branch to MARKSEG if cond (use j32)
j64 = (dyn->insts)?(dyn->insts[ninst].epilog-(dyn->arm_size)):0; \
TBZ(A, N, j64)
// Branch to MARKSEG if cond (use j64)
#define B_MARKSEG(cond) \
j32 = GETMARKSEG-(dyn->arm_size); \
Bcond(cond, j32)
// Branch to MARKSEG if reg is 0 (use j32)
j64 = GETMARKSEG-(dyn->arm_size); \
Bcond(cond, j64)
// Branch to MARKSEG if reg is 0 (use j64)
#define CBZw_MARKSEG(reg) \
j32 = GETMARKSEG-(dyn->arm_size); \
CBZw(reg, j32)
// Branch to MARKSEG if reg is not 0 (use j32)
j64 = GETMARKSEG-(dyn->arm_size); \
CBZw(reg, j64)
// Branch to MARKSEG if reg is not 0 (use j64)
#define CBNZw_MARKSEG(reg) \
j32 = GETMARKSEG-(dyn->arm_size); \
CBNZw(reg, j32)
// Branch to MARKLOCK if cond (use j32)
j64 = GETMARKSEG-(dyn->arm_size); \
CBNZw(reg, j64)
// Branch to MARKLOCK if cond (use j64)
#define B_MARKLOCK(cond) \
j32 = GETMARKLOCK-(dyn->arm_size); \
Bcond(cond, j32)
// Branch to MARKLOCK if reg is not 0 (use j32)
j64 = GETMARKLOCK-(dyn->arm_size); \
Bcond(cond, j64)
// Branch to MARKLOCK if reg is not 0 (use j64)
#define CBNZx_MARKLOCK(reg) \
j32 = GETMARKLOCK-(dyn->arm_size); \
CBNZx(reg, j32)
j64 = GETMARKLOCK-(dyn->arm_size); \
CBNZx(reg, j64)

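For context, every branch macro in this hunk switches its scratch offset from j32 to j64: with arm_size and the mark fields widened to size_t/uintptr_t, the subtraction is now a 64-bit operation, and keeping the result in int64_t avoids narrowing it back to 32 bits. A stand-alone sketch of the arithmetic (illustrative names, not the real macros):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: the j64 = GETMARK-(dyn->arm_size) lines compute a small
 * signed byte offset from two 64-bit unsigned positions. */
static int64_t branch_delta(size_t mark, size_t arm_size) {
    return (int64_t)mark - (int64_t)arm_size;
}

int main(void) {
    size_t arm_size = 128;   // bytes of ARM code emitted so far in the block
    size_t mark = 64;        // position recorded earlier by MARK
    printf("branch displacement: %lld bytes\n", (long long)branch_delta(mark, arm_size)); // -64
    return 0;
}
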
#define IFX(A) if(dyn->insts && (dyn->insts[ninst].x64.need_flags&(A)))
#define IFX_PENDOR0 if(dyn->insts && (dyn->insts[ninst].x64.need_flags&(X_PEND) || !dyn->insts[ninst].x64.need_flags))
@ -480,8 +480,8 @@
if(((A)!=X_PEND) && dyn->state_flags!=SF_SET) { \
if(dyn->state_flags!=SF_PENDING) { \
LDRw_U12(x3, xEmu, offsetof(x64emu_t, df)); \
j32 = (GETMARKF)-(dyn->arm_size); \
CBZw(x3, j32); \
j64 = (GETMARKF)-(dyn->arm_size); \
CBZw(x3, j64); \
} \
CALL_(UpdateFlags, -1, 0); \
MARKF; \
@ -595,16 +595,16 @@ void* arm64_next(x64emu_t* emu, uintptr_t addr);
#define dynarec64_F20F STEPNAME(dynarec64_F20F)
#define dynarec64_F30F STEPNAME(dynarec64_F30F)

#define geted STEPNAME(geted_)
#define geted32 STEPNAME(geted32_)
#define geted16 STEPNAME(geted16_)
#define jump_to_epilog STEPNAME(jump_to_epilog_)
#define jump_to_next STEPNAME(jump_to_next_)
#define ret_to_epilog STEPNAME(ret_to_epilog_)
#define retn_to_epilog STEPNAME(retn_to_epilog_)
#define iret_to_epilog STEPNAME(iret_to_epilog_)
#define call_c STEPNAME(call_c_)
#define grab_segdata STEPNAME(grab_segdata_)
#define geted STEPNAME(geted)
#define geted32 STEPNAME(geted32)
#define geted16 STEPNAME(geted16)
#define jump_to_epilog STEPNAME(jump_to_epilog)
#define jump_to_next STEPNAME(jump_to_next)
#define ret_to_epilog STEPNAME(ret_to_epilog)
#define retn_to_epilog STEPNAME(retn_to_epilog)
#define iret_to_epilog STEPNAME(iret_to_epilog)
#define call_c STEPNAME(call_c)
#define grab_segdata STEPNAME(grab_segdata)
#define emit_cmp8 STEPNAME(emit_cmp8)
#define emit_cmp16 STEPNAME(emit_cmp16)
#define emit_cmp32 STEPNAME(emit_cmp32)
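For context, the STEPNAME change only renames the generated per-pass symbols; the old argument with a trailing underscore presumably produced names like geted__1, the new one produces geted_1. Below is a hedged reconstruction of how such a multi-pass name-mangling macro commonly works; the real STEPNAME definition may differ in detail:

#include <stdio.h>

/* Hypothetical reconstruction, not copied from box64: the same helper source is
 * compiled once per dynarec pass with a different STEP, so each pass gets its
 * own symbol (geted_0, geted_1, ...) instead of a name collision. */
#define STEP 1                        /* normally set by the build, per pass */
#define STEPNAME2b(x, y) x##_##y
#define STEPNAME2(x, y)  STEPNAME2b(x, y)
#define STEPNAME(x)      STEPNAME2(x, STEP)

#define geted STEPNAME(geted)         /* every use of geted now means geted_1 */

static void geted(void) { puts("this pass calls geted_1"); }

int main(void) { geted(); return 0; }
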
@ -706,13 +706,13 @@ void* arm64_next(x64emu_t* emu, uintptr_t addr);
#endif

/* setup r2 to address pointed by */
uintptr_t geted(dynarec_arm_t* dyn, uintptr_t addr, int ninst, uint8_t nextop, uint8_t* ed, uint8_t hint, int* fixaddress, int absmax, uint32_t mask, rex_t rex, int s, int delta);
uintptr_t geted(dynarec_arm_t* dyn, uintptr_t addr, int ninst, uint8_t nextop, uint8_t* ed, uint8_t hint, int64_t* fixaddress, int absmax, uint32_t mask, rex_t rex, int s, int delta);

/* setup r2 to address pointed by */
uintptr_t geted32(dynarec_arm_t* dyn, uintptr_t addr, int ninst, uint8_t nextop, uint8_t* ed, uint8_t hint, int* fixaddress, int absmax, uint32_t mask, rex_t rex, int s, int delta);
uintptr_t geted32(dynarec_arm_t* dyn, uintptr_t addr, int ninst, uint8_t nextop, uint8_t* ed, uint8_t hint, int64_t* fixaddress, int absmax, uint32_t mask, rex_t rex, int s, int delta);

/* setup r2 to address pointed by */
uintptr_t geted16(dynarec_arm_t* dyn, uintptr_t addr, int ninst, uint8_t nextop, uint8_t* ed, uint8_t hint, int* fixaddress, int absmax, uint32_t mask, int s);
uintptr_t geted16(dynarec_arm_t* dyn, uintptr_t addr, int ninst, uint8_t nextop, uint8_t* ed, uint8_t hint, int64_t* fixaddress, int absmax, uint32_t mask, int s);


// generic x64 helper
@ -978,4 +978,4 @@ uintptr_t dynarec64_F30F(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
, cNE, cEQ, X_SF|X_OF|X_ZF) \
break

#endif //__DYNAREC_ARM64_HELPER_H__
#endif //__DYNAREC_ARM64_HELPER_H__

@ -28,7 +28,7 @@ typedef struct dynarec_arm_s {
uint32_t isize; // size in byte of x64 instructions included
void* block; // memory pointer where next instruction is emited
uintptr_t arm_start; // start of the arm code
int arm_size; // size of emitted arm code
size_t arm_size; // size of emitted arm code
int state_flags;// actual state for on-demand flags
uintptr_t last_ip; // last set IP in RIP (or NULL if unclean state)
int8_t x87cache[8];// cache status for the 8 x87 register behind the fpu stack
@ -59,4 +59,4 @@ int is_instructions(dynarec_arm_t *dyn, uintptr_t addr, int n);

int Table64(dynarec_arm_t *dyn, uint64_t val); // add a value to etable64 (if needed) and gives back the imm19 to use in LDR_literal

#endif //__DYNAREC_ARM_PRIVATE_H_
#endif //__DYNAREC_ARM_PRIVATE_H_

@ -1178,6 +1178,7 @@ void* GetTLSPointer(box64context_t* context, elfheader_t* h)
#ifdef DYNAREC
dynablocklist_t* GetDynablocksFromAddress(box64context_t *context, uintptr_t addr)
{
(void)context;
// if we are here, the there is not block in standard "space"
/*dynablocklist_t* ret = getDBFromAddress(addr);
if(ret) {

@ -1400,7 +1400,7 @@ void idiv64(x64emu_t *emu, uint64_t s)
}
quot = dvd/(int64_t)s;
mod = dvd%(int64_t)s;
if (llabs(quot) > 0x7fffffffffffffffL) {
if ((quot > 0x7fffffffffffffffLL) || (quot < -0x7fffffffffffffffLL)) {
INTR_RAISE_DIV0(emu);
return;
}
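A note on the idiv64 guard: llabs() of any 64-bit value can never exceed 0x7fffffffffffffff (and llabs(INT64_MIN) is undefined), so the old test could not fire; and if quot is a wider 128-bit intermediate, as a 64-bit IDIV of an RDX:RAX dividend suggests, llabs() would truncate it first. The new line checks the full-width quotient against the int64_t range directly. Illustrative sketch (uses the GCC/Clang __int128 extension; the actual type of quot in box64 is an assumption here):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void) {
    __int128 quot = (__int128)1 << 70;                 // quotient clearly outside int64_t
    long long low64 = (long long)quot;                  // what an llabs() call would actually see: 0
    int old_style = llabs(low64) > 0x7fffffffffffffffLL;                                  // never true
    int range_chk = (quot > 0x7fffffffffffffffLL) || (quot < -0x7fffffffffffffffLL);      // true
    printf("old-style check fires: %d, explicit range check fires: %d\n", old_style, range_chk);
    return 0;
}
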
@ -21,10 +21,10 @@ typedef struct dynablock_s dynablock_t;
typedef struct dynablocklist_s dynablocklist_t;
// custom protection flag to mark Page that are Write protected for Dynarec purpose
uintptr_t AllocDynarecMap(dynablock_t* db, size_t size);
void FreeDynarecMap(dynablock_t* db, uintptr_t addr, uint32_t size);
void FreeDynarecMap(dynablock_t* db, uintptr_t addr, size_t size);

void addDBFromAddressRange(uintptr_t addr, uintptr_t size);
void cleanDBFromAddressRange(uintptr_t addr, uintptr_t size, int destroy);
void addDBFromAddressRange(uintptr_t addr, size_t size);
void cleanDBFromAddressRange(uintptr_t addr, size_t size, int destroy);

dynablocklist_t* getDB(uintptr_t idx);
void addJumpTableIfDefault64(void* addr, void* jmp);
@ -38,14 +38,14 @@ uintptr_t getJumpTableAddress64(uintptr_t addr);
#define PROT_ALLOC 0x40
#define PROT_CUSTOM (PROT_DYNAREC|PROT_ALLOC)

void updateProtection(uintptr_t addr, uintptr_t size, uint32_t prot);
void setProtection(uintptr_t addr, uintptr_t size, uint32_t prot);
void freeProtection(uintptr_t addr, uintptr_t size);
void updateProtection(uintptr_t addr, size_t size, uint32_t prot);
void setProtection(uintptr_t addr, size_t size, uint32_t prot);
void freeProtection(uintptr_t addr, size_t size);
uint32_t getProtection(uintptr_t addr);
#ifdef DYNAREC
void protectDB(uintptr_t addr, uintptr_t size);
void protectDBnolock(uintptr_t addr, uintptr_t size);
void unprotectDB(uintptr_t addr, uintptr_t size);
void protectDB(uintptr_t addr, size_t size);
void protectDBnolock(uintptr_t addr, size_t size);
void unprotectDB(uintptr_t addr, size_t size);
void lockDB();
void unlockDB();
#endif
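The prototypes above trade uint32_t and uintptr_t size parameters for size_t, matching AllocDynarecMap(): a length is a size, not an address, and a 32-bit size parameter silently truncates ranges larger than 4 GiB on 64-bit hosts. A minimal illustration with hypothetical function names:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical signatures, for illustration only. */
static void free_map_u32(uintptr_t addr, uint32_t size) { printf("uint32_t size: %u bytes at %#lx\n", size, (unsigned long)addr); }
static void free_map_szt(uintptr_t addr, size_t size)   { printf("size_t   size: %zu bytes at %#lx\n", size, (unsigned long)addr); }

int main(void) {
    size_t big = (size_t)5 * 1024 * 1024 * 1024;   // a 5 GiB range
    free_map_u32(0x10000, big);   // argument truncated modulo 2^32, reports 1 GiB
    free_map_szt(0x10000, big);   // full length preserved
    return 0;
}
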
@ -11,7 +11,6 @@ extern int box64_pagesize;
extern int box64_dynarec_dump;
extern int box64_dynarec_trace;
extern int box64_dynarec_forced;
extern int box64_dynarec_largest;
extern uintptr_t box64_nodynarec_start, box64_nodynarec_end;
#endif
extern int dlsym_error; // log dlsym error

@ -556,7 +556,7 @@ void my_sigactionhandler_oldcode(int32_t sig, siginfo_t* info, void * ucntx, int
sigcontext->uc_mcontext.gregs[X64_TRAPNO] = (info->si_code == SEGV_ACCERR)?13:14;
} else if(info->si_code==SEGV_ACCERR && !(prot&PROT_WRITE)) {
sigcontext->uc_mcontext.gregs[X64_ERR] = 0x0002; // write flag issue
if(abs((intptr_t)info->si_addr-(intptr_t)sigcontext->uc_mcontext.gregs[X64_RSP])<16)
if(labs((intptr_t)info->si_addr-(intptr_t)sigcontext->uc_mcontext.gregs[X64_RSP])<16)
sigcontext->uc_mcontext.gregs[X64_TRAPNO] = 12; // stack overflow probably
else
sigcontext->uc_mcontext.gregs[X64_TRAPNO] = 14; // PAGE_FAULT
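On the abs() to labs() fix: abs() takes an int, so the 64-bit difference between si_addr and the guest RSP is truncated to its low 32 bits before the < 16 test, and two far-apart addresses can be misclassified as a stack overflow. labs() keeps the full long. A stand-alone illustration with made-up addresses:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void) {
    intptr_t fault_addr = 0x7ffd00001000;   // hypothetical si_addr
    intptr_t rsp        = 0x7f0000000ff8;   // hypothetical guest RSP, about 1 TiB away
    intptr_t diff = fault_addr - rsp;
    /* abs() sees only the low 32 bits of diff (here: 8), so the "< 16" test
     * would wrongly classify this as a stack overflow. */
    printf("abs : %d\n",  abs((int)diff));
    printf("labs: %ld\n", labs((long)diff));
    return 0;
}
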
@ -33,7 +33,6 @@ int box64_pagesize;
int box64_dynarec = 1;
int box64_dynarec_dump = 0;
int box64_dynarec_forced = 0;
int box64_dynarec_largest = 0;
uintptr_t box64_nodynarec_start = 0;
uintptr_t box64_nodynarec_end = 0;
#else //DYNAREC