ARM system emulation (Paul Brook)

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@1661 c046a42c-6fe2-441c-8c8c-71466251a162
bellard 2005-11-26 10:38:39 +00:00
parent 0e43e99c04
commit b5ff1b3127
20 changed files with 2539 additions and 248 deletions


@ -1,5 +1,8 @@
version 0.7.3:
- ARM system emulation: Arm Integrator/CP board with an arm1026ej-s
cpu (Paul Brook)
- SMP support
- Mac OS X cocoa improvements (Mike Kronenberg)
- Mac OS X CoreAudio driver (Mike Kronenberg)
- DirectSound driver (malc)
@ -10,7 +13,6 @@ version 0.7.3:
- Linux host serial port access
- Linux host low level parallel port access
- New network emulation code supporting VLANs.
- SMP support
version 0.7.2:


@ -211,7 +211,7 @@ LIBOBJS+= op_helper.o helper.o
endif
ifeq ($(TARGET_BASE_ARCH), arm)
LIBOBJS+= op_helper.o
LIBOBJS+= op_helper.o helper.o
endif
# NOTE: the disassembler code is only needed for debugging
@ -324,6 +324,9 @@ VL_OBJS+= sun4m.o tcx.o lance.o iommu.o m48t59.o magic-load.o slavio_intctl.o
VL_OBJS+= slavio_timer.o slavio_serial.o slavio_misc.o fdc.o esp.o
endif
endif
ifeq ($(TARGET_BASE_ARCH), arm)
VL_OBJS+= integratorcp.o ps2.o
endif
ifdef CONFIG_GDBSTUB
VL_OBJS+=gdbstub.o
endif

configure

@ -227,7 +227,7 @@ fi
if test -z "$target_list" ; then
# these targets are portable
target_list="i386-softmmu ppc-softmmu sparc-softmmu x86_64-softmmu mips-softmmu"
target_list="i386-softmmu ppc-softmmu sparc-softmmu x86_64-softmmu mips-softmmu arm-softmmu"
# the following are Linux specific
if [ "$linux" = "yes" ] ; then
target_list="i386-user arm-user armeb-user sparc-user ppc-user $target_list"


@ -172,7 +172,9 @@ static inline TranslationBlock *tb_find_fast(void)
pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
flags = env->thumb | (env->vfp.vec_len << 1)
| (env->vfp.vec_stride << 4);
| (env->vfp.vec_stride << 4);
if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
flags |= (1 << 6);
cs_base = 0;
pc = env->regs[15];
#elif defined(TARGET_SPARC)
@ -322,15 +324,6 @@ int cpu_exec(CPUState *env1)
CC_OP = CC_OP_EFLAGS;
env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
{
unsigned int psr;
psr = env->cpsr;
env->CF = (psr >> 29) & 1;
env->NZF = (psr & 0xc0000000) ^ 0x40000000;
env->VF = (psr << 3) & 0x80000000;
env->QF = (psr >> 27) & 1;
env->cpsr = psr & ~CACHED_CPSR_BITS;
}
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
saved_regwptr = REGWPTR;
@ -379,6 +372,8 @@ int cpu_exec(CPUState *env1)
do_interrupt(env);
#elif defined(TARGET_SPARC)
do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
do_interrupt(env);
#endif
}
env->exception_index = -1;
@ -508,8 +503,19 @@ int cpu_exec(CPUState *env1)
//do_interrupt(0, 0, 0, 0, 0);
env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
}
#elif defined(TARGET_ARM)
if (interrupt_request & CPU_INTERRUPT_FIQ
&& !(env->uncached_cpsr & CPSR_F)) {
env->exception_index = EXCP_FIQ;
do_interrupt(env);
}
if (interrupt_request & CPU_INTERRUPT_HARD
&& !(env->uncached_cpsr & CPSR_I)) {
env->exception_index = EXCP_IRQ;
do_interrupt(env);
}
#endif
if (interrupt_request & CPU_INTERRUPT_EXITTB) {
if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
/* ensure that no TB jump will be modified as
the program flow was changed */
@ -526,7 +532,7 @@ int cpu_exec(CPUState *env1)
}
}
#ifdef DEBUG_EXEC
if ((loglevel & CPU_LOG_EXEC)) {
if ((loglevel & CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
/* restore flags in standard format */
#ifdef reg_EAX
@ -557,9 +563,7 @@ int cpu_exec(CPUState *env1)
cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
env->cpsr = compute_cpsr();
cpu_dump_state(env, logfile, fprintf, 0);
env->cpsr &= ~CACHED_CPSR_BITS;
#elif defined(TARGET_SPARC)
REGWPTR = env->regbase + (env->cwp * 16);
env->regwptr = REGWPTR;
@ -760,7 +764,6 @@ int cpu_exec(CPUState *env1)
EDI = saved_EDI;
#endif
#elif defined(TARGET_ARM)
env->cpsr = compute_cpsr();
/* XXX: Save/restore host fpu exception state?. */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)


@ -549,8 +549,10 @@ static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
is_user = ((env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_UM);
#elif defined (TARGET_SPARC)
is_user = (env->psrs == 0);
#elif defined (TARGET_ARM)
is_user = ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR);
#else
#error "Unimplemented !"
#error unimplemented CPU
#endif
if (__builtin_expect(env->tlb_read[is_user][index].address !=
(addr & TARGET_PAGE_MASK), 0)) {

exec.c

@ -1868,14 +1868,14 @@ int cpu_register_io_memory(int io_index,
int i;
if (io_index <= 0) {
if (io_index >= IO_MEM_NB_ENTRIES)
if (io_mem_nb >= IO_MEM_NB_ENTRIES)
return -1;
io_index = io_mem_nb++;
} else {
if (io_index >= IO_MEM_NB_ENTRIES)
return -1;
}
for(i = 0;i < 3; i++) {
io_mem_read[io_index][i] = mem_read[i];
io_mem_write[io_index][i] = mem_write[i];


@ -399,7 +399,7 @@ static int cpu_gdb_read_registers(CPUState *env, uint8_t *mem_buf)
memset (ptr, 0, 8 * 12 + 4);
ptr += 8 * 12 + 4;
/* CPSR (4 bytes). */
*(uint32_t *)ptr = tswapl (env->cpsr);
*(uint32_t *)ptr = tswapl (cpsr_read(env));
ptr += 4;
return ptr - mem_buf;
@ -419,7 +419,7 @@ static void cpu_gdb_write_registers(CPUState *env, uint8_t *mem_buf, int size)
}
/* Ignore FPA regs and scr. */
ptr += 8 * 12 + 4;
env->cpsr = tswapl(*(uint32_t *)ptr);
cpsr_write (env, tswapl(*(uint32_t *)ptr), 0xffffffff);
}
#else
static int cpu_gdb_read_registers(CPUState *env, uint8_t *mem_buf)
@ -463,6 +463,8 @@ static int gdb_handle_packet(GDBState *s, CPUState *env, const char *line_buf)
#elif defined (TARGET_SPARC)
env->pc = addr;
env->npc = addr + 4;
#elif defined (TARGET_ARM)
env->regs[15] = addr;
#endif
}
#ifdef CONFIG_USER_ONLY
@ -481,6 +483,8 @@ static int gdb_handle_packet(GDBState *s, CPUState *env, const char *line_buf)
#elif defined (TARGET_SPARC)
env->pc = addr;
env->npc = addr + 4;
#elif defined (TARGET_ARM)
env->regs[15] = addr;
#endif
}
cpu_single_step(env, 1);

hw/integratorcp.c (new file, 1232 lines; diff suppressed because it is too large)


@ -331,6 +331,7 @@ void cpu_loop(CPUARMState *env)
int trapnr;
unsigned int n, insn;
target_siginfo_t info;
uint32_t addr;
for(;;) {
trapnr = cpu_arm_exec(env);
@ -397,13 +398,18 @@ void cpu_loop(CPUARMState *env)
/* just indicate that signals should be handled asap */
break;
case EXCP_PREFETCH_ABORT:
addr = env->cp15.c6_insn;
goto do_segv;
case EXCP_DATA_ABORT:
addr = env->cp15.c6_data;
goto do_segv;
do_segv:
{
info.si_signo = SIGSEGV;
info.si_errno = 0;
/* XXX: check env->error_code */
info.si_code = TARGET_SEGV_MAPERR;
info._sifields._sigfault._addr = env->cp15_6;
info._sifields._sigfault._addr = addr;
queue_signal(info.si_signo, &info);
}
break;
@ -1190,10 +1196,10 @@ int main(int argc, char **argv)
#elif defined(TARGET_ARM)
{
int i;
cpsr_write(env, regs->uregs[16], 0xffffffff);
for(i = 0; i < 16; i++) {
env->regs[i] = regs->uregs[i];
}
env->cpsr = regs->uregs[16];
ts->stack_base = info->start_stack;
ts->heap_base = info->brk;
/* This will be filled in on the first SYS_HEAPINFO call. */


@ -1003,7 +1003,7 @@ setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
__put_user_error(env->regs[14], &sc->arm_lr, err);
__put_user_error(env->regs[15], &sc->arm_pc, err);
#ifdef TARGET_CONFIG_CPU_32
__put_user_error(env->cpsr, &sc->arm_cpsr, err);
__put_user_error(cpsr_read(env), &sc->arm_cpsr, err);
#endif
__put_user_error(/* current->thread.trap_no */ 0, &sc->trap_no, err);
@ -1040,9 +1040,9 @@ setup_return(CPUState *env, struct emulated_sigaction *ka,
target_ulong retcode;
int thumb = 0;
#if defined(TARGET_CONFIG_CPU_32)
#if 0
target_ulong cpsr = env->cpsr;
#if 0
/*
* Maybe we need to deliver a 32-bit signal to a 26-bit task.
*/
@ -1088,8 +1088,10 @@ setup_return(CPUState *env, struct emulated_sigaction *ka,
env->regs[14] = retcode;
env->regs[15] = handler & (thumb ? ~1 : ~3);
#if 0
#ifdef TARGET_CONFIG_CPU_32
env->cpsr = cpsr;
#endif
#endif
return 0;
@ -1157,6 +1159,7 @@ static int
restore_sigcontext(CPUState *env, struct target_sigcontext *sc)
{
int err = 0;
uint32_t cpsr;
__get_user_error(env->regs[0], &sc->arm_r0, err);
__get_user_error(env->regs[1], &sc->arm_r1, err);
@ -1175,7 +1178,8 @@ restore_sigcontext(CPUState *env, struct target_sigcontext *sc)
__get_user_error(env->regs[14], &sc->arm_lr, err);
__get_user_error(env->regs[15], &sc->arm_pc, err);
#ifdef TARGET_CONFIG_CPU_32
__get_user_error(env->cpsr, &sc->arm_cpsr, err);
__get_user_error(cpsr, &sc->arm_cpsr, err);
cpsr_write(env, cpsr, 0xffffffff);
#endif
err |= !valid_user_regs(env);


@ -59,6 +59,10 @@
#define CPU_MEM_INDEX ((env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_UM)
#elif defined (TARGET_SPARC)
#define CPU_MEM_INDEX ((env->psrs) == 0)
#elif defined (TARGET_ARM)
#define CPU_MEM_INDEX ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR)
#else
#error unsupported CPU
#endif
#define MMUSUFFIX _mmu
@ -72,6 +76,10 @@
#define CPU_MEM_INDEX ((env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_UM)
#elif defined (TARGET_SPARC)
#define CPU_MEM_INDEX ((env->psrs) == 0)
#elif defined (TARGET_ARM)
#define CPU_MEM_INDEX ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR)
#else
#error unsupported CPU
#endif
#define MMUSUFFIX _cmmu


@ -32,6 +32,8 @@
#define EXCP_SWI 2 /* software interrupt */
#define EXCP_PREFETCH_ABORT 3
#define EXCP_DATA_ABORT 4
#define EXCP_IRQ 5
#define EXCP_FIQ 6
/* We currently assume float and double are IEEE single and double
precision respectively.
@ -42,8 +44,22 @@
*/
typedef struct CPUARMState {
/* Regs for current mode. */
uint32_t regs[16];
uint32_t cpsr;
/* Frequently accessed CPSR bits are stored separately for efficiency.
This contains all the other bits. Use cpsr_{read,write} to access
the whole CPSR. */
uint32_t uncached_cpsr;
uint32_t spsr;
/* Banked registers. */
uint32_t banked_spsr[6];
uint32_t banked_r13[6];
uint32_t banked_r14[6];
/* These hold r8-r12. */
uint32_t usr_regs[5];
uint32_t fiq_regs[5];
/* cpsr flag cache for faster execution */
uint32_t CF; /* 0 or 1 */
@ -53,8 +69,21 @@ typedef struct CPUARMState {
int thumb; /* 0 = arm mode, 1 = thumb mode */
/* coprocessor 15 (MMU) status */
uint32_t cp15_6;
/* System control coprocessor (cp15) */
struct {
uint32_t c1_sys; /* System control register. */
uint32_t c1_coproc; /* Coprocessor access register. */
uint32_t c2; /* MMU translation table base. */
uint32_t c3; /* MMU domain access control register. */
uint32_t c5_insn; /* Fault status registers. */
uint32_t c5_data;
uint32_t c6_insn; /* Fault address registers. */
uint32_t c6_data;
uint32_t c9_insn; /* Cache lockdown registers. */
uint32_t c9_data;
uint32_t c13_fcse; /* FCSE PID. */
uint32_t c13_context; /* Context ID. */
} cp15;
/* exception/interrupt handling */
jmp_buf jmp_env;
@ -87,6 +116,9 @@ typedef struct CPUARMState {
CPUARMState *cpu_arm_init(void);
int cpu_arm_exec(CPUARMState *s);
void cpu_arm_close(CPUARMState *s);
void do_interrupt(CPUARMState *);
void switch_mode(CPUARMState *, int);
/* you can call this signal handler from your SIGBUS and SIGSEGV
signal handlers to inform the virtual CPU of exceptions. non zero
is returned if the signal was handled by the virtual CPU. */
@ -94,7 +126,69 @@ struct siginfo;
int cpu_arm_signal_handler(int host_signum, struct siginfo *info,
void *puc);
#define CPSR_M (0x1f)
#define CPSR_T (1 << 5)
#define CPSR_F (1 << 6)
#define CPSR_I (1 << 7)
#define CPSR_A (1 << 8)
#define CPSR_E (1 << 9)
#define CPSR_IT_2_7 (0xfc00)
/* Bits 20-23 reserved. */
#define CPSR_J (1 << 24)
#define CPSR_IT_0_1 (3 << 25)
#define CPSR_Q (1 << 27)
#define CPSR_NZCV (0xf << 28)
#define CACHED_CPSR_BITS (CPSR_T | CPSR_Q | CPSR_NZCV)
/* Return the current CPSR value. */
static inline uint32_t cpsr_read(CPUARMState *env)
{
int ZF;
ZF = (env->NZF == 0);
return env->uncached_cpsr | (env->NZF & 0x80000000) | (ZF << 30) |
(env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
| (env->thumb << 5);
}
/* Set the CPSR. Note that some bits of mask must be all-set or all-clear. */
static inline void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
{
/* NOTE: N = 1 and Z = 1 cannot be stored currently */
if (mask & CPSR_NZCV) {
env->NZF = (val & 0xc0000000) ^ 0x40000000;
env->CF = (val >> 29) & 1;
env->VF = (val << 3) & 0x80000000;
}
if (mask & CPSR_Q)
env->QF = ((val & CPSR_Q) != 0);
if (mask & CPSR_T)
env->thumb = ((val & CPSR_T) != 0);
if ((env->uncached_cpsr ^ val) & mask & CPSR_M) {
switch_mode(env, val & CPSR_M);
}
mask &= ~CACHED_CPSR_BITS;
env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
}
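As an illustration of this split representation (a hypothetical usage sketch, not part of the patch, assuming an initialised CPUARMState *env): a caller that only wants to change the condition flags passes a partial mask, and cpsr_read() reassembles the architectural value from uncached_cpsr plus the cached CF/NZF/VF/QF fields.
cpsr_write(env, 0x60000000, CPSR_NZCV); /* set Z and C, clear N and V; mode, I/F and T bits untouched */
uint32_t psr = cpsr_read(env);          /* bits 31..28 now read back as 0110 */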
enum arm_cpu_mode {
ARM_CPU_MODE_USR = 0x10,
ARM_CPU_MODE_FIQ = 0x11,
ARM_CPU_MODE_IRQ = 0x12,
ARM_CPU_MODE_SVC = 0x13,
ARM_CPU_MODE_ABT = 0x17,
ARM_CPU_MODE_UND = 0x1b,
ARM_CPU_MODE_SYS = 0x1f
};
#if defined(CONFIG_USER_ONLY)
#define TARGET_PAGE_BITS 12
#else
/* The ARM MMU allows 1k pages. */
/* ??? Linux doesn't actually use these, and they're deprecated in recent
architecture revisions. Maybe add a configure option to disable them. */
#define TARGET_PAGE_BITS 10
#endif
#include "cpu-all.h"
#endif


@ -34,16 +34,6 @@ register uint32_t T2 asm(AREG3);
#include "cpu.h"
#include "exec-all.h"
/* Implemented CPSR bits. */
#define CACHED_CPSR_BITS 0xf8000000
static inline int compute_cpsr(void)
{
int ZF;
ZF = (env->NZF == 0);
return env->cpsr | (env->NZF & 0x80000000) | (ZF << 30) |
(env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27);
}
static inline void env_to_regs(void)
{
}
@ -55,10 +45,17 @@ static inline void regs_to_env(void)
int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
int is_user, int is_softmmu);
#if !defined(CONFIG_USER_ONLY)
#include "softmmu_exec.h"
#endif
/* In op_helper.c */
void cpu_lock(void);
void cpu_unlock(void);
void helper_set_cp15(CPUState *, uint32_t, uint32_t);
uint32_t helper_get_cp15(CPUState *, uint32_t);
void cpu_loop_exit(void);
void raise_exception(int);

target-arm/helper.c (new file, 555 lines)

@ -0,0 +1,555 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "cpu.h"
#include "exec-all.h"
#if defined(CONFIG_USER_ONLY)
void do_interrupt (CPUState *env)
{
env->exception_index = -1;
}
int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
int is_user, int is_softmmu)
{
if (rw == 2) {
env->exception_index = EXCP_PREFETCH_ABORT;
env->cp15.c6_insn = address;
} else {
env->exception_index = EXCP_DATA_ABORT;
env->cp15.c6_data = address;
}
return 1;
}
target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
return addr;
}
/* These should probably raise undefined insn exceptions. */
void helper_set_cp15(CPUState *env, uint32_t insn, uint32_t val)
{
cpu_abort(env, "cp15 insn %08x\n", insn);
}
uint32_t helper_get_cp15(CPUState *env, uint32_t insn)
{
cpu_abort(env, "cp15 insn %08x\n", insn);
return 0;
}
void switch_mode(CPUState *env, int mode)
{
if (mode != ARM_CPU_MODE_USR)
cpu_abort(env, "Tried to switch out of user mode\n");
}
#else
/* Map CPU modes onto saved register banks. */
static inline int bank_number (int mode)
{
switch (mode) {
case ARM_CPU_MODE_USR:
case ARM_CPU_MODE_SYS:
return 0;
case ARM_CPU_MODE_SVC:
return 1;
case ARM_CPU_MODE_ABT:
return 2;
case ARM_CPU_MODE_UND:
return 3;
case ARM_CPU_MODE_IRQ:
return 4;
case ARM_CPU_MODE_FIQ:
return 5;
}
cpu_abort(cpu_single_env, "Bad mode %x\n", mode);
return -1;
}
void switch_mode(CPUState *env, int mode)
{
int old_mode;
int i;
old_mode = env->uncached_cpsr & CPSR_M;
if (mode == old_mode)
return;
if (old_mode == ARM_CPU_MODE_FIQ) {
memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
} else if (mode == ARM_CPU_MODE_FIQ) {
memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
}
i = bank_number(old_mode);
env->banked_r13[i] = env->regs[13];
env->banked_r14[i] = env->regs[14];
env->banked_spsr[i] = env->spsr;
i = bank_number(mode);
env->regs[13] = env->banked_r13[i];
env->regs[14] = env->banked_r14[i];
env->spsr = env->banked_spsr[i];
}
/* Handle a CPU exception. */
void do_interrupt(CPUARMState *env)
{
uint32_t addr;
uint32_t mask;
int new_mode;
uint32_t offset;
/* TODO: Vectored interrupt controller. */
switch (env->exception_index) {
case EXCP_UDEF:
new_mode = ARM_CPU_MODE_UND;
addr = 0x04;
mask = CPSR_I;
if (env->thumb)
offset = 2;
else
offset = 4;
break;
case EXCP_SWI:
new_mode = ARM_CPU_MODE_SVC;
addr = 0x08;
mask = CPSR_I;
/* The PC already points to the next instruction. */
offset = 0;
break;
case EXCP_PREFETCH_ABORT:
new_mode = ARM_CPU_MODE_ABT;
addr = 0x0c;
mask = CPSR_A | CPSR_I;
offset = 4;
break;
case EXCP_DATA_ABORT:
new_mode = ARM_CPU_MODE_ABT;
addr = 0x10;
mask = CPSR_A | CPSR_I;
offset = 8;
break;
case EXCP_IRQ:
new_mode = ARM_CPU_MODE_IRQ;
addr = 0x18;
/* Disable IRQ and imprecise data aborts. */
mask = CPSR_A | CPSR_I;
offset = 4;
break;
case EXCP_FIQ:
new_mode = ARM_CPU_MODE_FIQ;
addr = 0x1c;
/* Disable FIQ, IRQ and imprecise data aborts. */
mask = CPSR_A | CPSR_I | CPSR_F;
offset = 4;
break;
default:
cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
return; /* Never happens. Keep compiler happy. */
}
/* High vectors. */
if (env->cp15.c1_sys & (1 << 13)) {
addr += 0xffff0000;
}
switch_mode (env, new_mode);
env->spsr = cpsr_read(env);
/* Switch to the new mode, and clear the thumb bit. */
/* ??? Thumb interrupt handlers not implemented. */
env->uncached_cpsr = (env->uncached_cpsr & ~(CPSR_M | CPSR_T)) | new_mode;
env->uncached_cpsr |= mask;
env->regs[14] = env->regs[15] + offset;
env->regs[15] = addr;
env->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
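For a concrete trace of the code above: an IRQ taken from User mode at PC 0x8000 with low vectors selects new_mode ARM_CPU_MODE_IRQ, vector address 0x18 and offset 4, so switch_mode() banks the User r13/r14, the old CPSR is saved into SPSR_irq, the mode field becomes IRQ with the T bit cleared and A/I set, LR_irq becomes 0x8004, and execution resumes at 0x18 (or 0xffff0018 when the V bit in c1_sys selects high vectors).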
/* Check section/page access permissions.
Returns the page protection flags, or zero if the access is not
permitted. */
static inline int check_ap(CPUState *env, int ap, int domain, int access_type,
int is_user)
{
if (domain == 3)
return PAGE_READ | PAGE_WRITE;
switch (ap) {
case 0:
if (access_type == 1)
return 0;
switch ((env->cp15.c1_sys >> 8) & 3) {
case 1:
return is_user ? 0 : PAGE_READ;
case 2:
return PAGE_READ;
default:
return 0;
}
case 1:
return is_user ? 0 : PAGE_READ | PAGE_WRITE;
case 2:
if (is_user)
return (access_type == 1) ? 0 : PAGE_READ;
else
return PAGE_READ | PAGE_WRITE;
case 3:
return PAGE_READ | PAGE_WRITE;
default:
abort();
}
}
static int get_phys_addr(CPUState *env, uint32_t address, int access_type,
int is_user, uint32_t *phys_ptr, int *prot)
{
int code;
uint32_t table;
uint32_t desc;
int type;
int ap;
int domain;
uint32_t phys_addr;
/* Fast Context Switch Extension. */
if (address < 0x02000000)
address += env->cp15.c13_fcse;
if ((env->cp15.c1_sys & 1) == 0) {
/* MMU disabled. */
*phys_ptr = address;
*prot = PAGE_READ | PAGE_WRITE;
} else {
/* Pagetable walk. */
/* Lookup l1 descriptor. */
table = (env->cp15.c2 & 0xffffc000) | ((address >> 18) & 0x3ffc);
desc = ldl_phys(table);
type = (desc & 3);
domain = (env->cp15.c3 >> ((desc >> 4) & 0x1e)) & 3;
if (type == 0) {
/* Section translation fault. */
code = 5;
goto do_fault;
}
if (domain == 0 || domain == 2) {
if (type == 2)
code = 9; /* Section domain fault. */
else
code = 11; /* Page domain fault. */
goto do_fault;
}
if (type == 2) {
/* 1Mb section. */
phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
ap = (desc >> 10) & 3;
code = 13;
} else {
/* Lookup l2 entry. */
table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
desc = ldl_phys(table);
switch (desc & 3) {
case 0: /* Page translation fault. */
code = 7;
goto do_fault;
case 1: /* 64k page. */
phys_addr = (desc & 0xffff0000) | (address & 0xffff);
ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
break;
case 2: /* 4k page. */
phys_addr = (desc & 0xfffff000) | (address & 0xfff);
ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
break;
case 3: /* 1k page. */
if (type == 1) {
/* Page translation fault. */
code = 7;
goto do_fault;
}
phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
ap = (desc >> 4) & 3;
break;
default:
/* Never happens, but compiler isn't smart enough to tell. */
abort();
}
code = 15;
}
*prot = check_ap(env, ap, domain, access_type, is_user);
if (!*prot) {
/* Access permission fault. */
goto do_fault;
}
*phys_ptr = phys_addr;
}
return 0;
do_fault:
return code | (domain << 4);
}
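To make the section path concrete, here is a hypothetical walk with made-up values (TTB in c2 = 0x40000000, a virtual address outside the FCSE window, and an assumed level-1 descriptor); it only restates the arithmetic performed above:
uint32_t va    = 0x80123456;
uint32_t table = (0x40000000 & 0xffffc000) | ((va >> 18) & 0x3ffc); /* 0x40002004 */
uint32_t desc  = 0x80000c02;  /* assume ldl_phys(table) returns this: type 2 = 1MB section, AP = 3 */
uint32_t pa    = (desc & 0xfff00000) | (va & 0x000fffff);            /* 0x80023456 */
With AP = 3, check_ap() grants PAGE_READ | PAGE_WRITE to both user and privileged accesses.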
int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address,
int access_type, int is_user, int is_softmmu)
{
uint32_t phys_addr;
int prot;
int ret;
ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot);
if (ret == 0) {
/* Map a single [sub]page. */
phys_addr &= ~(uint32_t)0x3ff;
address &= ~(uint32_t)0x3ff;
return tlb_set_page (env, address, phys_addr, prot, is_user,
is_softmmu);
}
if (access_type == 2) {
env->cp15.c5_insn = ret;
env->cp15.c6_insn = address;
env->exception_index = EXCP_PREFETCH_ABORT;
} else {
env->cp15.c5_data = ret;
env->cp15.c6_data = address;
env->exception_index = EXCP_DATA_ABORT;
}
return 1;
}
target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
uint32_t phys_addr;
int prot;
int ret;
ret = get_phys_addr(env, addr, 0, 0, &phys_addr, &prot);
if (ret != 0)
return -1;
return phys_addr;
}
void helper_set_cp15(CPUState *env, uint32_t insn, uint32_t val)
{
uint32_t op2;
op2 = (insn >> 5) & 7;
switch ((insn >> 16) & 0xf) {
case 0: /* ID codes. */
goto bad_reg;
case 1: /* System configuration. */
switch (op2) {
case 0:
env->cp15.c1_sys = val;
/* ??? Lots of these bits are not implemented. */
/* This may enable/disable the MMU, so do a TLB flush. */
tlb_flush(env, 1);
break;
case 2:
env->cp15.c1_coproc = val;
/* ??? Is this safe when called from within a TB? */
tb_flush(env);
default:
goto bad_reg;
}
break;
case 2: /* MMU Page table control. */
env->cp15.c2 = val;
break;
case 3: /* MMU Domain access control. */
env->cp15.c3 = val;
break;
case 4: /* Reserved. */
goto bad_reg;
case 5: /* MMU Fault status. */
switch (op2) {
case 0:
env->cp15.c5_data = val;
break;
case 1:
env->cp15.c5_insn = val;
break;
default:
goto bad_reg;
}
break;
case 6: /* MMU Fault address. */
switch (op2) {
case 0:
env->cp15.c6_data = val;
break;
case 1:
env->cp15.c6_insn = val;
break;
default:
goto bad_reg;
}
break;
case 7: /* Cache control. */
/* No cache, so nothing to do. */
break;
case 8: /* MMU TLB control. */
switch (op2) {
case 0: /* Invalidate all. */
tlb_flush(env, 0);
break;
case 1: /* Invalidate single TLB entry. */
#if 0
/* ??? This is wrong for large pages and sections. */
/* As an ugly hack to make linux work we always flush a whole
4K page. */
val &= 0xfffff000;
tlb_flush_page(env, val);
tlb_flush_page(env, val + 0x400);
tlb_flush_page(env, val + 0x800);
tlb_flush_page(env, val + 0xc00);
#else
tlb_flush(env, 1);
#endif
break;
default:
goto bad_reg;
}
break;
case 9: /* Cache lockdown. */
switch (op2) {
case 0:
env->cp15.c9_data = val;
break;
case 1:
env->cp15.c9_insn = val;
break;
default:
goto bad_reg;
}
break;
case 10: /* MMU TLB lockdown. */
/* ??? TLB lockdown not implemented. */
break;
case 11: /* TCM DMA control. */
case 12: /* Reserved. */
goto bad_reg;
case 13: /* Process ID. */
switch (op2) {
case 0:
env->cp15.c13_fcse = val;
break;
case 1:
env->cp15.c13_context = val;
break;
default:
goto bad_reg;
}
break;
case 14: /* Reserved. */
goto bad_reg;
case 15: /* Implementation specific. */
/* ??? Internal registers not implemented. */
break;
}
return;
bad_reg:
/* ??? For debugging only. Should raise illegal instruction exception. */
cpu_abort(env, "Unimplemented cp15 register read\n");
}
uint32_t helper_get_cp15(CPUState *env, uint32_t insn)
{
uint32_t op2;
op2 = (insn >> 5) & 7;
switch ((insn >> 16) & 0xf) {
case 0: /* ID codes. */
switch (op2) {
default: /* Device ID. */
return 0x4106a262;
case 1: /* Cache Type. */
return 0x1dd20d2;
case 2: /* TCM status. */
return 0;
}
case 1: /* System configuration. */
switch (op2) {
case 0: /* Control register. */
return env->cp15.c1_sys;
case 1: /* Auxiliary control register. */
return 1;
case 2: /* Coprocessor access register. */
return env->cp15.c1_coproc;
default:
goto bad_reg;
}
case 2: /* MMU Page table control. */
return env->cp15.c2;
case 3: /* MMU Domain access control. */
return env->cp15.c3;
case 4: /* Reserved. */
goto bad_reg;
case 5: /* MMU Fault status. */
switch (op2) {
case 0:
return env->cp15.c5_data;
case 1:
return env->cp15.c5_insn;
default:
goto bad_reg;
}
case 6: /* MMU Fault address. */
switch (op2) {
case 0:
return env->cp15.c6_data;
case 1:
return env->cp15.c6_insn;
default:
goto bad_reg;
}
case 7: /* Cache control. */
/* ??? This is for test, clean and invalidate operations that set the
Z flag. We can't represent N = Z = 1, so it also clears
the N flag. Oh well. */
env->NZF = 0;
return 0;
case 8: /* MMU TLB control. */
goto bad_reg;
case 9: /* Cache lockdown. */
switch (op2) {
case 0:
return env->cp15.c9_data;
case 1:
return env->cp15.c9_insn;
default:
goto bad_reg;
}
case 10: /* MMU TLB lockdown. */
/* ??? TLB lockdown not implemented. */
return 0;
case 11: /* TCM DMA control. */
case 12: /* Reserved. */
goto bad_reg;
case 13: /* Process ID. */
switch (op2) {
case 0:
return env->cp15.c13_fcse;
case 1:
return env->cp15.c13_context;
default:
goto bad_reg;
}
case 14: /* Reserved. */
goto bad_reg;
case 15: /* Implementation specific. */
/* ??? Internal registers not implemented. */
return 0;
}
bad_reg:
/* ??? For debugging only. Should raise illegal instruction exception. */
cpu_abort(env, "Unimplemented cp15 register read\n");
return 0;
}
#endif


@ -101,6 +101,11 @@ void OPPROTO op_movl_T0_im(void)
T0 = PARAM1;
}
void OPPROTO op_movl_T0_T1(void)
{
T0 = T1;
}
void OPPROTO op_movl_T1_im(void)
{
T1 = PARAM1;
@ -361,20 +366,27 @@ void OPPROTO op_exit_tb(void)
EXIT_TB();
}
void OPPROTO op_movl_T0_psr(void)
void OPPROTO op_movl_T0_cpsr(void)
{
T0 = compute_cpsr();
T0 = cpsr_read(env);
FORCE_RET();
}
/* NOTE: N = 1 and Z = 1 cannot be stored currently */
void OPPROTO op_movl_psr_T0(void)
void OPPROTO op_movl_T0_spsr(void)
{
unsigned int psr;
psr = T0;
env->CF = (psr >> 29) & 1;
env->NZF = (psr & 0xc0000000) ^ 0x40000000;
env->VF = (psr << 3) & 0x80000000;
/* for user mode we do not update other state info */
T0 = env->spsr;
}
void OPPROTO op_movl_spsr_T0(void)
{
uint32_t mask = PARAM1;
env->spsr = (env->spsr & ~mask) | (T0 & mask);
}
void OPPROTO op_movl_cpsr_T0(void)
{
cpsr_write(env, T0, PARAM1);
FORCE_RET();
}
void OPPROTO op_mul_T0_T1(void)
@ -433,67 +445,15 @@ void OPPROTO op_logicq_cc(void)
/* memory access */
void OPPROTO op_ldub_T0_T1(void)
{
T0 = ldub((void *)T1);
}
#define MEMSUFFIX _raw
#include "op_mem.h"
void OPPROTO op_ldsb_T0_T1(void)
{
T0 = ldsb((void *)T1);
}
void OPPROTO op_lduw_T0_T1(void)
{
T0 = lduw((void *)T1);
}
void OPPROTO op_ldsw_T0_T1(void)
{
T0 = ldsw((void *)T1);
}
void OPPROTO op_ldl_T0_T1(void)
{
T0 = ldl((void *)T1);
}
void OPPROTO op_stb_T0_T1(void)
{
stb((void *)T1, T0);
}
void OPPROTO op_stw_T0_T1(void)
{
stw((void *)T1, T0);
}
void OPPROTO op_stl_T0_T1(void)
{
stl((void *)T1, T0);
}
void OPPROTO op_swpb_T0_T1(void)
{
int tmp;
cpu_lock();
tmp = ldub((void *)T1);
stb((void *)T1, T0);
T0 = tmp;
cpu_unlock();
}
void OPPROTO op_swpl_T0_T1(void)
{
int tmp;
cpu_lock();
tmp = ldl((void *)T1);
stl((void *)T1, T0);
T0 = tmp;
cpu_unlock();
}
#if !defined(CONFIG_USER_ONLY)
#define MEMSUFFIX _user
#include "op_mem.h"
#define MEMSUFFIX _kernel
#include "op_mem.h"
#endif
/* shifts */
@ -744,17 +704,48 @@ void OPPROTO op_sarl_T0_im(void)
T0 = (int32_t)T0 >> PARAM1;
}
/* 16->32 Sign extend */
void OPPROTO op_sxl_T0(void)
/* Sign/zero extend */
void OPPROTO op_sxth_T0(void)
{
T0 = (int16_t)T0;
}
void OPPROTO op_sxl_T1(void)
void OPPROTO op_sxth_T1(void)
{
T1 = (int16_t)T1;
}
void OPPROTO op_sxtb_T1(void)
{
T1 = (int8_t)T1;
}
void OPPROTO op_uxtb_T1(void)
{
T1 = (uint8_t)T1;
}
void OPPROTO op_uxth_T1(void)
{
T1 = (uint16_t)T1;
}
void OPPROTO op_sxtb16_T1(void)
{
uint32_t res;
res = (uint16_t)(int8_t)T1;
res |= (uint32_t)(int8_t)(T1 >> 16) << 16;
T1 = res;
}
void OPPROTO op_uxtb16_T1(void)
{
uint32_t res;
res = (uint16_t)(uint8_t)T1;
res |= (uint32_t)(uint8_t)(T1 >> 16) << 16;
T1 = res;
}
#define SIGNBIT (uint32_t)0x80000000
/* saturating arithmetic */
void OPPROTO op_addl_T0_T1_setq(void)
@ -1128,23 +1119,52 @@ void OPPROTO op_vfp_mdrr(void)
FT0d = u.d;
}
/* Floating point load/store. Address is in T1 */
void OPPROTO op_vfp_lds(void)
/* Copy the most significant bit of T0 to all bits of T1. */
void OPPROTO op_signbit_T1_T0(void)
{
FT0s = ldfl((void *)T1);
T1 = (int32_t)T0 >> 31;
}
void OPPROTO op_vfp_ldd(void)
void OPPROTO op_movl_cp15_T0(void)
{
FT0d = ldfq((void *)T1);
helper_set_cp15(env, PARAM1, T0);
FORCE_RET();
}
void OPPROTO op_vfp_sts(void)
void OPPROTO op_movl_T0_cp15(void)
{
stfl((void *)T1, FT0s);
T0 = helper_get_cp15(env, PARAM1);
FORCE_RET();
}
void OPPROTO op_vfp_std(void)
/* Access to user mode registers from privileged modes. */
void OPPROTO op_movl_T0_user(void)
{
stfq((void *)T1, FT0d);
int regno = PARAM1;
if (regno == 13) {
T0 = env->banked_r13[0];
} else if (regno == 14) {
T0 = env->banked_r14[0];
} else if ((env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
T0 = env->usr_regs[regno - 8];
} else {
T0 = env->regs[regno];
}
FORCE_RET();
}
void OPPROTO op_movl_user_T0(void)
{
int regno = PARAM1;
if (regno == 13) {
env->banked_r13[0] = T0;
} else if (regno == 14) {
env->banked_r14[0] = T0;
} else if ((env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
env->usr_regs[regno - 8] = T0;
} else {
env->regs[regno] = T0;
}
FORCE_RET();
}


@ -172,3 +172,54 @@ void do_vfp_get_fpscr(void)
i = get_float_exception_flags(&env->vfp.fp_status);
T0 |= vfp_exceptbits_from_host(i);
}
#if !defined(CONFIG_USER_ONLY)
#define MMUSUFFIX _mmu
#define GETPC() (__builtin_return_address(0))
#define SHIFT 0
#include "softmmu_template.h"
#define SHIFT 1
#include "softmmu_template.h"
#define SHIFT 2
#include "softmmu_template.h"
#define SHIFT 3
#include "softmmu_template.h"
/* try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int is_user, void *retaddr)
{
TranslationBlock *tb;
CPUState *saved_env;
target_phys_addr_t pc;
int ret;
/* XXX: hack to restore env in all cases, even if not called from
generated code */
saved_env = env;
env = cpu_single_env;
ret = cpu_arm_handle_mmu_fault(env, addr, is_write, is_user, 1);
if (__builtin_expect(ret, 0)) {
if (retaddr) {
/* now we have a real cpu fault */
pc = (target_phys_addr_t)retaddr;
tb = tb_find_pc(pc);
if (tb) {
/* the PC is inside the translated code. It means that we have
a virtual CPU fault */
cpu_restore_state(tb, env, pc, NULL);
}
}
raise_exception(env->exception_index);
}
env = saved_env;
}
#endif

target-arm/op_mem.h (new file, 70 lines)

@ -0,0 +1,70 @@
/* ARM memory operations. */
/* Load from address T1 into T0. */
#define MEM_LD_OP(name) \
void OPPROTO glue(op_ld##name,MEMSUFFIX)(void) \
{ \
T0 = glue(ld##name,MEMSUFFIX)(T1); \
FORCE_RET(); \
}
MEM_LD_OP(ub)
MEM_LD_OP(sb)
MEM_LD_OP(uw)
MEM_LD_OP(sw)
MEM_LD_OP(l)
#undef MEM_LD_OP
/* Store T0 to address T1. */
#define MEM_ST_OP(name) \
void OPPROTO glue(op_st##name,MEMSUFFIX)(void) \
{ \
glue(st##name,MEMSUFFIX)(T1, T0); \
FORCE_RET(); \
}
MEM_ST_OP(b)
MEM_ST_OP(w)
MEM_ST_OP(l)
#undef MEM_ST_OP
/* Swap T0 with memory at address T1. */
/* ??? Is this exception safe? */
#define MEM_SWP_OP(name, lname) \
void OPPROTO glue(op_swp##name,MEMSUFFIX)(void) \
{ \
uint32_t tmp; \
cpu_lock(); \
tmp = glue(ld##lname,MEMSUFFIX)(T1); \
glue(st##name,MEMSUFFIX)(T1, T0); \
T0 = tmp; \
cpu_unlock(); \
FORCE_RET(); \
}
MEM_SWP_OP(b, ub)
MEM_SWP_OP(l, l)
#undef MEM_SWP_OP
/* Floating point load/store. Address is in T1 */
#define VFP_MEM_OP(p, w) \
void OPPROTO glue(op_vfp_ld##p,MEMSUFFIX)(void) \
{ \
FT0##p = glue(ldf##w,MEMSUFFIX)(T1); \
FORCE_RET(); \
} \
void OPPROTO glue(op_vfp_st##p,MEMSUFFIX)(void) \
{ \
glue(stf##w,MEMSUFFIX)(T1, FT0##p); \
FORCE_RET(); \
}
VFP_MEM_OP(s,l)
VFP_MEM_OP(d,q)
#undef VFP_MEM_OP
#undef MEMSUFFIX
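For reference, when this header is included with MEMSUFFIX defined as _user, MEM_LD_OP(ub) expands to the op that translate.c reaches through gen_ldst(ldub, s) in user-mode translation blocks (expansion shown for illustration only):
void OPPROTO op_ldub_user(void)
{
    T0 = ldub_user(T1);
    FORCE_RET();
}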


@ -28,6 +28,12 @@
#include "exec-all.h"
#include "disas.h"
#define ENABLE_ARCH_5J 0
#define ENABLE_ARCH_6 1
#define ENABLE_ARCH_6T2 1
#define ARCH(x) if (!ENABLE_ARCH_##x) goto illegal_op;
/* internal defines */
typedef struct DisasContext {
target_ulong pc;
@ -39,8 +45,17 @@ typedef struct DisasContext {
struct TranslationBlock *tb;
int singlestep_enabled;
int thumb;
#if !defined(CONFIG_USER_ONLY)
int user;
#endif
} DisasContext;
#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif
#define DISAS_JUMP_NEXT 4
#ifdef USE_DIRECT_JUMP
@ -270,6 +285,18 @@ static inline void gen_bx(DisasContext *s)
gen_op_bx_T0();
}
#if defined(CONFIG_USER_ONLY)
#define gen_ldst(name, s) gen_op_##name##_raw()
#else
#define gen_ldst(name, s) do { \
if (IS_USER(s)) \
gen_op_##name##_user(); \
else \
gen_op_##name##_kernel(); \
} while (0)
#endif
static inline void gen_movl_TN_reg(DisasContext *s, int reg, int t)
{
int val;
@ -319,6 +346,14 @@ static inline void gen_movl_reg_T1(DisasContext *s, int reg)
gen_movl_reg_TN(s, reg, 1);
}
/* Force a TB lookup after an instruction that changes the CPU state. */
static inline void gen_lookup_tb(DisasContext *s)
{
gen_op_movl_T0_im(s->pc);
gen_movl_reg_T0(s, 15);
s->is_jmp = DISAS_UPDATE;
}
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn)
{
int val, rm, shift, shiftop;
@ -395,11 +430,25 @@ VFP_OP(toui)
VFP_OP(touiz)
VFP_OP(tosi)
VFP_OP(tosiz)
VFP_OP(ld)
VFP_OP(st)
#undef VFP_OP
static inline void gen_vfp_ld(DisasContext *s, int dp)
{
if (dp)
gen_ldst(vfp_ldd, s);
else
gen_ldst(vfp_lds, s);
}
static inline void gen_vfp_st(DisasContext *s, int dp)
{
if (dp)
gen_ldst(vfp_std, s);
else
gen_ldst(vfp_sts, s);
}
static inline long
vfp_reg_offset (int dp, int reg)
{
@ -437,6 +486,30 @@ static inline void gen_mov_vreg_F0(int dp, int reg)
gen_op_vfp_setreg_F0s(vfp_reg_offset(dp, reg));
}
/* Disassemble system coprocessor (cp15) instruction. Return nonzero if
instruction is not defined. */
static int disas_cp15_insn(DisasContext *s, uint32_t insn)
{
uint32_t rd;
/* ??? Some cp15 registers are accessible from userspace. */
if (IS_USER(s)) {
return 1;
}
rd = (insn >> 12) & 0xf;
if (insn & (1 << 20)) {
gen_op_movl_T0_cp15(insn);
/* If the destination register is r15 then the condition codes are set. */
if (rd != 15)
gen_movl_reg_T0(s, rd);
} else {
gen_movl_T0_reg(s, rd);
gen_op_movl_cp15_T0(insn);
}
gen_lookup_tb(s);
return 0;
}
/* Disassemble a VFP instruction. Returns nonzero if an error occurred
(i.e. an undefined instruction). */
static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
@ -499,8 +572,8 @@ static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
gen_op_vfp_mrs();
}
if (rd == 15) {
/* This will only set the 4 flag bits */
gen_op_movl_psr_T0();
/* Set the 4 flag bits in the CPSR. */
gen_op_movl_cpsr_T0(0xf0000000);
} else
gen_movl_reg_T0(s, rd);
} else {
@ -516,9 +589,7 @@ static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
gen_op_vfp_movl_fpscr_T0();
/* This could change vector settings, so jump to
the next instruction. */
gen_op_movl_T0_im(s->pc);
gen_movl_reg_T0(s, 15);
s->is_jmp = DISAS_UPDATE;
gen_lookup_tb(s);
break;
default:
return 1;
@ -848,11 +919,11 @@ static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
offset = -offset;
gen_op_addl_T1_im(offset);
if (insn & (1 << 20)) {
gen_vfp_ld(dp);
gen_vfp_ld(s, dp);
gen_mov_vreg_F0(dp, rd);
} else {
gen_mov_F0_vreg(dp, rd);
gen_vfp_st(dp);
gen_vfp_st(s, dp);
}
} else {
/* load/store multiple */
@ -871,12 +942,12 @@ static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
for (i = 0; i < n; i++) {
if (insn & (1 << 20)) {
/* load */
gen_vfp_ld(dp);
gen_vfp_ld(s, dp);
gen_mov_vreg_F0(dp, rd + i);
} else {
/* store */
gen_mov_F0_vreg(dp, rd + i);
gen_vfp_st(dp);
gen_vfp_st(s, dp);
}
gen_op_addl_T1_im(offset);
}
@ -939,11 +1010,68 @@ static inline void gen_jmp (DisasContext *s, uint32_t dest)
}
}
static inline void gen_mulxy(int x, int y)
{
if (x & 2)
gen_op_sarl_T0_im(16);
else
gen_op_sxth_T0();
if (y & 1)
gen_op_sarl_T1_im(16);
else
gen_op_sxth_T1();
gen_op_mul_T0_T1();
}
/* Return the mask of PSR bits set by an MSR instruction. */
static uint32_t msr_mask(DisasContext *s, int flags) {
uint32_t mask;
mask = 0;
if (flags & (1 << 0))
mask |= 0xff;
if (flags & (1 << 1))
mask |= 0xff00;
if (flags & (1 << 2))
mask |= 0xff0000;
if (flags & (1 << 3))
mask |= 0xff000000;
/* Mask out undefined bits and state bits. */
mask &= 0xf89f03df;
/* Mask out privileged bits. */
if (IS_USER(s))
mask &= 0xf80f0200;
return mask;
}
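Worked through with the constants above: an MSR that targets only the flags field (bit 3 of the field mask) yields mask = 0xff000000 & 0xf89f03df = 0xf8000000, i.e. the NZCV and Q bits, and that value also survives the user-mode mask 0xf80f0200; a control-field write (bit 0) gives 0xdf in privileged code but 0 in user mode, so user code can never change the mode or the I/F bits this way.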
/* Returns nonzero if access to the PSR is not permitted. */
static int gen_set_psr_T0(DisasContext *s, uint32_t mask, int spsr)
{
if (spsr) {
/* ??? This is also undefined in system mode. */
if (IS_USER(s))
return 1;
gen_op_movl_spsr_T0(mask);
} else {
gen_op_movl_cpsr_T0(mask);
}
gen_lookup_tb(s);
return 0;
}
static void gen_exception_return(DisasContext *s)
{
gen_op_movl_reg_TN[0][15]();
gen_op_movl_T0_spsr();
gen_op_movl_cpsr_T0(0xffffffff);
s->is_jmp = DISAS_UPDATE;
}
static void disas_arm_insn(CPUState * env, DisasContext *s)
{
unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
insn = ldl(s->pc);
insn = ldl_code(s->pc);
s->pc += 4;
cond = insn >> 28;
@ -971,6 +1099,15 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
/* Coprocessor double register transfer. */
} else if ((insn & 0x0f000010) == 0x0e000010) {
/* Additional coprocessor register transfer. */
} else if ((insn & 0x0ff10010) == 0x01000000) {
/* cps (privileged) */
} else if ((insn & 0x0ffffdff) == 0x01010000) {
/* setend */
if (insn & (1 << 9)) {
/* BE8 mode not implemented. */
goto illegal_op;
}
return;
}
goto illegal_op;
}
@ -984,7 +1121,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
//s->is_jmp = DISAS_JUMP_NEXT;
}
if ((insn & 0x0f900000) == 0x03000000) {
if ((insn & 0x0ff0f000) != 0x0360f000)
if ((insn & 0x0fb0f000) != 0x0320f000)
goto illegal_op;
/* CPSR = immediate */
val = insn & 0xff;
@ -992,8 +1129,9 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
if (shift)
val = (val >> shift) | (val << (32 - shift));
gen_op_movl_T0_im(val);
if (insn & (1 << 19))
gen_op_movl_psr_T0();
if (gen_set_psr_T0(s, msr_mask(s, (insn >> 16) & 0xf),
(insn & (1 << 22)) != 0))
goto illegal_op;
} else if ((insn & 0x0f900000) == 0x01000000
&& (insn & 0x00000090) != 0x00000090) {
/* miscellaneous instructions */
@ -1002,19 +1140,22 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
rm = insn & 0xf;
switch (sh) {
case 0x0: /* move program status register */
if (op1 & 2) {
/* SPSR not accessible in user mode */
goto illegal_op;
}
if (op1 & 1) {
/* CPSR = reg */
/* PSR = reg */
gen_movl_T0_reg(s, rm);
if (insn & (1 << 19))
gen_op_movl_psr_T0();
if (gen_set_psr_T0(s, msr_mask(s, (insn >> 16) & 0xf),
(op1 & 2) != 0))
goto illegal_op;
} else {
/* reg = CPSR */
rd = (insn >> 12) & 0xf;
gen_op_movl_T0_psr();
if (op1 & 2) {
if (IS_USER(s))
goto illegal_op;
gen_op_movl_T0_spsr();
} else {
gen_op_movl_T0_cpsr();
}
gen_movl_reg_T0(s, rd);
}
break;
@ -1033,6 +1174,16 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
goto illegal_op;
}
break;
case 0x2:
if (op1 == 1) {
ARCH(5J); /* bxj */
/* Trivial implementation equivalent to bx. */
gen_movl_T0_reg(s, rm);
gen_bx(s);
} else {
goto illegal_op;
}
break;
case 0x3:
if (op1 != 1)
goto illegal_op;
@ -1071,7 +1222,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
if (sh & 4)
gen_op_sarl_T1_im(16);
else
gen_op_sxl_T1();
gen_op_sxth_T1();
gen_op_imulw_T0_T1();
if ((sh & 2) == 0) {
gen_movl_T1_reg(s, rn);
@ -1081,22 +1232,14 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
} else {
/* 16 * 16 */
gen_movl_T0_reg(s, rm);
if (sh & 2)
gen_op_sarl_T0_im(16);
else
gen_op_sxl_T0();
gen_movl_T1_reg(s, rs);
if (sh & 4)
gen_op_sarl_T1_im(16);
else
gen_op_sxl_T1();
gen_mulxy(sh & 2, sh & 4);
if (op1 == 2) {
gen_op_imull_T0_T1();
gen_op_signbit_T1_T0();
gen_op_addq_T0_T1(rn, rd);
gen_movl_reg_T0(s, rn);
gen_movl_reg_T1(s, rd);
} else {
gen_op_mul_T0_T1();
if (op1 == 0) {
gen_movl_T1_reg(s, rn);
gen_op_addl_T0_T1_setq();
@ -1176,11 +1319,19 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
gen_op_logic_T0_cc();
break;
case 0x02:
if (set_cc)
if (set_cc && rd == 15) {
/* SUBS r15, ... is used for exception return. */
if (IS_USER(s))
goto illegal_op;
gen_op_subl_T0_T1_cc();
else
gen_op_subl_T0_T1();
gen_movl_reg_T0(s, rd);
gen_exception_return(s);
} else {
if (set_cc)
gen_op_subl_T0_T1_cc();
else
gen_op_subl_T0_T1();
gen_movl_reg_T0(s, rd);
}
break;
case 0x03:
if (set_cc)
@ -1246,9 +1397,17 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
gen_op_logic_T0_cc();
break;
case 0x0d:
gen_movl_reg_T1(s, rd);
if (logic_cc)
gen_op_logic_T1_cc();
if (logic_cc && rd == 15) {
/* MOVS r15, ... is used for exception return. */
if (IS_USER(s))
goto illegal_op;
gen_op_movl_T0_T1();
gen_exception_return(s);
} else {
gen_movl_reg_T1(s, rd);
if (logic_cc)
gen_op_logic_T1_cc();
}
break;
case 0x0e:
gen_op_bicl_T0_T1();
@ -1301,6 +1460,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
if (insn & (1 << 21)) /* mult accumulate */
gen_op_addq_T0_T1(rn, rd);
if (!(insn & (1 << 23))) { /* double accumulate */
ARCH(6);
gen_op_addq_lo_T0_T1(rn);
gen_op_addq_lo_T0_T1(rd);
}
@ -1322,9 +1482,9 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
gen_movl_T0_reg(s, rm);
gen_movl_T1_reg(s, rn);
if (insn & (1 << 22)) {
gen_op_swpb_T0_T1();
gen_ldst(swpb, s);
} else {
gen_op_swpl_T0_T1();
gen_ldst(swpl, s);
}
gen_movl_reg_T0(s, rd);
}
@ -1340,14 +1500,14 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
/* load */
switch(sh) {
case 1:
gen_op_lduw_T0_T1();
gen_ldst(lduw, s);
break;
case 2:
gen_op_ldsb_T0_T1();
gen_ldst(ldsb, s);
break;
default:
case 3:
gen_op_ldsw_T0_T1();
gen_ldst(ldsw, s);
break;
}
gen_movl_reg_T0(s, rd);
@ -1356,18 +1516,18 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
if (sh & 1) {
/* store */
gen_movl_T0_reg(s, rd);
gen_op_stl_T0_T1();
gen_ldst(stl, s);
gen_op_addl_T1_im(4);
gen_movl_T0_reg(s, rd + 1);
gen_op_stl_T0_T1();
gen_ldst(stl, s);
if ((insn & (1 << 24)) || (insn & (1 << 20)))
gen_op_addl_T1_im(-4);
} else {
/* load */
gen_op_ldl_T0_T1();
gen_ldst(ldl, s);
gen_movl_reg_T0(s, rd);
gen_op_addl_T1_im(4);
gen_op_ldl_T0_T1();
gen_ldst(ldl, s);
gen_movl_reg_T0(s, rd + 1);
if ((insn & (1 << 24)) || (insn & (1 << 20)))
gen_op_addl_T1_im(-4);
@ -1375,7 +1535,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
} else {
/* store */
gen_movl_T0_reg(s, rd);
gen_op_stw_T0_T1();
gen_ldst(stw, s);
}
if (!(insn & (1 << 24))) {
gen_add_datah_offset(s, insn);
@ -1393,14 +1553,29 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
rn = (insn >> 16) & 0xf;
rd = (insn >> 12) & 0xf;
gen_movl_T1_reg(s, rn);
i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
if (insn & (1 << 24))
gen_add_data_offset(s, insn);
if (insn & (1 << 20)) {
/* load */
#if defined(CONFIG_USER_ONLY)
if (insn & (1 << 22))
gen_op_ldub_T0_T1();
gen_op_ldub_raw();
else
gen_op_ldl_T0_T1();
gen_op_ldl_raw();
#else
if (insn & (1 << 22)) {
if (i)
gen_op_ldub_user();
else
gen_op_ldub_kernel();
} else {
if (i)
gen_op_ldl_user();
else
gen_op_ldl_kernel();
}
#endif
if (rd == 15)
gen_bx(s);
else
@ -1408,10 +1583,24 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
} else {
/* store */
gen_movl_T0_reg(s, rd);
#if defined(CONFIG_USER_ONLY)
if (insn & (1 << 22))
gen_op_stb_T0_T1();
gen_op_stb_raw();
else
gen_op_stl_T0_T1();
gen_op_stl_raw();
#else
if (insn & (1 << 22)) {
if (i)
gen_op_stb_user();
else
gen_op_stb_kernel();
} else {
if (i)
gen_op_stl_user();
else
gen_op_stl_kernel();
}
#endif
}
if (!(insn & (1 << 24))) {
gen_add_data_offset(s, insn);
@ -1423,11 +1612,17 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
case 0x08:
case 0x09:
{
int j, n;
int j, n, user;
/* load/store multiple words */
/* XXX: store correct base if write back */
if (insn & (1 << 22))
goto illegal_op; /* only usable in supervisor mode */
user = 0;
if (insn & (1 << 22)) {
if (IS_USER(s))
goto illegal_op; /* only usable in supervisor mode */
if ((insn & (1 << 15)) == 0)
user = 1;
}
rn = (insn >> 16) & 0xf;
gen_movl_T1_reg(s, rn);
@ -1460,21 +1655,26 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
if (insn & (1 << i)) {
if (insn & (1 << 20)) {
/* load */
gen_op_ldl_T0_T1();
if (i == 15)
gen_ldst(ldl, s);
if (i == 15) {
gen_bx(s);
else
} else if (user) {
gen_op_movl_user_T0(i);
} else {
gen_movl_reg_T0(s, i);
}
} else {
/* store */
if (i == 15) {
/* special case: r15 = PC + 12 */
val = (long)s->pc + 8;
gen_op_movl_TN_im[0](val);
} else if (user) {
gen_op_movl_T0_user(i);
} else {
gen_movl_T0_reg(s, i);
}
gen_op_stl_T0_T1();
gen_ldst(stl, s);
}
j++;
/* no need to add after the last transfer */
@ -1503,6 +1703,12 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
}
gen_movl_reg_T1(s, rn);
}
if ((insn & (1 << 22)) && !user) {
/* Restore CPSR from SPSR. */
gen_op_movl_T0_spsr();
gen_op_movl_cpsr_T0(0xffffffff);
s->is_jmp = DISAS_UPDATE;
}
}
break;
case 0xa:
@ -1532,6 +1738,10 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
if (disas_vfp_insn (env, s, insn))
goto illegal_op;
break;
case 15:
if (disas_cp15_insn (s, insn))
goto illegal_op;
break;
default:
/* unknown coprocessor. */
goto illegal_op;
@ -1561,9 +1771,9 @@ static void disas_thumb_insn(DisasContext *s)
int32_t offset;
int i;
insn = lduw(s->pc);
insn = lduw_code(s->pc);
s->pc += 2;
switch (insn >> 12) {
case 0: case 1:
rd = insn & 7;
@ -1628,7 +1838,7 @@ static void disas_thumb_insn(DisasContext *s)
val = s->pc + 2 + ((insn & 0xff) * 4);
val &= ~(uint32_t)2;
gen_op_movl_T1_im(val);
gen_op_ldl_T0_T1();
gen_ldst(ldl, s);
gen_movl_reg_T0(s, rd);
break;
}
@ -1771,28 +1981,28 @@ static void disas_thumb_insn(DisasContext *s)
switch (op) {
case 0: /* str */
gen_op_stl_T0_T1();
gen_ldst(stl, s);
break;
case 1: /* strh */
gen_op_stw_T0_T1();
gen_ldst(stw, s);
break;
case 2: /* strb */
gen_op_stb_T0_T1();
gen_ldst(stb, s);
break;
case 3: /* ldrsb */
gen_op_ldsb_T0_T1();
gen_ldst(ldsb, s);
break;
case 4: /* ldr */
gen_op_ldl_T0_T1();
gen_ldst(ldl, s);
break;
case 5: /* ldrh */
gen_op_lduw_T0_T1();
gen_ldst(lduw, s);
break;
case 6: /* ldrb */
gen_op_ldub_T0_T1();
gen_ldst(ldub, s);
break;
case 7: /* ldrsh */
gen_op_ldsw_T0_T1();
gen_ldst(ldsw, s);
break;
}
if (op >= 3) /* load */
@ -1810,12 +2020,12 @@ static void disas_thumb_insn(DisasContext *s)
if (insn & (1 << 11)) {
/* load */
gen_op_ldl_T0_T1();
gen_ldst(ldl, s);
gen_movl_reg_T0(s, rd);
} else {
/* store */
gen_movl_T0_reg(s, rd);
gen_op_stl_T0_T1();
gen_ldst(stl, s);
}
break;
@ -1830,12 +2040,12 @@ static void disas_thumb_insn(DisasContext *s)
if (insn & (1 << 11)) {
/* load */
gen_op_ldub_T0_T1();
gen_ldst(ldub, s);
gen_movl_reg_T0(s, rd);
} else {
/* store */
gen_movl_T0_reg(s, rd);
gen_op_stb_T0_T1();
gen_ldst(stb, s);
}
break;
@ -1850,12 +2060,12 @@ static void disas_thumb_insn(DisasContext *s)
if (insn & (1 << 11)) {
/* load */
gen_op_lduw_T0_T1();
gen_ldst(lduw, s);
gen_movl_reg_T0(s, rd);
} else {
/* store */
gen_movl_T0_reg(s, rd);
gen_op_stw_T0_T1();
gen_ldst(stw, s);
}
break;
@ -1869,12 +2079,12 @@ static void disas_thumb_insn(DisasContext *s)
if (insn & (1 << 11)) {
/* load */
gen_op_ldl_T0_T1();
gen_ldst(ldl, s);
gen_movl_reg_T0(s, rd);
} else {
/* store */
gen_movl_T0_reg(s, rd);
gen_op_stl_T0_T1();
gen_ldst(stl, s);
}
break;
@ -1929,12 +2139,12 @@ static void disas_thumb_insn(DisasContext *s)
if (insn & (1 << i)) {
if (insn & (1 << 11)) {
/* pop */
gen_op_ldl_T0_T1();
gen_ldst(ldl, s);
gen_movl_reg_T0(s, i);
} else {
/* push */
gen_movl_T0_reg(s, i);
gen_op_stl_T0_T1();
gen_ldst(stl, s);
}
/* advance to the next address. */
gen_op_addl_T1_T2();
@ -1943,13 +2153,13 @@ static void disas_thumb_insn(DisasContext *s)
if (insn & (1 << 8)) {
if (insn & (1 << 11)) {
/* pop pc */
gen_op_ldl_T0_T1();
gen_ldst(ldl, s);
/* don't set the pc until the rest of the instruction
has completed */
} else {
/* push lr */
gen_movl_T0_reg(s, 14);
gen_op_stl_T0_T1();
gen_ldst(stl, s);
}
gen_op_addl_T1_T2();
}
@ -1978,19 +2188,20 @@ static void disas_thumb_insn(DisasContext *s)
if (insn & (1 << i)) {
if (insn & (1 << 11)) {
/* load */
gen_op_ldl_T0_T1();
gen_ldst(ldl, s);
gen_movl_reg_T0(s, i);
} else {
/* store */
gen_movl_T0_reg(s, i);
gen_op_stl_T0_T1();
gen_ldst(stl, s);
}
/* advance to the next address */
gen_op_addl_T1_T2();
}
}
/* Base register writeback. */
gen_movl_reg_T1(s, rn);
if ((insn & (1 << rn)) == 0)
gen_movl_reg_T1(s, rn);
break;
case 13:
@ -2036,7 +2247,7 @@ static void disas_thumb_insn(DisasContext *s)
case 15:
/* branch and link [and switch to arm] */
offset = ((int32_t)insn << 21) >> 10;
insn = lduw(s->pc);
insn = lduw_code(s->pc);
offset |= insn & 0x7ff;
val = (uint32_t)s->pc + 2;
@ -2073,6 +2284,7 @@ static inline int gen_intermediate_code_internal(CPUState *env,
uint16_t *gen_opc_end;
int j, lj;
target_ulong pc_start;
uint32_t next_page_start;
/* generate intermediate code */
pc_start = tb->pc;
@ -2088,6 +2300,10 @@ static inline int gen_intermediate_code_internal(CPUState *env,
dc->singlestep_enabled = env->singlestep_enabled;
dc->condjmp = 0;
dc->thumb = env->thumb;
#if !defined(CONFIG_USER_ONLY)
dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
#endif
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
nb_gen_labels = 0;
lj = -1;
do {
@ -2124,12 +2340,13 @@ static inline int gen_intermediate_code_internal(CPUState *env,
}
/* Translation stops when a conditional branch is encountered.
* Otherwise the subsequent code could get translated several times.
*/
* Also stop translation when a page boundary is reached. This
* ensures prefetch aborts occur at the right place. */
} while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
!env->singlestep_enabled &&
(dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32));
/* It this stage dc->condjmp will only be set when the skipped
* instruction was a conditional branch, and teh PC has already been
dc->pc < next_page_start);
/* At this stage dc->condjmp will only be set when the skipped
* instruction was a conditional branch, and the PC has already been
* written. */
if (__builtin_expect(env->singlestep_enabled, 0)) {
/* Make sure the pc is updated, and raise a debug exception. */
@ -2180,8 +2397,15 @@ static inline int gen_intermediate_code_internal(CPUState *env,
}
}
#endif
if (!search_pc)
if (search_pc) {
j = gen_opc_ptr - gen_opc_buf;
lj++;
while (lj <= j)
gen_opc_instr_start[lj++] = 0;
tb->size = 0;
} else {
tb->size = dc->pc - pc_start;
}
return 0;
}
@ -2195,6 +2419,17 @@ int gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
return gen_intermediate_code_internal(env, tb, 1);
}
void cpu_reset(CPUARMState *env)
{
#if defined (CONFIG_USER_ONLY)
env->uncached_cpsr = ARM_CPU_MODE_USR;
#else
/* SVC mode with interrupts disabled. */
env->uncached_cpsr = ARM_CPU_MODE_SVC | CPSR_A | CPSR_F | CPSR_I;
#endif
env->regs[15] = 0;
}
CPUARMState *cpu_arm_init(void)
{
CPUARMState *env;
@ -2203,6 +2438,8 @@ CPUARMState *cpu_arm_init(void)
if (!env)
return NULL;
cpu_exec_init(env);
cpu_reset(env);
tlb_flush(env, 1);
return env;
}
@ -2211,6 +2448,10 @@ void cpu_arm_close(CPUARMState *env)
free(env);
}
static const char *cpu_mode_names[16] = {
"usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
"???", "???", "???", "und", "???", "???", "???", "sys"
};
void cpu_dump_state(CPUState *env, FILE *f,
int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
int flags)
@ -2221,6 +2462,7 @@ void cpu_dump_state(CPUState *env, FILE *f,
float s;
} s0, s1;
CPU_DoubleU d;
uint32_t psr;
for(i=0;i<16;i++) {
cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
@ -2229,13 +2471,15 @@ void cpu_dump_state(CPUState *env, FILE *f,
else
cpu_fprintf(f, " ");
}
cpu_fprintf(f, "PSR=%08x %c%c%c%c %c\n",
env->cpsr,
env->cpsr & (1 << 31) ? 'N' : '-',
env->cpsr & (1 << 30) ? 'Z' : '-',
env->cpsr & (1 << 29) ? 'C' : '-',
env->cpsr & (1 << 28) ? 'V' : '-',
env->thumb ? 'T' : 'A');
psr = cpsr_read(env);
cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d %x\n",
psr,
psr & (1 << 31) ? 'N' : '-',
psr & (1 << 30) ? 'Z' : '-',
psr & (1 << 29) ? 'C' : '-',
psr & (1 << 28) ? 'V' : '-',
psr & CPSR_T ? 'T' : 'A',
cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
for (i = 0; i < 16; i++) {
d.d = env->vfp.regs[i];
@ -2250,27 +2494,3 @@ void cpu_dump_state(CPUState *env, FILE *f,
cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.fpscr);
}
target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
return addr;
}
#if defined(CONFIG_USER_ONLY)
int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
int is_user, int is_softmmu)
{
env->cp15_6 = address;
if (rw == 2) {
env->exception_index = EXCP_PREFETCH_ABORT;
} else {
env->exception_index = EXCP_DATA_ABORT;
}
return 1;
}
#else
#error not implemented
#endif

vl.c

@ -3359,6 +3359,19 @@ int cpu_load(QEMUFile *f, void *opaque, int version_id)
tlb_flush(env, 1);
return 0;
}
#elif defined(TARGET_ARM)
/* ??? Need to implement these. */
void cpu_save(QEMUFile *f, void *opaque)
{
}
int cpu_load(QEMUFile *f, void *opaque, int version_id)
{
return 0;
}
#else
#warning No CPU save/restore functions
@ -4054,6 +4067,10 @@ void register_machines(void)
#else
qemu_register_machine(&sun4m_machine);
#endif
#elif defined(TARGET_ARM)
qemu_register_machine(&integratorcp_machine);
#else
#error unsupported CPU
#endif
}

vl.h

@ -928,6 +928,9 @@ void do_usb_add(const char *devname);
void do_usb_del(const char *devname);
void usb_info(void);
/* integratorcp.c */
extern QEMUMachine integratorcp_machine;
/* ps2.c */
void *ps2_kbd_init(void (*update_irq)(void *, int), void *update_arg);
void *ps2_mouse_init(void (*update_irq)(void *, int), void *update_arg);