cpu-exec: avoid cpu_exec_nocache infinite loop with record/replay
This patch ensures that icount_decr.u32.high is clear before calling
cpu_exec_nocache when an exception is pending: the exception is raised by
the first instruction in the block, and that instruction cannot be
executed without resetting the flag.

There are two parts to the fix. First, clear icount_decr.u32.high in
cpu_handle_interrupt (just before processing the "dependent" request,
stored in cpu->interrupt_request or cpu->exit_request) rather than in
cpu_loop_exec_tb; this ensures that cpu_handle_exception is always
reached with zero icount_decr.u32.high unless another interrupt has
happened in the meanwhile. Second, try to cause the exception at the
beginning of cpu_handle_exception, and exit immediately if the TB cannot
execute. With this change, interrupts are processed and cpu_exec_nocache
can make progress.

Signed-off-by: Maria Klimushenkova <maria.klimushenkova@ispras.ru>
Signed-off-by: Pavel Dovgalyuk <pavel.dovgaluk@ispras.ru>
Message-Id: <20171114081818.27640.33165.stgit@pasha-VirtualBox>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent e01cecabf3
commit 17b50b0c29
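Before the diff, a minimal sketch of the mechanism the message describes.
The flag lives in the icount_decr union of CPUState: the TB prologue reads
the whole u32 as a signed counter and exits the TB when it is negative, so
the message's "u32.high" is the high half of that view, stored as u16.high.
The union layout below follows QEMU's CPUState (little-endian half order
only, as on x86 hosts); the surrounding demo program is purely illustrative:

    #include <stdint.h>
    #include <stdio.h>

    /* Little-endian sketch of CPUState::icount_decr.  The TB prologue
     * treats u32 as a signed counter and exits the TB when it is negative,
     * so writing -1 into u16.high is a cross-thread "kick", and zeroing
     * u16.high is the acknowledgement that re-enables execution.  */
    typedef union {
        uint32_t u32;
        struct {
            uint16_t low;   /* instructions left in the current icount slice */
            uint16_t high;  /* -1 requests a TB exit, 0 means run normally */
        } u16;
    } IcountDecr;

    int main(void)
    {
        IcountDecr d = { .u16 = { .low = 100, .high = 0 } };

        d.u16.high = (uint16_t)-1;       /* someone requests an exit */
        printf("%d\n", (int32_t)d.u32);  /* negative: TB exits immediately */

        d.u16.high = 0;                  /* what cpu_handle_interrupt now does
                                            before any TB is executed */
        printf("%d\n", (int32_t)d.u32);  /* 100 again: the one-instruction TB
                                            from cpu_exec_nocache can run */
        return 0;
    }

The bug was that nothing cleared u16.high again before cpu_exec_nocache
tried to run the faulting instruction, so the one-instruction TB exited
forever without making progress.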
@@ -470,48 +470,51 @@ static inline void cpu_handle_debug_exception(CPUState *cpu)
 
 static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
 {
-    if (cpu->exception_index >= 0) {
-        if (cpu->exception_index >= EXCP_INTERRUPT) {
-            /* exit request from the cpu execution loop */
-            *ret = cpu->exception_index;
-            if (*ret == EXCP_DEBUG) {
-                cpu_handle_debug_exception(cpu);
-            }
-            cpu->exception_index = -1;
-            return true;
-        } else {
-#if defined(CONFIG_USER_ONLY)
-            /* if user mode only, we simulate a fake exception
-               which will be handled outside the cpu execution
-               loop */
-#if defined(TARGET_I386)
-            CPUClass *cc = CPU_GET_CLASS(cpu);
-            cc->do_interrupt(cpu);
-#endif
-            *ret = cpu->exception_index;
-            cpu->exception_index = -1;
-            return true;
-#else
-            if (replay_exception()) {
-                CPUClass *cc = CPU_GET_CLASS(cpu);
-                qemu_mutex_lock_iothread();
-                cc->do_interrupt(cpu);
-                qemu_mutex_unlock_iothread();
-                cpu->exception_index = -1;
-            } else if (!replay_has_interrupt()) {
-                /* give a chance to iothread in replay mode */
-                *ret = EXCP_INTERRUPT;
-                return true;
-            }
-#endif
-        }
+    if (cpu->exception_index < 0) {
 #ifndef CONFIG_USER_ONLY
-    } else if (replay_has_exception()
+        if (replay_has_exception()
                && cpu->icount_decr.u16.low + cpu->icount_extra == 0) {
-        /* try to cause an exception pending in the log */
-        cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0, curr_cflags()), true);
-        *ret = -1;
-        return true;
+            /* try to cause an exception pending in the log */
+            cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0, curr_cflags()), true);
+        }
 #endif
+        if (cpu->exception_index < 0) {
+            return false;
+        }
+    }
+
+    if (cpu->exception_index >= EXCP_INTERRUPT) {
+        /* exit request from the cpu execution loop */
+        *ret = cpu->exception_index;
+        if (*ret == EXCP_DEBUG) {
+            cpu_handle_debug_exception(cpu);
+        }
+        cpu->exception_index = -1;
+        return true;
+    } else {
+#if defined(CONFIG_USER_ONLY)
+        /* if user mode only, we simulate a fake exception
+           which will be handled outside the cpu execution
+           loop */
+#if defined(TARGET_I386)
+        CPUClass *cc = CPU_GET_CLASS(cpu);
+        cc->do_interrupt(cpu);
+#endif
+        *ret = cpu->exception_index;
+        cpu->exception_index = -1;
+        return true;
+#else
+        if (replay_exception()) {
+            CPUClass *cc = CPU_GET_CLASS(cpu);
+            qemu_mutex_lock_iothread();
+            cc->do_interrupt(cpu);
+            qemu_mutex_unlock_iothread();
+            cpu->exception_index = -1;
+        } else if (!replay_has_interrupt()) {
+            /* give a chance to iothread in replay mode */
+            *ret = EXCP_INTERRUPT;
+            return true;
+        }
+#endif
     }
 
@@ -522,6 +525,19 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
                                         TranslationBlock **last_tb)
 {
     CPUClass *cc = CPU_GET_CLASS(cpu);
+    int32_t insns_left;
+
+    /* Clear the interrupt flag now since we're processing
+     * cpu->interrupt_request and cpu->exit_request.
+     */
+    insns_left = atomic_read(&cpu->icount_decr.u32);
+    atomic_set(&cpu->icount_decr.u16.high, 0);
+    if (unlikely(insns_left < 0)) {
+        /* Ensure the zeroing of icount_decr comes before the next read
+         * of cpu->exit_request or cpu->interrupt_request.
+         */
+        smp_mb();
+    }
 
     if (unlikely(atomic_read(&cpu->interrupt_request))) {
         int interrupt_request;
@@ -620,17 +636,14 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
 
         *last_tb = NULL;
         insns_left = atomic_read(&cpu->icount_decr.u32);
-        atomic_set(&cpu->icount_decr.u16.high, 0);
         if (insns_left < 0) {
             /* Something asked us to stop executing chained TBs; just
              * continue round the main loop. Whatever requested the exit
              * will also have set something else (eg exit_request or
-             * interrupt_request) which we will handle next time around
-             * the loop.  But we need to ensure the zeroing of icount_decr
-             * comes before the next read of cpu->exit_request
-             * or cpu->interrupt_request.
+             * interrupt_request) which will be handled by
+             * cpu_handle_interrupt.  cpu_handle_interrupt will also
+             * clear cpu->icount_decr.u16.high.
              */
-            smp_mb();
             return;
         }
 
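The barrier that moves from cpu_loop_exec_tb into cpu_handle_interrupt pairs
with the side that requests the exit. A toy model of the two halves of that
protocol, using C11 atomics in place of QEMU's atomic_read/atomic_set and
smp_wmb/smp_mb; the requester mirrors what cpu_exit() does in this tree, but
the struct and function names here are illustrative, not copies of the source:

    #include <stdatomic.h>

    struct toy_cpu {
        atomic_int exit_request;
        atomic_int icount_decr_high;   /* stands in for icount_decr.u16.high */
    };

    /* Requester side: publish the request first, then flip the counter
     * negative so the TB prologue bails out.  */
    static void request_exit(struct toy_cpu *cpu)
    {
        atomic_store_explicit(&cpu->exit_request, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_release);       /* smp_wmb() */
        atomic_store_explicit(&cpu->icount_decr_high, -1, memory_order_relaxed);
    }

    /* Executor side, as in cpu_handle_interrupt after this patch: zero the
     * flag, fence, then look at the request.  If the load of exit_request
     * could drift before the store of 0, a kick raised in between would be
     * wiped out without its request ever being seen.  */
    static int poll_requests(struct toy_cpu *cpu)
    {
        int left = atomic_load_explicit(&cpu->icount_decr_high,
                                        memory_order_relaxed);
        atomic_store_explicit(&cpu->icount_decr_high, 0, memory_order_relaxed);
        if (left < 0) {
            atomic_thread_fence(memory_order_seq_cst);   /* smp_mb() */
        }
        return atomic_load_explicit(&cpu->exit_request, memory_order_relaxed);
    }

Moving the acknowledgement into cpu_handle_interrupt means the flag is
cleared exactly where the requests it signals are serviced, which is what
lets cpu_handle_exception run the one-instruction TB with a clean counter.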