apic: Defer interrupt updates to VCPU thread

KVM performs TPR raising asynchronously to QEMU, specifically outside
QEMU's global lock. When an interrupt is injected into the APIC and TPR
is checked to decide if this can be delivered, a stale TPR value may be
used, causing spurious interrupts in the end.

Fix this by deferring apic_update_irq to the context of the target VCPU.
We introduce a new interrupt flag for this, CPU_INTERRUPT_POLL. When it
is set, the VCPU calls apic_poll_irq before checking for further pending
interrupts. To avoid special-casing KVM, we also implement this logic
for TCG mode.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Commit: 5d62c43a17
Author: Jan Kiszka <jan.kiszka@siemens.com>, 2012-07-09 16:42:32 +02:00
Committed by: Avi Kivity
Parent: a94820ddc3
6 changed files, 18 insertions(+), 3 deletions(-)

View File

@@ -288,6 +288,12 @@ int cpu_exec(CPUArchState *env)
                 }
 #endif
 #if defined(TARGET_I386)
+#if !defined(CONFIG_USER_ONLY)
+                    if (interrupt_request & CPU_INTERRUPT_POLL) {
+                        env->interrupt_request &= ~CPU_INTERRUPT_POLL;
+                        apic_poll_irq(env->apic_state);
+                    }
+#endif
                     if (interrupt_request & CPU_INTERRUPT_INIT) {
                             cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                           0);

View File

@@ -16,6 +16,7 @@
  * You should have received a copy of the GNU Lesser General Public
  * License along with this library; if not, see <http://www.gnu.org/licenses/>
  */
+#include "qemu-thread.h"
 #include "apic_internal.h"
 #include "apic.h"
 #include "ioapic.h"
@@ -361,7 +362,9 @@ static void apic_update_irq(APICCommonState *s)
     if (!(s->spurious_vec & APIC_SV_ENABLE)) {
         return;
     }
-    if (apic_irq_pending(s) > 0) {
+    if (!qemu_cpu_is_self(s->cpu_env)) {
+        cpu_interrupt(s->cpu_env, CPU_INTERRUPT_POLL);
+    } else if (apic_irq_pending(s) > 0) {
         cpu_interrupt(s->cpu_env, CPU_INTERRUPT_HARD);
     }
 }

View File

@@ -20,6 +20,7 @@ void apic_init_reset(DeviceState *s);
 void apic_sipi(DeviceState *s);
 void apic_handle_tpr_access_report(DeviceState *d, target_ulong ip,
                                    TPRAccess access);
+void apic_poll_irq(DeviceState *d);

 /* pc.c */
 int cpu_is_bsp(CPUX86State *env);

View File

@@ -141,7 +141,6 @@ void apic_report_irq_delivered(int delivered);
 bool apic_next_timer(APICCommonState *s, int64_t current_time);
 void apic_enable_tpr_access_reporting(DeviceState *d, bool enable);
 void apic_enable_vapic(DeviceState *d, target_phys_addr_t paddr);
-void apic_poll_irq(DeviceState *d);
 void vapic_report_tpr_access(DeviceState *dev, void *cpu, target_ulong ip,
                              TPRAccess access);

View File

@@ -478,6 +478,7 @@
                                  for syscall instruction */

 /* i386-specific interrupt pending bits.  */
+#define CPU_INTERRUPT_POLL      CPU_INTERRUPT_TGT_EXT_1
 #define CPU_INTERRUPT_SMI       CPU_INTERRUPT_TGT_EXT_2
 #define CPU_INTERRUPT_NMI       CPU_INTERRUPT_TGT_EXT_3
 #define CPU_INTERRUPT_MCE       CPU_INTERRUPT_TGT_EXT_4
@@ -1048,7 +1049,8 @@ static inline void cpu_clone_regs(CPUX86State *env, target_ulong newsp)
 static inline bool cpu_has_work(CPUX86State *env)
 {
-    return ((env->interrupt_request & CPU_INTERRUPT_HARD) &&
+    return ((env->interrupt_request & (CPU_INTERRUPT_HARD |
+                                       CPU_INTERRUPT_POLL)) &&
             (env->eflags & IF_MASK)) ||
            (env->interrupt_request & (CPU_INTERRUPT_NMI |
                                       CPU_INTERRUPT_INIT |

View File

@@ -1732,6 +1732,10 @@ int kvm_arch_process_async_events(CPUX86State *env)
         return 0;
     }

+    if (env->interrupt_request & CPU_INTERRUPT_POLL) {
+        env->interrupt_request &= ~CPU_INTERRUPT_POLL;
+        apic_poll_irq(env->apic_state);
+    }
+
     if (((env->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) ||
         (env->interrupt_request & CPU_INTERRUPT_NMI)) {