mirror of
https://github.com/xemu-project/xemu.git
synced 2024-11-24 12:09:58 +00:00
75e972dab5
As a rule, CPU internal state should never be updated when !cpu->kvm_vcpu_dirty (or the HAX equivalent). If that is done, then subsequent calls to cpu_synchronize_state() - usually safe and idempotent - will clobber state. However, we routinely do this during a loadvm or incoming migration. Usually this is called shortly after a reset, which will clear all the cpu dirty flags with cpu_synchronize_all_post_reset(). Nothing is expected to set the dirty flags again before the cpu state is loaded from the incoming stream. This means that it isn't safe to call cpu_synchronize_state() from a post_load handler, which is non-obvious and potentially inconvenient. We could cpu_synchronize_all_state() before the loadvm, but that would be overkill since a) we expect the state to already be synchronized from the reset and b) we expect to completely rewrite the state with a call to cpu_synchronize_all_post_init() at the end of qemu_loadvm_state(). To clear this up, this patch introduces cpu_synchronize_pre_loadvm() and associated helpers, which simply marks the cpu state as dirty without actually changing anything. i.e. it says we want to discard any existing KVM (or HAX) state and replace it with what we're going to load. Cc: Juan Quintela <quintela@redhat.com> Cc: Dave Gilbert <dgilbert@redhat.com> Signed-off-by: David Gibson <david@gibson.dropbear.id.au> Reviewed-by: Juan Quintela <quintela@redhat.com>
59 lines
1.2 KiB
C
59 lines
1.2 KiB
C
/*
 * QEMU hardware accelerators support
 *
 * Copyright 2016 Google, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
|
#ifndef QEMU_HW_ACCEL_H
|
|
#define QEMU_HW_ACCEL_H
|
|
|
|
#include "qom/cpu.h"
|
|
#include "sysemu/hax.h"
|
|
#include "sysemu/kvm.h"
|
|
|
|
/**
 * cpu_synchronize_state:
 * @cpu: vCPU whose register state should be made visible to QEMU
 *
 * Ask the active hardware accelerator (KVM or HAX, if any) to copy the
 * vCPU's current state into QEMU's CPUState so it can be inspected.
 * A no-op when neither accelerator is enabled (e.g. pure TCG).
 */
static inline void cpu_synchronize_state(CPUState *cpu)
{
    if (kvm_enabled()) {
        kvm_cpu_synchronize_state(cpu);
    }
    if (hax_enabled()) {
        hax_cpu_synchronize_state(cpu);
    }
}
|
|
|
|
/**
 * cpu_synchronize_post_reset:
 * @cpu: vCPU that has just been reset
 *
 * Push QEMU's post-reset CPUState back into the active hardware
 * accelerator (KVM or HAX, if any). A no-op when neither is enabled.
 */
static inline void cpu_synchronize_post_reset(CPUState *cpu)
{
    if (kvm_enabled()) {
        kvm_cpu_synchronize_post_reset(cpu);
    }
    if (hax_enabled()) {
        hax_cpu_synchronize_post_reset(cpu);
    }
}
|
|
|
|
/**
 * cpu_synchronize_post_init:
 * @cpu: vCPU whose freshly-initialized state should be committed
 *
 * Push QEMU's CPUState into the active hardware accelerator (KVM or
 * HAX, if any) after initialization — e.g. at the end of an incoming
 * migration or loadvm. A no-op when neither accelerator is enabled.
 */
static inline void cpu_synchronize_post_init(CPUState *cpu)
{
    if (kvm_enabled()) {
        kvm_cpu_synchronize_post_init(cpu);
    }
    if (hax_enabled()) {
        hax_cpu_synchronize_post_init(cpu);
    }
}
|
|
|
|
/**
 * cpu_synchronize_pre_loadvm:
 * @cpu: vCPU about to have its state overwritten by loadvm/migration
 *
 * Mark the vCPU state as dirty without changing anything, so any stale
 * accelerator-side (KVM or HAX) state is discarded rather than being
 * re-synchronized over the state we are about to load. This makes it
 * safe for post_load handlers to call cpu_synchronize_state().
 * A no-op when neither accelerator is enabled.
 */
static inline void cpu_synchronize_pre_loadvm(CPUState *cpu)
{
    if (kvm_enabled()) {
        kvm_cpu_synchronize_pre_loadvm(cpu);
    }
    if (hax_enabled()) {
        hax_cpu_synchronize_pre_loadvm(cpu);
    }
}
|
|
|
|
#endif /* QEMU_HW_ACCEL_H */
|