mirror of https://github.com/xemu-project/xemu.git (synced 2024-11-28 22:10:33 +00:00)
Bugfixes for 5.0-rc2.
-----BEGIN PGP SIGNATURE-----
iQFIBAABCAAyFiEE8TM4V0tmI4mGbHaCv/vSX3jHroMFAl6GNasUHHBib256aW5p
QHJlZGhhdC5jb20ACgkQv/vSX3jHroNHzwf/Vc9i0os2WogOq7FADfAl+Sw9Y/nM
n0dyuuYDCNIajh0hVkOBtzTRnDVPMbleSMf+jDbqs4Lk+LHZAe3jyEPyY9NgnuuZ
xDymrSm4HWJJhLWPTLdkAOdN61D4qTUODLuSPTML90EBABYFETaUXpx0ZReg+btS
hpEepCNUXWzYlV5tf2oO7kilfK34QkcJt5DuIeeHOKzC/bGdrH9dYSl58bFxMxOu
z8c/rk7XFXFcvgiQIOeuiRchseMjK3yDouS0PDQQiaFSquKb+uE18c/zaKrVUf6h
+hpveoma3iIIqGaNqyLomu4gsprBtRU++NWgIvyAMTrgj85na0J0OXyDEA==
=laXu
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging

Bugfixes for 5.0-rc2.

# gpg: Signature made Thu 02 Apr 2020 19:57:47 BST
# gpg:                using RSA key F13338574B662389866C7682BFFBD25F78C7AE83
# gpg:                issuer "pbonzini@redhat.com"
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>" [full]
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>" [full]
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4  E2F7 7E15 100C CD36 69B1
#      Subkey fingerprint: F133 3857 4B66 2389 866C  7682 BFFB D25F 78C7 AE83

* remotes/bonzini/tags/for-upstream:
  xen: fixup RAM memory region initialization
  object-add: don't create return value if failed
  qmp: fix leak on callbacks that return both value and error
  migration: fix cleanup_bh leak on resume
  target/i386: do not set unsupported VMX secondary execution controls
  serial: Fix double migration data
  i386: hvf: Reset IRQ inhibition after moving RIP
  vl: fix broken IPA range for ARM -M virt with KVM enabled
  util/bufferiszero: improve avx2 accelerator
  util/bufferiszero: assign length_to_accel value for each accelerator case
  MAINTAINERS: Add an entry for the HVF accelerator
  softmmu: fix crash with invalid -M memory-backend=
  virtio-iommu: depend on PCI
  hw/isa/superio: Correct the license text
  hw/scsi/vmw_pvscsi: Remove assertion for kick after reset

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 5142ca078d
MAINTAINERS
@@ -412,6 +412,13 @@ S: Supported
 F: target/i386/kvm.c
 F: scripts/kvm/vmxcap
 
+X86 HVF CPUs
+M: Roman Bolshakov <r.bolshakov@yadro.com>
+S: Maintained
+F: accel/stubs/hvf-stub.c
+F: target/i386/hvf/
+F: include/sysemu/hvf.h
+
 WHPX CPUs
 M: Sunil Muthuswamy <sunilmut@microsoft.com>
 S: Supported
hw/char/serial.c
@@ -1043,7 +1043,6 @@ static void serial_class_init(ObjectClass *klass, void* data)
     dc->user_creatable = false;
     dc->realize = serial_realize;
     dc->unrealize = serial_unrealize;
-    dc->vmsd = &vmstate_serial;
     device_class_set_props(dc, serial_properties);
 }
 
@@ -1113,6 +1112,16 @@ static void serial_mm_realize(DeviceState *dev, Error **errp)
     sysbus_init_irq(SYS_BUS_DEVICE(smm), &smm->serial.irq);
 }
 
+static const VMStateDescription vmstate_serial_mm = {
+    .name = "serial",
+    .version_id = 3,
+    .minimum_version_id = 2,
+    .fields = (VMStateField[]) {
+        VMSTATE_STRUCT(serial, SerialMM, 0, vmstate_serial, SerialState),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
 SerialMM *serial_mm_init(MemoryRegion *address_space,
                          hwaddr base, int regshift,
                          qemu_irq irq, int baudbase,
@@ -1162,6 +1171,7 @@ static void serial_mm_class_init(ObjectClass *oc, void *data)
 
     device_class_set_props(dc, serial_mm_properties);
     dc->realize = serial_mm_realize;
+    dc->vmsd = &vmstate_serial_mm;
 }
 
 static const TypeInfo serial_mm_info = {
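The serial change above stops the UART state from being sent twice on migration: TYPE_SERIAL no longer registers vmstate_serial as its class vmsd, and the memory-mapped wrapper instead migrates its embedded SerialState exactly once through the new vmstate_serial_mm (still named "serial" so existing streams keep working). A minimal standalone C sketch of the single-owner idea, using stand-in types rather than QEMU's real VMState machinery:

    #include <stdio.h>

    /* Stand-in types: a UART state and the memory-mapped wrapper that embeds it. */
    typedef struct { int divider, lcr; } SerialState;
    typedef struct { SerialState serial; } SerialMM;

    /* Emit the child's fields; in QEMU this role is played by vmstate_serial. */
    static void save_serial(const SerialState *s)
    {
        printf("section \"serial\": divider=%d lcr=%d\n", s->divider, s->lcr);
    }

    /* Only the wrapper's saver descends into the embedded child, so the fields
     * go out once; with two registered savers they would be emitted twice. */
    static void save_serial_mm(const SerialMM *smm)
    {
        save_serial(&smm->serial);
    }

    int main(void)
    {
        SerialMM smm = { .serial = { .divider = 12, .lcr = 3 } };
        save_serial_mm(&smm);
        return 0;
    }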
@@ -5,7 +5,7 @@
  * Copyright (c) 2011-2012 Andreas Färber
  * Copyright (c) 2018 Philippe Mathieu-Daudé
  *
- * This code is licensed under the GNU GPLv2 and later.
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
  * See the COPYING file in the top-level directory.
  * SPDX-License-Identifier: GPL-2.0-or-later
  */
@@ -3,7 +3,7 @@
  *
  * Copyright (c) 2018 Philippe Mathieu-Daudé
  *
- * This code is licensed under the GNU GPLv2 and later.
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
  * See the COPYING file in the top-level directory.
  * SPDX-License-Identifier: GPL-2.0-or-later
  */
hw/scsi/vmw_pvscsi.c
@@ -719,7 +719,10 @@ pvscsi_process_io(PVSCSIState *s)
     PVSCSIRingReqDesc descr;
     hwaddr next_descr_pa;
 
-    assert(s->rings_info_valid);
+    if (!s->rings_info_valid) {
+        return;
+    }
+
     while ((next_descr_pa = pvscsi_ring_pop_req_descr(&s->rings)) != 0) {
 
         /* Only read after production index verification */
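The vmw_pvscsi change replaces an assertion with an early return because the condition is guest-reachable: a guest can kick the device right after a reset, before the rings are configured again, and that must not abort QEMU. A small standalone sketch of the pattern, using toy types instead of the real PVSCSI state:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for the device state; in the real device this tracks ring setup. */
    typedef struct {
        bool rings_info_valid;
        int  pending_reqs;
    } ToyDevice;

    static void process_io(ToyDevice *d)
    {
        /* A guest may legitimately kick the device while the rings are torn
         * down, so this is an ignorable condition, not an assert(). */
        if (!d->rings_info_valid) {
            return;
        }
        printf("processing %d request(s)\n", d->pending_reqs);
    }

    int main(void)
    {
        ToyDevice d = { .rings_info_valid = false, .pending_reqs = 1 };
        process_io(&d);              /* ignored: rings not set up yet */
        d.rings_info_valid = true;
        process_io(&d);              /* now handled normally */
        return 0;
    }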
hw/virtio/Kconfig
@@ -12,7 +12,7 @@ config VIRTIO_RNG
 config VIRTIO_IOMMU
     bool
     default y
-    depends on VIRTIO
+    depends on PCI && VIRTIO
 
 config VIRTIO_PCI
     bool
@@ -19,6 +19,7 @@
 #include "sysemu/runstate.h"
 #include "migration/misc.h"
 #include "migration/global_state.h"
+#include "hw/boards.h"
 
 //#define DEBUG_XEN
 
@@ -151,6 +152,8 @@ static void xen_setup_post(MachineState *ms, AccelState *accel)
 
 static int xen_init(MachineState *ms)
 {
+    MachineClass *mc = MACHINE_GET_CLASS(ms);
+
     xen_xc = xc_interface_open(0, 0, 0);
     if (xen_xc == NULL) {
         xen_pv_printf(NULL, 0, "can't open xen interface\n");
@@ -170,6 +173,10 @@ static int xen_init(MachineState *ms)
         return -1;
     }
     qemu_add_vm_change_state_handler(xen_change_state_handler, NULL);
+    /*
+     * opt out of system RAM being allocated by generic code
+     */
+    mc->default_ram_id = NULL;
     return 0;
 }
 
@@ -3,7 +3,7 @@
  *
  * Copyright (c) 2018 Philippe Mathieu-Daudé
  *
- * This code is licensed under the GNU GPLv2 and later.
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
  * See the COPYING file in the top-level directory.
  * SPDX-License-Identifier: GPL-2.0-or-later
  */
migration/migration.c
@@ -3478,7 +3478,12 @@ void migrate_fd_connect(MigrationState *s, Error *error_in)
     bool resume = s->state == MIGRATION_STATUS_POSTCOPY_PAUSED;
 
     s->expected_downtime = s->parameters.downtime_limit;
-    s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup_bh, s);
+    if (resume) {
+        assert(s->cleanup_bh);
+    } else {
+        assert(!s->cleanup_bh);
+        s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup_bh, s);
+    }
     if (error_in) {
         migrate_fd_error(s, error_in);
         migrate_fd_cleanup(s);
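The migration fix addresses a leak: migrate_fd_connect() runs both for a fresh outgoing migration and when a paused postcopy migration is resumed, and it used to allocate a new cleanup bottom half unconditionally, dropping the one the MigrationState already owned on the resume path. A standalone sketch of the create-once rule, with a plain heap object standing in for QEMU's QEMUBH:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in: a "bottom half" is just a heap object here. */
    typedef struct { void (*cb)(void *); void *opaque; } ToyBH;

    static ToyBH *toy_bh_new(void (*cb)(void *), void *opaque)
    {
        ToyBH *bh = malloc(sizeof(*bh));
        bh->cb = cb;
        bh->opaque = opaque;
        return bh;
    }

    typedef struct { ToyBH *cleanup_bh; } ToyMigrationState;

    static void cleanup_cb(void *opaque) { (void)opaque; }

    /* Allocate the cleanup callback only on a fresh start; a resumed migration
     * must reuse the one it already owns instead of leaking it. */
    static void connect_migration(ToyMigrationState *s, bool resume)
    {
        if (resume) {
            assert(s->cleanup_bh);
        } else {
            assert(!s->cleanup_bh);
            s->cleanup_bh = toy_bh_new(cleanup_cb, s);
        }
    }

    int main(void)
    {
        ToyMigrationState s = { NULL };
        connect_migration(&s, false);  /* fresh start: bottom half created */
        connect_migration(&s, true);   /* resume: existing one reused, no leak */
        free(s.cleanup_bh);
        printf("ok\n");
        return 0;
    }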
qapi/qmp-dispatch.c
@@ -155,6 +155,8 @@ QDict *qmp_dispatch(const QmpCommandList *cmds, QObject *request,
     cmd->fn(args, &ret, &err);
     qobject_unref(args);
     if (err) {
+        /* or assert(!ret) after reviewing all handlers: */
+        qobject_unref(ret);
         goto out;
     }
 
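The qmp_dispatch change plugs a reference leak for command handlers that fill in a return value and then also set an error; previously only the error was consumed and the return value was left referenced. A self-contained sketch of the rule "on error, drop whatever the handler returned", using a toy refcounted object instead of QObject:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Toy refcounted object standing in for QObject. */
    typedef struct { int ref; } ToyObj;

    static ToyObj *toy_new(void)        { ToyObj *o = calloc(1, sizeof(*o)); o->ref = 1; return o; }
    static void    toy_unref(ToyObj *o) { if (o && --o->ref == 0) { free(o); } }

    /* A sloppy handler: it builds a return value and then also reports an error. */
    static void handler(ToyObj **ret, bool *err)
    {
        *ret = toy_new();
        *err = true;
    }

    static void dispatch(void)
    {
        ToyObj *ret = NULL;
        bool err = false;

        handler(&ret, &err);
        if (err) {
            /* Without this unref the partially built return value leaks. */
            toy_unref(ret);
            printf("error reported, return value dropped\n");
            return;
        }
        /* ...normally the return value would be sent to the client here... */
        toy_unref(ret);
    }

    int main(void)
    {
        dispatch();
        return 0;
    }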
@@ -285,10 +285,7 @@ void qmp_object_add(QDict *qdict, QObject **ret_data, Error **errp)
     v = qobject_input_visitor_new(QOBJECT(qdict));
     obj = user_creatable_add_type(type, id, qdict, v, errp);
     visit_free(v);
-    if (obj) {
-        object_unref(obj);
-    }
-    *ret_data = QOBJECT(qdict_new());
+    object_unref(obj);
 }
 
 void qmp_object_del(const char *id, Error **errp)
softmmu/vl.c
@@ -4137,6 +4137,9 @@ void qemu_init(int argc, char **argv, char **envp)
     machine_opts = qemu_get_machine_opts();
     qemu_opt_foreach(machine_opts, machine_set_property, current_machine,
                      &error_fatal);
+    current_machine->ram_size = ram_size;
+    current_machine->maxram_size = maxram_size;
+    current_machine->ram_slots = ram_slots;
 
     /*
      * Note: uses machine properties such as kernel-irqchip, must run
@@ -4298,6 +4301,11 @@ void qemu_init(int argc, char **argv, char **envp)
 
         backend = object_resolve_path_type(current_machine->ram_memdev_id,
                                            TYPE_MEMORY_BACKEND, NULL);
+        if (!backend) {
+            error_report("Memory backend '%s' not found",
+                         current_machine->ram_memdev_id);
+            exit(EXIT_FAILURE);
+        }
         backend_size = object_property_get_uint(backend, "size", &error_abort);
         if (have_custom_ram_size && backend_size != ram_size) {
             error_report("Size specified by -m option must match size of "
@@ -4315,10 +4323,6 @@ void qemu_init(int argc, char **argv, char **envp)
         }
     }
 
-    current_machine->ram_size = ram_size;
-    current_machine->maxram_size = maxram_size;
-    current_machine->ram_slots = ram_slots;
-
     parse_numa_opts(current_machine);
 
     if (machine_class->default_ram_id && current_machine->ram_size &&
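Of the three vl.c hunks, the middle one is the '-M memory-backend=' crash fix: the user-supplied backend id is resolved and rejected with a clear error instead of being dereferenced when the lookup returns NULL; the first and third hunks move the ram_size/maxram_size/ram_slots assignments earlier so the machine sees them before the accelerator is configured (the ARM -M virt IPA-range fix). A small standalone sketch of the lookup-then-validate pattern; the registry and names here are invented for illustration:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Toy registry standing in for QEMU's object tree. */
    static const char *known_backends[] = { "mem0", "mem1" };

    static const char *resolve_backend(const char *id)
    {
        for (size_t i = 0; i < sizeof(known_backends) / sizeof(known_backends[0]); i++) {
            if (strcmp(known_backends[i], id) == 0) {
                return known_backends[i];
            }
        }
        return NULL;                  /* not found */
    }

    int main(int argc, char **argv)
    {
        const char *id = argc > 1 ? argv[1] : "mem0";
        const char *backend = resolve_backend(id);

        /* Validate the user-supplied id before use instead of crashing on NULL. */
        if (!backend) {
            fprintf(stderr, "Memory backend '%s' not found\n", id);
            exit(EXIT_FAILURE);
        }
        printf("using memory backend '%s'\n", backend);
        return 0;
    }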
target/i386/hvf/vmx.h
@@ -167,6 +167,8 @@ static inline void macvm_set_cr4(hv_vcpuid_t vcpu, uint64_t cr4)
 
 static inline void macvm_set_rip(CPUState *cpu, uint64_t rip)
 {
+    X86CPU *x86_cpu = X86_CPU(cpu);
+    CPUX86State *env = &x86_cpu->env;
     uint64_t val;
 
     /* BUG, should take considering overlap.. */
@@ -176,6 +178,7 @@ static inline void macvm_set_rip(CPUState *cpu, uint64_t rip)
     val = rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);
     if (val & (VMCS_INTERRUPTIBILITY_STI_BLOCKING |
                VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)) {
+        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
         wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY,
               val & ~(VMCS_INTERRUPTIBILITY_STI_BLOCKING |
                       VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING));
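The vmx.h change keeps QEMU's cached interrupt-inhibition state in sync with the VMCS: when RIP is moved and the STI/MOV-SS blocking bits are cleared, HF_INHIBIT_IRQ_MASK in env->hflags must be cleared as well, otherwise interrupt injection stays suppressed. A standalone sketch of keeping the two views consistent; the flag values below are made up for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* Made-up bit values, only for illustration. */
    #define STI_BLOCKING      (1u << 0)
    #define MOVSS_BLOCKING    (1u << 1)
    #define HF_INHIBIT_IRQ    (1u << 3)

    typedef struct {
        uint32_t interruptibility;   /* stand-in for the VMCS field */
        uint32_t hflags;             /* stand-in for the emulator's cached flags */
        uint64_t rip;
    } ToyVCPU;

    static void set_rip(ToyVCPU *cpu, uint64_t rip)
    {
        cpu->rip = rip;
        if (cpu->interruptibility & (STI_BLOCKING | MOVSS_BLOCKING)) {
            /* Clear both views of the inhibition, not just the hardware one. */
            cpu->hflags &= ~HF_INHIBIT_IRQ;
            cpu->interruptibility &= ~(STI_BLOCKING | MOVSS_BLOCKING);
        }
    }

    int main(void)
    {
        ToyVCPU cpu = { .interruptibility = STI_BLOCKING, .hflags = HF_INHIBIT_IRQ };
        set_rip(&cpu, 0x1000);
        printf("hflags=%#x interruptibility=%#x\n", cpu.hflags, cpu.interruptibility);
        return 0;
    }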
target/i386/kvm.c
@@ -106,6 +106,7 @@ static bool has_msr_arch_capabs;
 static bool has_msr_core_capabs;
 static bool has_msr_vmx_vmfunc;
 static bool has_msr_ucode_rev;
+static bool has_msr_vmx_procbased_ctls2;
 
 static uint32_t has_architectural_pmu_version;
 static uint32_t num_architectural_pmu_gp_counters;
@@ -490,21 +491,28 @@ uint64_t kvm_arch_get_supported_msr_feature(KVMState *s, uint32_t index)
     value = msr_data.entries[0].data;
     switch (index) {
     case MSR_IA32_VMX_PROCBASED_CTLS2:
-        /* KVM forgot to add these bits for some time, do this ourselves. */
-        if (kvm_arch_get_supported_cpuid(s, 0xD, 1, R_ECX) & CPUID_XSAVE_XSAVES) {
-            value |= (uint64_t)VMX_SECONDARY_EXEC_XSAVES << 32;
-        }
-        if (kvm_arch_get_supported_cpuid(s, 1, 0, R_ECX) & CPUID_EXT_RDRAND) {
-            value |= (uint64_t)VMX_SECONDARY_EXEC_RDRAND_EXITING << 32;
-        }
-        if (kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX) & CPUID_7_0_EBX_INVPCID) {
-            value |= (uint64_t)VMX_SECONDARY_EXEC_ENABLE_INVPCID << 32;
-        }
-        if (kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX) & CPUID_7_0_EBX_RDSEED) {
-            value |= (uint64_t)VMX_SECONDARY_EXEC_RDSEED_EXITING << 32;
-        }
-        if (kvm_arch_get_supported_cpuid(s, 0x80000001, 0, R_EDX) & CPUID_EXT2_RDTSCP) {
-            value |= (uint64_t)VMX_SECONDARY_EXEC_RDTSCP << 32;
-        }
+        if (!has_msr_vmx_procbased_ctls2) {
+            /* KVM forgot to add these bits for some time, do this ourselves. */
+            if (kvm_arch_get_supported_cpuid(s, 0xD, 1, R_ECX) &
+                CPUID_XSAVE_XSAVES) {
+                value |= (uint64_t)VMX_SECONDARY_EXEC_XSAVES << 32;
+            }
+            if (kvm_arch_get_supported_cpuid(s, 1, 0, R_ECX) &
+                CPUID_EXT_RDRAND) {
+                value |= (uint64_t)VMX_SECONDARY_EXEC_RDRAND_EXITING << 32;
+            }
+            if (kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX) &
+                CPUID_7_0_EBX_INVPCID) {
+                value |= (uint64_t)VMX_SECONDARY_EXEC_ENABLE_INVPCID << 32;
+            }
+            if (kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX) &
+                CPUID_7_0_EBX_RDSEED) {
+                value |= (uint64_t)VMX_SECONDARY_EXEC_RDSEED_EXITING << 32;
+            }
+            if (kvm_arch_get_supported_cpuid(s, 0x80000001, 0, R_EDX) &
+                CPUID_EXT2_RDTSCP) {
+                value |= (uint64_t)VMX_SECONDARY_EXEC_RDTSCP << 32;
+            }
+        }
         /* fall through */
     case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
@@ -2060,6 +2068,9 @@ static int kvm_get_supported_msrs(KVMState *s)
             case MSR_IA32_UCODE_REV:
                 has_msr_ucode_rev = true;
                 break;
+            case MSR_IA32_VMX_PROCBASED_CTLS2:
+                has_msr_vmx_procbased_ctls2 = true;
+                break;
             }
         }
     }
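The kvm.c change makes the old workaround conditional: QEMU used to unconditionally OR extra VMX secondary-execution bits (XSAVES, RDRAND/RDSEED exiting, INVPCID, RDTSCP) into the reported value because old kernels omitted them, which on newer kernels could advertise controls KVM then refuses to set. A standalone sketch of "patch the feature word only when the kernel does not report the MSR at all"; the constants are invented for illustration:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Invented control bits, for illustration only. */
    #define EXEC_XSAVES     (1ull << 20)
    #define EXEC_RDSEED     (1ull << 16)

    static bool kernel_reports_ctls2;   /* did the kernel list this MSR at all? */

    static uint64_t get_secondary_controls(uint64_t value_from_kernel,
                                           bool cpu_has_xsaves, bool cpu_has_rdseed)
    {
        uint64_t value = value_from_kernel;

        if (!kernel_reports_ctls2) {
            /* Old kernels: reconstruct the missing allowed-1 bits from CPUID. */
            if (cpu_has_xsaves) {
                value |= EXEC_XSAVES << 32;
            }
            if (cpu_has_rdseed) {
                value |= EXEC_RDSEED << 32;
            }
        }
        /* New kernels: trust what they report, don't add unsupported controls. */
        return value;
    }

    int main(void)
    {
        kernel_reports_ctls2 = true;
        printf("%#llx\n", (unsigned long long)
               get_secondary_controls(EXEC_XSAVES << 32, true, true));
        return 0;
    }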
util/bufferiszero.c
@@ -158,27 +158,19 @@ buffer_zero_avx2(const void *buf, size_t len)
     __m256i *p = (__m256i *)(((uintptr_t)buf + 5 * 32) & -32);
     __m256i *e = (__m256i *)(((uintptr_t)buf + len) & -32);
 
-    if (likely(p <= e)) {
-        /* Loop over 32-byte aligned blocks of 128. */
-        do {
-            __builtin_prefetch(p);
-            if (unlikely(!_mm256_testz_si256(t, t))) {
-                return false;
-            }
-            t = p[-4] | p[-3] | p[-2] | p[-1];
-            p += 4;
-        } while (p <= e);
-    } else {
-        t |= _mm256_loadu_si256(buf + 32);
-        if (len <= 128) {
-            goto last2;
-        }
-    }
+    /* Loop over 32-byte aligned blocks of 128. */
+    while (p <= e) {
+        __builtin_prefetch(p);
+        if (unlikely(!_mm256_testz_si256(t, t))) {
+            return false;
+        }
+        t = p[-4] | p[-3] | p[-2] | p[-1];
+        p += 4;
+    } ;
 
     /* Finish the last block of 128 unaligned. */
     t |= _mm256_loadu_si256(buf + len - 4 * 32);
     t |= _mm256_loadu_si256(buf + len - 3 * 32);
- last2:
     t |= _mm256_loadu_si256(buf + len - 2 * 32);
     t |= _mm256_loadu_si256(buf + len - 1 * 32);
 
@@ -254,13 +246,16 @@ static void init_accel(unsigned cache)
     bool (*fn)(const void *, size_t) = buffer_zero_int;
     if (cache & CACHE_SSE2) {
         fn = buffer_zero_sse2;
+        length_to_accel = 64;
     }
 #ifdef CONFIG_AVX2_OPT
     if (cache & CACHE_SSE4) {
         fn = buffer_zero_sse4;
+        length_to_accel = 64;
     }
     if (cache & CACHE_AVX2) {
        fn = buffer_zero_avx2;
+        length_to_accel = 128;
     }
 #endif
 #ifdef CONFIG_AVX512F_OPT
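Taken together, the bufferiszero hunks flatten the AVX2 helper into one while loop plus an unconditional unaligned 128-byte tail (which is why it needs len >= 128), and make each accelerator record its minimum length in length_to_accel so the dispatcher only calls it for buffers it can handle. A self-contained sketch with the same structure that builds with gcc -mavx2 -O2; the function name and test harness are mine, not QEMU's:

    #include <immintrin.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Requires len >= 128 (the caller dispatches smaller buffers elsewhere). */
    static bool buffer_is_zero_avx2(const void *buf, size_t len)
    {
        const uint8_t *b = buf;
        /* Unaligned head of 32 bytes. */
        __m256i t = _mm256_loadu_si256((const __m256i *)b);
        /* First/last 32-byte aligned pointers covering the middle in 128-byte steps. */
        const __m256i *p = (const __m256i *)(((uintptr_t)b + 5 * 32) & -32);
        const __m256i *e = (const __m256i *)(((uintptr_t)b + len) & -32);

        while (p <= e) {
            if (!_mm256_testz_si256(t, t)) {
                return false;
            }
            t = _mm256_or_si256(_mm256_or_si256(p[-4], p[-3]),
                                _mm256_or_si256(p[-2], p[-1]));
            p += 4;
        }

        /* Unconditionally check the last 128 bytes, unaligned. */
        t = _mm256_or_si256(t, _mm256_loadu_si256((const __m256i *)(b + len - 4 * 32)));
        t = _mm256_or_si256(t, _mm256_loadu_si256((const __m256i *)(b + len - 3 * 32)));
        t = _mm256_or_si256(t, _mm256_loadu_si256((const __m256i *)(b + len - 2 * 32)));
        t = _mm256_or_si256(t, _mm256_loadu_si256((const __m256i *)(b + len - 1 * 32)));
        return _mm256_testz_si256(t, t);
    }

    int main(void)
    {
        /* QEMU would only dispatch here when len >= length_to_accel (128). */
        static uint8_t buf[4096];
        printf("%d\n", buffer_is_zero_avx2(buf, sizeof(buf)));   /* 1: all zero */
        buf[4095] = 1;
        printf("%d\n", buffer_is_zero_avx2(buf, sizeof(buf)));   /* 0: tail hit */
        return 0;
    }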