2011-04-01 04:15:20 +00:00
|
|
|
/*
|
|
|
|
* QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
|
|
|
|
*
|
|
|
|
* Copyright (c) 2004-2007 Fabrice Bellard
|
|
|
|
* Copyright (c) 2007 Jocelyn Mayer
|
|
|
|
* Copyright (c) 2010 David Gibson, IBM Corporation.
|
|
|
|
*
|
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
|
|
* of this software and associated documentation files (the "Software"), to deal
|
|
|
|
* in the Software without restriction, including without limitation the rights
|
|
|
|
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
|
|
* copies of the Software, and to permit persons to whom the Software is
|
|
|
|
* furnished to do so, subject to the following conditions:
|
|
|
|
*
|
|
|
|
* The above copyright notice and this permission notice shall be included in
|
|
|
|
* all copies or substantial portions of the Software.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
|
|
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
|
|
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
|
|
|
* THE SOFTWARE.
|
|
|
|
*
|
|
|
|
*/
|
2016-01-26 18:16:58 +00:00
|
|
|
#include "qemu/osdep.h"
|
include/qemu/osdep.h: Don't include qapi/error.h
Commit 57cb38b included qapi/error.h into qemu/osdep.h to get the
Error typedef. Since then, we've moved to include qemu/osdep.h
everywhere. Its file comment explains: "To avoid getting into
possible circular include dependencies, this file should not include
any other QEMU headers, with the exceptions of config-host.h,
compiler.h, os-posix.h and os-win32.h, all of which are doing a
similar job to this file and are under similar constraints."
qapi/error.h doesn't do a similar job, and it doesn't adhere to
similar constraints: it includes qapi-types.h. That's in excess of
100KiB of crap most .c files don't actually need.
Add the typedef to qemu/typedefs.h, and include that instead of
qapi/error.h. Include qapi/error.h in .c files that need it and don't
get it now. Include qapi-types.h in qom/object.h for uint16List.
Update scripts/clean-includes accordingly. Update it further to match
reality: replace config.h by config-target.h, add sysemu/os-posix.h,
sysemu/os-win32.h. Update the list of includes in the qemu/osdep.h
comment quoted above similarly.
This reduces the number of objects depending on qapi/error.h from "all
of them" to less than a third. Unfortunately, the number depending on
qapi-types.h shrinks only a little. More work is needed for that one.
Signed-off-by: Markus Armbruster <armbru@redhat.com>
[Fix compilation without the spice devel packages. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2016-03-14 08:01:28 +00:00
|
|
|
#include "qapi/error.h"
|
2012-12-17 17:20:04 +00:00
|
|
|
#include "sysemu/sysemu.h"
|
2015-02-08 18:51:16 +00:00
|
|
|
#include "sysemu/numa.h"
|
2013-02-04 14:40:22 +00:00
|
|
|
#include "hw/hw.h"
|
2015-12-15 12:16:16 +00:00
|
|
|
#include "qemu/log.h"
|
2014-03-17 02:40:27 +00:00
|
|
|
#include "hw/fw-path-provider.h"
|
2011-04-01 04:15:20 +00:00
|
|
|
#include "elf.h"
|
2012-10-24 06:43:34 +00:00
|
|
|
#include "net/net.h"
|
2015-09-01 01:25:35 +00:00
|
|
|
#include "sysemu/device_tree.h"
|
2014-10-07 11:59:13 +00:00
|
|
|
#include "sysemu/block-backend.h"
|
2012-12-17 17:20:04 +00:00
|
|
|
#include "sysemu/cpus.h"
|
|
|
|
#include "sysemu/kvm.h"
|
2015-09-01 01:22:35 +00:00
|
|
|
#include "sysemu/device_tree.h"
|
2011-09-29 21:39:10 +00:00
|
|
|
#include "kvm_ppc.h"
|
2015-06-12 17:37:52 +00:00
|
|
|
#include "migration/migration.h"
|
2013-07-18 19:33:01 +00:00
|
|
|
#include "mmu-hash64.h"
|
spapr: Implement processor compatibility in ibm, client-architecture-support
Modern Linux kernels support last POWERPC CPUs so when a kernel boots,
in most cases it can find a matching cpu_spec in the kernel's cpu_specs
list. However if the kernel is quite old, it may be missing a definition
of the actual CPU. To provide an ability for old kernels to work on modern
hardware, a Processor Compatibility Mode has been introduced
by the PowerISA specification.
From the hardware perspective, it is supported by the Processor
Compatibility Register (PCR) which is defined in PowerISA. The register
enables one of the compatibility modes (2.05/2.06/2.07).
Since PCR is a hypervisor privileged register and cannot be
directly accessed from the guest, the mode selection is done via
ibm,client-architecture-support (CAS) RTAS call using which the guest
specifies what "raw" and "architected" CPU versions it supports.
QEMU works out the best match, changes a "cpu-version" property of
every CPU and notifies the guest about the change by setting these
properties in the buffer passed as a response on a custom H_CAS hypercall.
This implements ibm,client-architecture-support parameters parsing
(now only for PVRs) and cooks the device tree diff with new values for
"cpu-version", "ibm,ppc-interrupt-server#s" and
"ibm,ppc-interrupt-gserver#s" properties.
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Alexander Graf <agraf@suse.de>
2014-05-23 02:26:57 +00:00
|
|
|
#include "qom/cpu.h"
|
2011-04-01 04:15:20 +00:00
|
|
|
|
|
|
|
#include "hw/boards.h"
|
2013-02-05 16:06:20 +00:00
|
|
|
#include "hw/ppc/ppc.h"
|
2011-04-01 04:15:20 +00:00
|
|
|
#include "hw/loader.h"
|
|
|
|
|
2016-07-25 14:24:41 +00:00
|
|
|
#include "hw/ppc/fdt.h"
|
2013-02-05 16:06:20 +00:00
|
|
|
#include "hw/ppc/spapr.h"
|
|
|
|
#include "hw/ppc/spapr_vio.h"
|
|
|
|
#include "hw/pci-host/spapr.h"
|
|
|
|
#include "hw/ppc/xics.h"
|
2012-12-12 12:24:50 +00:00
|
|
|
#include "hw/pci/msi.h"
|
2011-04-01 04:15:20 +00:00
|
|
|
|
2013-02-04 14:40:22 +00:00
|
|
|
#include "hw/pci/pci.h"
|
2014-03-17 02:40:27 +00:00
|
|
|
#include "hw/scsi/scsi.h"
|
|
|
|
#include "hw/virtio/virtio-scsi.h"
|
2011-08-09 15:57:37 +00:00
|
|
|
|
2012-12-17 17:19:49 +00:00
|
|
|
#include "exec/address-spaces.h"
|
2012-08-16 02:03:56 +00:00
|
|
|
#include "hw/usb.h"
|
2012-12-17 17:20:00 +00:00
|
|
|
#include "qemu/config-file.h"
|
2013-12-23 15:40:40 +00:00
|
|
|
#include "qemu/error-report.h"
|
2014-05-23 02:26:54 +00:00
|
|
|
#include "trace.h"
|
2014-08-20 12:16:36 +00:00
|
|
|
#include "hw/nmi.h"
|
2011-10-03 10:56:38 +00:00
|
|
|
|
2014-10-14 16:40:06 +00:00
|
|
|
#include "hw/compat.h"
|
2016-03-20 17:16:19 +00:00
|
|
|
#include "qemu/cutils.h"
|
2016-06-10 00:59:03 +00:00
|
|
|
#include "hw/ppc/spapr_cpu_core.h"
|
2016-06-10 00:59:08 +00:00
|
|
|
#include "qmp-commands.h"
|
2014-10-14 16:40:06 +00:00
|
|
|
|
2011-04-01 04:15:20 +00:00
|
|
|
#include <libfdt.h>
|
|
|
|
|
2012-01-11 19:46:28 +00:00
|
|
|
/* SLOF memory layout:
|
|
|
|
*
|
|
|
|
* SLOF raw image loaded at 0, copies its romfs right below the flat
|
|
|
|
* device-tree, then position SLOF itself 31M below that
|
|
|
|
*
|
|
|
|
* So we set FW_OVERHEAD to 40MB which should account for all of that
|
|
|
|
* and more
|
|
|
|
*
|
|
|
|
* We load our kernel at 4M, leaving space for SLOF initial image
|
|
|
|
*/
|
2015-08-06 03:37:24 +00:00
|
|
|
#define FDT_MAX_SIZE 0x100000
|
2011-04-01 04:15:23 +00:00
|
|
|
#define RTAS_MAX_SIZE 0x10000
|
spapr: Locate RTAS and device-tree based on real RMA
We currently calculate the final RTAS and FDT location based on
the early estimate of the RMA size, cropped to 256M on KVM since
we only know the real RMA size at reset time which happens much
later in the boot process.
This means the FDT and RTAS end up right below 256M while they
could be much higher, using precious RMA space and limiting
what the OS bootloader can put there which has proved to be
a problem with some OSes (such as when using very large initrd's)
Fortunately, we do the actual copy of the device-tree into guest
memory much later, during reset, late enough to be able to do it
using the final RMA value, we just need to move the calculation
to the right place.
However, RTAS is still loaded too early, so we change the code to
load the tiny blob into qemu memory early on, and then copy it into
guest memory at reset time. It's small enough that the memory usage
doesn't matter.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
[aik: fixed errors from checkpatch.pl, defined RTAS_MAX_ADDR]
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
[agraf: fix compilation on 32bit hosts]
Signed-off-by: Alexander Graf <agraf@suse.de>
2014-07-21 03:02:04 +00:00
|
|
|
#define RTAS_MAX_ADDR 0x80000000 /* RTAS must stay below that */
|
Add SLOF-based partition firmware for pSeries machine, allowing more boot options
Currently, the emulated pSeries machine requires the use of the
-kernel parameter in order to explicitly load a guest kernel. This
means booting from the virtual disk, cdrom or network is not possible.
This patch addresses this limitation by inserting a within-partition
firmware image (derived from the "SLOF" free Open Firmware project).
If -kernel is not specified, qemu will now load the SLOF image, which
has access to the qemu boot device list through the device tree, and
can boot from any of the usual virtual devices.
In order to support the new firmware, an extension to the emulated
machine/hypervisor is necessary. Unlike Linux, which expects
multi-CPU entry to be handled kexec() style, the SLOF firmware expects
only one CPU to be active at entry, and to use a hypervisor RTAS
method to enable the other CPUs one by one.
This patch also implements this 'start-cpu' method, so that SLOF can
start the secondary CPUs and marshal them into the kexec() holding
pattern ready for entry into the guest OS. Linux should, and in the
future might directly use the start-cpu method to enable initially
disabled CPUs, but for now it does require kexec() entry.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
2011-04-01 04:15:34 +00:00
|
|
|
#define FW_MAX_SIZE 0x400000
|
|
|
|
#define FW_FILE_NAME "slof.bin"
|
2012-01-11 19:46:28 +00:00
|
|
|
#define FW_OVERHEAD 0x2800000
|
|
|
|
#define KERNEL_LOAD_ADDR FW_MAX_SIZE
|
Add SLOF-based partition firmware for pSeries machine, allowing more boot options
Currently, the emulated pSeries machine requires the use of the
-kernel parameter in order to explicitly load a guest kernel. This
means booting from the virtual disk, cdrom or network is not possible.
This patch addresses this limitation by inserting a within-partition
firmware image (derived from the "SLOF" free Open Firmware project).
If -kernel is not specified, qemu will now load the SLOF image, which
has access to the qemu boot device list through the device tree, and
can boot from any of the usual virtual devices.
In order to support the new firmware, an extension to the emulated
machine/hypervisor is necessary. Unlike Linux, which expects
multi-CPU entry to be handled kexec() style, the SLOF firmware expects
only one CPU to be active at entry, and to use a hypervisor RTAS
method to enable the other CPUs one by one.
This patch also implements this 'start-cpu' method, so that SLOF can
start the secondary CPUs and marshal them into the kexec() holding
pattern ready for entry into the guest OS. Linux should, and in the
future might directly use the start-cpu method to enable initially
disabled CPUs, but for now it does require kexec() entry.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
2011-04-01 04:15:34 +00:00
|
|
|
|
2012-01-11 19:46:28 +00:00
|
|
|
#define MIN_RMA_SLOF 128UL
|
2011-04-01 04:15:20 +00:00
|
|
|
|
2011-08-03 21:02:17 +00:00
|
|
|
#define PHANDLE_XICP 0x00001111
|
|
|
|
|
2012-09-12 16:57:12 +00:00
|
|
|
#define HTAB_SIZE(spapr) (1ULL << ((spapr)->htab_shift))
|
|
|
|
|
xics: rename types to be sane and follow coding style
Basically, in HW the layout of the interrupt network is:
- One ICP per processor thread (the "presenter"). This contains the
registers to fetch a pending interrupt (ack), EOI, and control the
processor priority.
- One ICS per logical source of interrupts (ie, one per PCI host
bridge, and a few others here or there). This contains the per-interrupt
source configuration (target processor(s), priority, mask) and the
per-interrupt internal state.
Under PAPR, there is a single "virtual" ICS ... somewhat (it's a bit
oddball what pHyp does here, arguably there are two but we can ignore
that distinction). There is no register level access. A pair of firmware
(RTAS) calls is used to configure each virtual interrupt.
So our model here is somewhat the same. We have one ICS in the emulated
XICS which arguably *is* the emulated XICS, there's no point making it a
separate "device", that would just be gross, and each VCPU has an
associated ICP.
Yet we call the "XICS" struct icp_state and then the ICPs
'struct icp_server_state'. It's particularly confusing when all of the
functions have xics_prefixes yet take *icp arguments.
Rename:
struct icp_state -> XICSState
struct icp_server_state -> ICPState
struct ics_state -> ICSState
struct ics_irq_state -> ICSIRQState
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Message-id: 1374175984-8930-12-git-send-email-aliguori@us.ibm.com
[aik: added ics_resend() on post_load]
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
2013-07-18 19:33:04 +00:00
|
|
|
static XICSState *try_create_xics(const char *type, int nr_servers,
|
2015-02-05 09:34:48 +00:00
|
|
|
int nr_irqs, Error **errp)
|
xics: rename types to be sane and follow coding style
Basically, in HW the layout of the interrupt network is:
- One ICP per processor thread (the "presenter"). This contains the
registers to fetch a pending interrupt (ack), EOI, and control the
processor priority.
- One ICS per logical source of interrupts (ie, one per PCI host
bridge, and a few others here or there). This contains the per-interrupt
source configuration (target processor(s), priority, mask) and the
per-interrupt internal state.
Under PAPR, there is a single "virtual" ICS ... somewhat (it's a bit
oddball what pHyp does here, arguably there are two but we can ignore
that distinction). There is no register level access. A pair of firmware
(RTAS) calls is used to configure each virtual interrupt.
So our model here is somewhat the same. We have one ICS in the emulated
XICS which arguably *is* the emulated XICS, there's no point making it a
separate "device", that would just be gross, and each VCPU has an
associated ICP.
Yet we call the "XICS" struct icp_state and then the ICPs
'struct icp_server_state'. It's particularly confusing when all of the
functions have xics_prefixes yet take *icp arguments.
Rename:
struct icp_state -> XICSState
struct icp_server_state -> ICPState
struct ics_state -> ICSState
struct ics_irq_state -> ICSIRQState
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Message-id: 1374175984-8930-12-git-send-email-aliguori@us.ibm.com
[aik: added ics_resend() on post_load]
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
2013-07-18 19:33:04 +00:00
|
|
|
{
|
2015-02-05 09:34:48 +00:00
|
|
|
Error *err = NULL;
|
xics: rename types to be sane and follow coding style
Basically, in HW the layout of the interrupt network is:
- One ICP per processor thread (the "presenter"). This contains the
registers to fetch a pending interrupt (ack), EOI, and control the
processor priority.
- One ICS per logical source of interrupts (ie, one per PCI host
bridge, and a few others here or there). This contains the per-interrupt
source configuration (target processor(s), priority, mask) and the
per-interrupt internal state.
Under PAPR, there is a single "virtual" ICS ... somewhat (it's a bit
oddball what pHyp does here, arguably there are two but we can ignore
that distinction). There is no register level access. A pair of firmware
(RTAS) calls is used to configure each virtual interrupt.
So our model here is somewhat the same. We have one ICS in the emulated
XICS which arguably *is* the emulated XICS, there's no point making it a
separate "device", that would just be gross, and each VCPU has an
associated ICP.
Yet we call the "XICS" struct icp_state and then the ICPs
'struct icp_server_state'. It's particularly confusing when all of the
functions have xics_prefixes yet take *icp arguments.
Rename:
struct icp_state -> XICSState
struct icp_server_state -> ICPState
struct ics_state -> ICSState
struct ics_irq_state -> ICSIRQState
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Message-id: 1374175984-8930-12-git-send-email-aliguori@us.ibm.com
[aik: added ics_resend() on post_load]
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
2013-07-18 19:33:04 +00:00
|
|
|
DeviceState *dev;
|
|
|
|
|
|
|
|
dev = qdev_create(NULL, type);
|
|
|
|
qdev_prop_set_uint32(dev, "nr_servers", nr_servers);
|
|
|
|
qdev_prop_set_uint32(dev, "nr_irqs", nr_irqs);
|
2015-02-05 09:34:48 +00:00
|
|
|
object_property_set_bool(OBJECT(dev), true, "realized", &err);
|
|
|
|
if (err) {
|
|
|
|
error_propagate(errp, err);
|
|
|
|
object_unparent(OBJECT(dev));
|
xics: rename types to be sane and follow coding style
Basically, in HW the layout of the interrupt network is:
- One ICP per processor thread (the "presenter"). This contains the
registers to fetch a pending interrupt (ack), EOI, and control the
processor priority.
- One ICS per logical source of interrupts (ie, one per PCI host
bridge, and a few others here or there). This contains the per-interrupt
source configuration (target processor(s), priority, mask) and the
per-interrupt internal state.
Under PAPR, there is a single "virtual" ICS ... somewhat (it's a bit
oddball what pHyp does here, arguably there are two but we can ignore
that distinction). There is no register level access. A pair of firmware
(RTAS) calls is used to configure each virtual interrupt.
So our model here is somewhat the same. We have one ICS in the emulated
XICS which arguably *is* the emulated XICS, there's no point making it a
separate "device", that would just be gross, and each VCPU has an
associated ICP.
Yet we call the "XICS" struct icp_state and then the ICPs
'struct icp_server_state'. It's particularly confusing when all of the
functions have xics_prefixes yet take *icp arguments.
Rename:
struct icp_state -> XICSState
struct icp_server_state -> ICPState
struct ics_state -> ICSState
struct ics_irq_state -> ICSIRQState
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Message-id: 1374175984-8930-12-git-send-email-aliguori@us.ibm.com
[aik: added ics_resend() on post_load]
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
2013-07-18 19:33:04 +00:00
|
|
|
return NULL;
|
|
|
|
}
|
2013-09-26 06:18:42 +00:00
|
|
|
return XICS_COMMON(dev);
|
xics: rename types to be sane and follow coding style
Basically, in HW the layout of the interrupt network is:
- One ICP per processor thread (the "presenter"). This contains the
registers to fetch a pending interrupt (ack), EOI, and control the
processor priority.
- One ICS per logical source of interrupts (ie, one per PCI host
bridge, and a few others here or there). This contains the per-interrupt
source configuration (target processor(s), priority, mask) and the
per-interrupt internal state.
Under PAPR, there is a single "virtual" ICS ... somewhat (it's a bit
oddball what pHyp does here, arguably there are two but we can ignore
that distinction). There is no register level access. A pair of firmware
(RTAS) calls is used to configure each virtual interrupt.
So our model here is somewhat the same. We have one ICS in the emulated
XICS which arguably *is* the emulated XICS, there's no point making it a
separate "device", that would just be gross, and each VCPU has an
associated ICP.
Yet we call the "XICS" struct icp_state and then the ICPs
'struct icp_server_state'. It's particularly confusing when all of the
functions have xics_prefixes yet take *icp arguments.
Rename:
struct icp_state -> XICSState
struct icp_server_state -> ICPState
struct ics_state -> ICSState
struct ics_irq_state -> ICSIRQState
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Message-id: 1374175984-8930-12-git-send-email-aliguori@us.ibm.com
[aik: added ics_resend() on post_load]
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
2013-07-18 19:33:04 +00:00
|
|
|
}
|
|
|
|
|
2015-03-10 16:59:54 +00:00
|
|
|
/*
 * Create the machine's XICS interrupt controller.
 *
 * When KVM is active and an in-kernel irqchip is permitted, the
 * KVM-accelerated XICS is tried first.  If that attempt fails and the
 * user strictly required kernel_irqchip, the failure is reported;
 * otherwise the fully emulated XICS is created as a fallback.
 */
static XICSState *xics_system_init(MachineState *machine,
                                   int nr_servers, int nr_irqs, Error **errp)
{
    XICSState *xics = NULL;

    if (kvm_enabled()) {
        Error *local_err = NULL;

        if (machine_kernel_irqchip_allowed(machine)) {
            xics = try_create_xics(TYPE_XICS_SPAPR_KVM, nr_servers,
                                   nr_irqs, &local_err);
        }
        if (machine_kernel_irqchip_required(machine) && !xics) {
            /* The user insisted on an in-kernel irqchip we couldn't get */
            error_reportf_err(local_err,
                              "kernel_irqchip requested but unavailable: ");
        } else {
            error_free(local_err);
        }
    }

    if (!xics) {
        xics = try_create_xics(TYPE_XICS_SPAPR, nr_servers, nr_irqs, errp);
    }

    return xics;
}
|
|
|
|
|
2014-05-23 02:26:51 +00:00
|
|
|
/*
 * Set the SMT-related properties on a CPU core's device tree node: the
 * optional "cpu-version" override (compatibility-mode PVR) plus the XICS
 * interrupt server lists covering smt_threads threads.
 *
 * Returns 0 on success, or a negative libfdt error code.
 */
static int spapr_fixup_cpu_smt_dt(void *fdt, int offset, PowerPCCPU *cpu,
                                  int smt_threads)
{
    uint32_t servers_prop[smt_threads];
    uint32_t gservers_prop[smt_threads * 2];
    int dt_id = ppc_get_vcpu_dt_id(cpu);
    int t, ret = 0;

    /* Advertise a compatibility-mode PVR when one has been negotiated */
    if (cpu->cpu_version) {
        ret = fdt_setprop_cell(fdt, offset, "cpu-version", cpu->cpu_version);
        if (ret < 0) {
            return ret;
        }
    }

    /* Build interrupt servers and gservers properties */
    for (t = 0; t < smt_threads; t++) {
        servers_prop[t] = cpu_to_be32(dt_id + t);
        /* Hack, direct the group queues back to cpu 0 */
        gservers_prop[t * 2] = cpu_to_be32(dt_id + t);
        gservers_prop[t * 2 + 1] = 0;
    }

    ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-server#s",
                      servers_prop, sizeof(servers_prop));
    if (ret < 0) {
        return ret;
    }

    return fdt_setprop(fdt, offset, "ibm,ppc-interrupt-gserver#s",
                       gservers_prop, sizeof(gservers_prop));
}
|
|
|
|
|
2015-07-02 06:23:17 +00:00
|
|
|
/*
 * Set the "ibm,associativity" property on a CPU's device tree node so
 * the guest can see its NUMA placement.  A no-op (returning 0) unless
 * more than one NUMA node is configured.
 *
 * Returns 0 on success, or a negative libfdt error code.
 */
static int spapr_fixup_cpu_numa_dt(void *fdt, int offset, CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    int dt_id = ppc_get_vcpu_dt_id(cpu);
    uint32_t associativity[] = {
        cpu_to_be32(0x5),
        cpu_to_be32(0x0),
        cpu_to_be32(0x0),
        cpu_to_be32(0x0),
        cpu_to_be32(cs->numa_node),
        cpu_to_be32(dt_id),
    };

    /* Advertise NUMA via ibm,associativity */
    if (nb_numa_nodes <= 1) {
        return 0;
    }

    return fdt_setprop(fdt, offset, "ibm,associativity", associativity,
                       sizeof(associativity));
}
|
|
|
|
|
2015-07-02 06:23:04 +00:00
|
|
|
/*
 * Update the device tree with per-CPU information that is only known
 * late (hash page table size, NUMA placement, SMT interrupt servers).
 * Only the first thread of each core gets a node, since the device tree
 * describes whole cores.
 *
 * Returns 0 on success, or a negative libfdt error code.
 */
static int spapr_fixup_cpu_dt(void *fdt, sPAPRMachineState *spapr)
{
    int ret = 0, offset, cpus_offset;
    CPUState *cs;
    char cpu_model[32];
    int smt = kvmppc_smt_threads();
    uint32_t pft_size_prop[] = {0, cpu_to_be32(spapr->htab_shift)};

    /*
     * Locate (or create) the /cpus container once, outside the loop:
     * adding CPU subnodes below inserts data inside /cpus, so the offset
     * of /cpus itself remains valid across iterations.
     */
    cpus_offset = fdt_path_offset(fdt, "/cpus");
    if (cpus_offset < 0) {
        cpus_offset = fdt_add_subnode(fdt, fdt_path_offset(fdt, "/"),
                                      "cpus");
        if (cpus_offset < 0) {
            return cpus_offset;
        }
    }

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        DeviceClass *dc = DEVICE_GET_CLASS(cs);
        int index = ppc_get_vcpu_dt_id(cpu);

        /* Skip secondary threads; only whole cores appear in the DT */
        if ((index % smt) != 0) {
            continue;
        }

        /* Node name is "<fw_name>@<dt id of first thread>" */
        snprintf(cpu_model, sizeof(cpu_model), "%s@%x", dc->fw_name, index);

        offset = fdt_subnode_offset(fdt, cpus_offset, cpu_model);
        if (offset < 0) {
            offset = fdt_add_subnode(fdt, cpus_offset, cpu_model);
            if (offset < 0) {
                return offset;
            }
        }

        ret = fdt_setprop(fdt, offset, "ibm,pft-size",
                          pft_size_prop, sizeof(pft_size_prop));
        if (ret < 0) {
            return ret;
        }

        ret = spapr_fixup_cpu_numa_dt(fdt, offset, cs);
        if (ret < 0) {
            return ret;
        }

        ret = spapr_fixup_cpu_smt_dt(fdt, offset, cpu,
                                     ppc_get_compat_smt_threads(cpu));
        if (ret < 0) {
            return ret;
        }
    }
    return ret;
}
|
|
|
|
|
2014-07-03 03:10:06 +00:00
|
|
|
static hwaddr spapr_node0_size(void)
|
|
|
|
{
|
2015-07-02 06:23:05 +00:00
|
|
|
MachineState *machine = MACHINE(qdev_get_machine());
|
|
|
|
|
2014-07-03 03:10:06 +00:00
|
|
|
if (nb_numa_nodes) {
|
|
|
|
int i;
|
|
|
|
for (i = 0; i < nb_numa_nodes; ++i) {
|
|
|
|
if (numa_info[i].node_mem) {
|
2015-07-02 06:23:05 +00:00
|
|
|
return MIN(pow2floor(numa_info[i].node_mem),
|
|
|
|
machine->ram_size);
|
2014-07-03 03:10:06 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2015-07-02 06:23:05 +00:00
|
|
|
return machine->ram_size;
|
2014-07-03 03:10:06 +00:00
|
|
|
}
|
|
|
|
|
2014-05-27 05:36:29 +00:00
|
|
|
/*
 * Append s1 to s *including* its terminating NUL, producing the
 * NUL-separated string-list encoding used by FDT properties.
 */
static void add_str(GString *s, const gchar *s1)
{
    size_t len = strlen(s1);

    g_string_append_len(s, s1, len + 1);
}
|
2012-09-12 16:57:12 +00:00
|
|
|
|
2013-10-15 16:33:37 +00:00
|
|
|
/*
 * Build the skeleton flattened device tree for the pseries machine.
 *
 * Creates the root node and its global properties, the /chosen node
 * (boot arguments, initrd/kernel location, architecture-vec-5), the
 * /rtas node (hypertas function lists, error-log and event-scan
 * parameters), the XICS interrupt controller node, the /vdevice bus
 * node, the event-sources nodes and, under KVM, a /hypervisor node
 * advertising the KVM hypercall interface.
 *
 * Returns a g_malloc0()ed buffer of FDT_MAX_SIZE holding the finished
 * tree; any libfdt failure aborts via the _FDT() macro.  Ownership of
 * the buffer passes to the caller.
 */
static void *spapr_create_fdt_skel(hwaddr initrd_base,
                                   hwaddr initrd_size,
                                   hwaddr kernel_size,
                                   bool little_endian,
                                   const char *kernel_cmdline,
                                   uint32_t epow_irq)
{
    void *fdt;
    uint32_t start_prop = cpu_to_be32(initrd_base);
    uint32_t end_prop = cpu_to_be32(initrd_base + initrd_size);
    GString *hypertas = g_string_sized_new(256);
    GString *qemu_hypertas = g_string_sized_new(256);
    uint32_t refpoints[] = {cpu_to_be32(0x4), cpu_to_be32(0x4)};
    uint32_t interrupt_server_ranges_prop[] = {0, cpu_to_be32(max_cpus)};
    /* Byte 5 bit 0x80 selects Form1_affinity (see /chosen below) */
    unsigned char vec5[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x80};
    char *buf;

    /* Hypercalls supported for all guests */
    add_str(hypertas, "hcall-pft");
    add_str(hypertas, "hcall-term");
    add_str(hypertas, "hcall-dabr");
    add_str(hypertas, "hcall-interrupt");
    add_str(hypertas, "hcall-tce");
    add_str(hypertas, "hcall-vio");
    add_str(hypertas, "hcall-splpar");
    add_str(hypertas, "hcall-bulk");
    add_str(hypertas, "hcall-set-mode");
    add_str(hypertas, "hcall-sprg0");
    add_str(hypertas, "hcall-copy");
    add_str(hypertas, "hcall-debug");
    /* QEMU-private hypercalls */
    add_str(qemu_hypertas, "hcall-memop1");

    fdt = g_malloc0(FDT_MAX_SIZE);
    _FDT((fdt_create(fdt, FDT_MAX_SIZE)));

    /* Reserve the loaded kernel and initrd images in the memory map */
    if (kernel_size) {
        _FDT((fdt_add_reservemap_entry(fdt, KERNEL_LOAD_ADDR, kernel_size)));
    }
    if (initrd_size) {
        _FDT((fdt_add_reservemap_entry(fdt, initrd_base, initrd_size)));
    }
    _FDT((fdt_finish_reservemap(fdt)));

    /* Root node */
    _FDT((fdt_begin_node(fdt, "")));
    _FDT((fdt_property_string(fdt, "device_type", "chrp")));
    _FDT((fdt_property_string(fdt, "model", "IBM pSeries (emulated by qemu)")));
    _FDT((fdt_property_string(fdt, "compatible", "qemu,pseries")));

    /*
     * Add info to guest to identify which host it is being run on
     * and what the uuid of the guest is
     */
    if (kvmppc_get_host_model(&buf)) {
        _FDT((fdt_property_string(fdt, "host-model", buf)));
        g_free(buf);
    }
    if (kvmppc_get_host_serial(&buf)) {
        _FDT((fdt_property_string(fdt, "host-serial", buf)));
        g_free(buf);
    }

    buf = g_strdup_printf(UUID_FMT, qemu_uuid[0], qemu_uuid[1],
                          qemu_uuid[2], qemu_uuid[3], qemu_uuid[4],
                          qemu_uuid[5], qemu_uuid[6], qemu_uuid[7],
                          qemu_uuid[8], qemu_uuid[9], qemu_uuid[10],
                          qemu_uuid[11], qemu_uuid[12], qemu_uuid[13],
                          qemu_uuid[14], qemu_uuid[15]);

    _FDT((fdt_property_string(fdt, "vm,uuid", buf)));
    /* Only expose "system-id" when the user supplied a UUID */
    if (qemu_uuid_set) {
        _FDT((fdt_property_string(fdt, "system-id", buf)));
    }
    g_free(buf);

    if (qemu_get_vm_name()) {
        _FDT((fdt_property_string(fdt, "ibm,partition-name",
                                  qemu_get_vm_name())));
    }

    _FDT((fdt_property_cell(fdt, "#address-cells", 0x2)));
    _FDT((fdt_property_cell(fdt, "#size-cells", 0x2)));

    /* /chosen */
    _FDT((fdt_begin_node(fdt, "chosen")));

    /* Set Form1_affinity */
    _FDT((fdt_property(fdt, "ibm,architecture-vec-5", vec5, sizeof(vec5))));

    _FDT((fdt_property_string(fdt, "bootargs", kernel_cmdline)));
    _FDT((fdt_property(fdt, "linux,initrd-start",
                       &start_prop, sizeof(start_prop))));
    _FDT((fdt_property(fdt, "linux,initrd-end",
                       &end_prop, sizeof(end_prop))));
    if (kernel_size) {
        uint64_t kprop[2] = { cpu_to_be64(KERNEL_LOAD_ADDR),
                              cpu_to_be64(kernel_size) };

        _FDT((fdt_property(fdt, "qemu,boot-kernel", &kprop, sizeof(kprop))));
        if (little_endian) {
            /* Tell the firmware the kernel image is little-endian */
            _FDT((fdt_property(fdt, "qemu,boot-kernel-le", NULL, 0)));
        }
    }
    if (boot_menu) {
        _FDT((fdt_property_cell(fdt, "qemu,boot-menu", boot_menu)));
    }
    _FDT((fdt_property_cell(fdt, "qemu,graphic-width", graphic_width)));
    _FDT((fdt_property_cell(fdt, "qemu,graphic-height", graphic_height)));
    _FDT((fdt_property_cell(fdt, "qemu,graphic-depth", graphic_depth)));

    _FDT((fdt_end_node(fdt)));

    /* RTAS */
    _FDT((fdt_begin_node(fdt, "rtas")));

    /* TCG always supports multi-TCE; under KVM ask the kernel */
    if (!kvm_enabled() || kvmppc_spapr_use_multitce()) {
        add_str(hypertas, "hcall-multi-tce");
    }
    _FDT((fdt_property(fdt, "ibm,hypertas-functions", hypertas->str,
                       hypertas->len)));
    g_string_free(hypertas, TRUE);
    _FDT((fdt_property(fdt, "qemu,hypertas-functions", qemu_hypertas->str,
                       qemu_hypertas->len)));
    g_string_free(qemu_hypertas, TRUE);

    _FDT((fdt_property(fdt, "ibm,associativity-reference-points",
        refpoints, sizeof(refpoints))));

    _FDT((fdt_property_cell(fdt, "rtas-error-log-max", RTAS_ERROR_LOG_MAX)));
    _FDT((fdt_property_cell(fdt, "rtas-event-scan-rate",
                            RTAS_EVENT_SCAN_RATE)));

    if (msi_nonbroken) {
        _FDT((fdt_property(fdt, "ibm,change-msix-capable", NULL, 0)));
    }

    /*
     * According to PAPR, rtas ibm,os-term does not guarantee a return
     * back to the guest cpu.
     *
     * While an additional ibm,extended-os-term property indicates that
     * rtas call return will always occur. Set this property.
     */
    _FDT((fdt_property(fdt, "ibm,extended-os-term", NULL, 0)));

    _FDT((fdt_end_node(fdt)));

    /* interrupt controller */
    _FDT((fdt_begin_node(fdt, "interrupt-controller")));

    _FDT((fdt_property_string(fdt, "device_type",
                              "PowerPC-External-Interrupt-Presentation")));
    _FDT((fdt_property_string(fdt, "compatible", "IBM,ppc-xicp")));
    _FDT((fdt_property(fdt, "interrupt-controller", NULL, 0)));
    _FDT((fdt_property(fdt, "ibm,interrupt-server-ranges",
                       interrupt_server_ranges_prop,
                       sizeof(interrupt_server_ranges_prop))));
    _FDT((fdt_property_cell(fdt, "#interrupt-cells", 2)));
    /* Both spellings of the phandle for old and new kernels */
    _FDT((fdt_property_cell(fdt, "linux,phandle", PHANDLE_XICP)));
    _FDT((fdt_property_cell(fdt, "phandle", PHANDLE_XICP)));

    _FDT((fdt_end_node(fdt)));

    /* vdevice */
    _FDT((fdt_begin_node(fdt, "vdevice")));

    _FDT((fdt_property_string(fdt, "device_type", "vdevice")));
    _FDT((fdt_property_string(fdt, "compatible", "IBM,vdevice")));
    _FDT((fdt_property_cell(fdt, "#address-cells", 0x1)));
    _FDT((fdt_property_cell(fdt, "#size-cells", 0x0)));
    _FDT((fdt_property_cell(fdt, "#interrupt-cells", 0x2)));
    _FDT((fdt_property(fdt, "interrupt-controller", NULL, 0)));

    _FDT((fdt_end_node(fdt)));

    /* event-sources */
    spapr_events_fdt_skel(fdt, epow_irq);

    /* /hypervisor node */
    if (kvm_enabled()) {
        uint8_t hypercall[16];

        /* indicate KVM hypercall interface */
        _FDT((fdt_begin_node(fdt, "hypervisor")));
        _FDT((fdt_property_string(fdt, "compatible", "linux,kvm")));
        if (kvmppc_has_cap_fixup_hcalls()) {
            /*
             * Older KVM versions with older guest kernels were broken with the
             * magic page, don't allow the guest to map it.
             */
            if (!kvmppc_get_hypercall(first_cpu->env_ptr, hypercall,
                                      sizeof(hypercall))) {
                _FDT((fdt_property(fdt, "hcall-instructions", hypercall,
                                   sizeof(hypercall))));
            }
        }
        _FDT((fdt_end_node(fdt)));
    }

    _FDT((fdt_end_node(fdt))); /* close root node */
    _FDT((fdt_finish(fdt)));

    return fdt;
}
|
|
|
|
|
2015-07-13 00:34:00 +00:00
|
|
|
/*
 * Add one memory@<addr> node to the FDT describing the range
 * [start, start + size) as belonging to NUMA node 'nodeid'.
 *
 * The associativity property uses a 4-level list (first cell is the
 * length, 4) with the NUMA node id in the last cell.
 *
 * Returns the offset of the newly added node; libfdt failures abort
 * via the _FDT() macro.
 */
static int spapr_populate_memory_node(void *fdt, int nodeid, hwaddr start,
                                      hwaddr size)
{
    uint32_t associativity[] = {
        cpu_to_be32(0x4), /* length */
        cpu_to_be32(0x0), cpu_to_be32(0x0),
        cpu_to_be32(0x0), cpu_to_be32(nodeid)
    };
    char mem_name[32];
    uint64_t mem_reg_property[2];
    int off;

    mem_reg_property[0] = cpu_to_be64(start);
    mem_reg_property[1] = cpu_to_be64(size);

    /* Bounded formatting: snprintf instead of sprintf guards mem_name
     * against any future widening of TARGET_FMT_lx */
    snprintf(mem_name, sizeof(mem_name), "memory@" TARGET_FMT_lx, start);
    off = fdt_add_subnode(fdt, 0, mem_name);
    _FDT(off);
    _FDT((fdt_setprop_string(fdt, off, "device_type", "memory")));
    _FDT((fdt_setprop(fdt, off, "reg", mem_reg_property,
                      sizeof(mem_reg_property))));
    _FDT((fdt_setprop(fdt, off, "ibm,associativity", associativity,
                      sizeof(associativity))));
    return off;
}
|
|
|
|
|
2015-07-02 06:23:04 +00:00
|
|
|
/*
 * Describe all of guest RAM in the FDT as a series of memory@ nodes.
 *
 * Each NUMA node's memory is emitted in power-of-two sized chunks whose
 * start addresses stay naturally aligned.  Node 0 starts with a single
 * chunk covering the RMA.  Node sizes are clamped so the total never
 * exceeds machine->ram_size.
 *
 * Always returns 0; libfdt failures abort via _FDT() inside
 * spapr_populate_memory_node().
 */
static int spapr_populate_memory(sPAPRMachineState *spapr, void *fdt)
{
    MachineState *machine = MACHINE(spapr);
    hwaddr mem_start, node_size;
    int i, nb_nodes = nb_numa_nodes;
    NodeInfo *nodes = numa_info;
    NodeInfo ramnode;

    /* No NUMA nodes, assume there is just one node with whole RAM */
    if (!nb_numa_nodes) {
        nb_nodes = 1;
        ramnode.node_mem = machine->ram_size;
        nodes = &ramnode;
    }

    for (i = 0, mem_start = 0; i < nb_nodes; ++i) {
        if (!nodes[i].node_mem) {
            continue;
        }
        /* Clamp this node's size so the running total fits in RAM */
        if (mem_start >= machine->ram_size) {
            node_size = 0;
        } else {
            node_size = nodes[i].node_mem;
            if (node_size > machine->ram_size - mem_start) {
                node_size = machine->ram_size - mem_start;
            }
        }
        if (!mem_start) {
            /* ppc_spapr_init() checks for rma_size <= node0_size already */
            spapr_populate_memory_node(fdt, i, 0, spapr->rma_size);
            mem_start += spapr->rma_size;
            node_size -= spapr->rma_size;
        }
        /* Emit the remainder in aligned power-of-two chunks */
        for ( ; node_size; ) {
            hwaddr sizetmp = pow2floor(node_size);

            /* mem_start != 0 here */
            if (ctzl(mem_start) < ctzl(sizetmp)) {
                /* Shrink the chunk to keep mem_start naturally aligned */
                sizetmp = 1ULL << ctzl(mem_start);
            }

            spapr_populate_memory_node(fdt, i, mem_start, sizetmp);
            node_size -= sizetmp;
            mem_start += sizetmp;
        }
    }

    return 0;
}
|
|
|
|
|
2015-07-02 06:23:17 +00:00
|
|
|
/*
 * Fill in the properties of a single CPU node (at 'offset') in the FDT:
 * identity (reg, cpu-version), cache geometry, frequencies, SLB size,
 * segment and page-size capabilities, VMX/VSX and DFP availability,
 * ibm,pa-features, chip id, hash-table size, NUMA associativity and SMT
 * thread lists, plus the hotplug DRC index if a CPU connector exists.
 *
 * libfdt failures abort via the _FDT() macro.
 */
static void spapr_populate_cpu_dt(CPUState *cs, void *fdt, int offset,
                                  sPAPRMachineState *spapr)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
    int index = ppc_get_vcpu_dt_id(cpu);
    /* 1T (2^40) and 256M (2^28) segment sizes */
    uint32_t segs[] = {cpu_to_be32(28), cpu_to_be32(40),
                       0xffffffff, 0xffffffff};
    uint32_t tbfreq = kvm_enabled() ? kvmppc_get_tbfreq()
                                    : SPAPR_TIMEBASE_FREQ;
    uint32_t cpufreq = kvm_enabled() ? kvmppc_get_clockfreq() : 1000000000;
    uint32_t page_sizes_prop[64];
    size_t page_sizes_prop_size;
    uint32_t vcpus_per_socket = smp_threads * smp_cores;
    uint32_t pft_size_prop[] = {0, cpu_to_be32(spapr->htab_shift)};
    sPAPRDRConnector *drc;
    sPAPRDRConnectorClass *drck;
    int drc_index;

    /* Advertise the DR connector index when CPU hotplug is available */
    drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_CPU, index);
    if (drc) {
        drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
        drc_index = drck->get_index(drc);
        _FDT((fdt_setprop_cell(fdt, offset, "ibm,my-drc-index", drc_index)));
    }

    /* Note: we keep CI large pages off for now because a 64K capable guest
     * provisioned with large pages might otherwise try to map a qemu
     * framebuffer (or other kind of memory mapped PCI BAR) using 64K pages
     * even if that qemu runs on a 4k host.
     *
     * We can later add this bit back when we are confident this is not
     * an issue (!HV KVM or 64K host)
     */
    uint8_t pa_features_206[] = { 6, 0,
        0xf6, 0x1f, 0xc7, 0x00, 0x80, 0xc0 };
    uint8_t pa_features_207[] = { 24, 0,
        0xf6, 0x1f, 0xc7, 0xc0, 0x80, 0xf0,
        0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x80, 0x00,
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00 };
    uint8_t *pa_features;
    size_t pa_size;

    _FDT((fdt_setprop_cell(fdt, offset, "reg", index)));
    _FDT((fdt_setprop_string(fdt, offset, "device_type", "cpu")));

    _FDT((fdt_setprop_cell(fdt, offset, "cpu-version", env->spr[SPR_PVR])));
    _FDT((fdt_setprop_cell(fdt, offset, "d-cache-block-size",
                           env->dcache_line_size)));
    _FDT((fdt_setprop_cell(fdt, offset, "d-cache-line-size",
                           env->dcache_line_size)));
    _FDT((fdt_setprop_cell(fdt, offset, "i-cache-block-size",
                           env->icache_line_size)));
    _FDT((fdt_setprop_cell(fdt, offset, "i-cache-line-size",
                           env->icache_line_size)));

    if (pcc->l1_dcache_size) {
        _FDT((fdt_setprop_cell(fdt, offset, "d-cache-size",
                               pcc->l1_dcache_size)));
    } else {
        error_report("Warning: Unknown L1 dcache size for cpu");
    }
    if (pcc->l1_icache_size) {
        _FDT((fdt_setprop_cell(fdt, offset, "i-cache-size",
                               pcc->l1_icache_size)));
    } else {
        error_report("Warning: Unknown L1 icache size for cpu");
    }

    _FDT((fdt_setprop_cell(fdt, offset, "timebase-frequency", tbfreq)));
    _FDT((fdt_setprop_cell(fdt, offset, "clock-frequency", cpufreq)));
    /* Both spellings of the SLB size property for old and new kernels */
    _FDT((fdt_setprop_cell(fdt, offset, "slb-size", env->slb_nr)));
    _FDT((fdt_setprop_cell(fdt, offset, "ibm,slb-size", env->slb_nr)));
    _FDT((fdt_setprop_string(fdt, offset, "status", "okay")));
    _FDT((fdt_setprop(fdt, offset, "64-bit", NULL, 0)));

    /* Advertise PURR only if the CPU model implements the register */
    if (env->spr_cb[SPR_PURR].oea_read) {
        _FDT((fdt_setprop(fdt, offset, "ibm,purr", NULL, 0)));
    }

    if (env->mmu_model & POWERPC_MMU_1TSEG) {
        _FDT((fdt_setprop(fdt, offset, "ibm,processor-segment-sizes",
                          segs, sizeof(segs))));
    }

    /* Advertise VMX/VSX (vector extensions) if available
     *   0 / no property == no vector extensions
     *   1               == VMX / Altivec available
     *   2               == VSX available */
    if (env->insns_flags & PPC_ALTIVEC) {
        uint32_t vmx = (env->insns_flags2 & PPC2_VSX) ? 2 : 1;

        _FDT((fdt_setprop_cell(fdt, offset, "ibm,vmx", vmx)));
    }

    /* Advertise DFP (Decimal Floating Point) if available
     *   0 / no property == no DFP
     *   1               == DFP available */
    if (env->insns_flags2 & PPC2_DFP) {
        _FDT((fdt_setprop_cell(fdt, offset, "ibm,dfp", 1)));
    }

    page_sizes_prop_size = ppc_create_page_sizes_prop(env, page_sizes_prop,
                                                      sizeof(page_sizes_prop));
    if (page_sizes_prop_size) {
        _FDT((fdt_setprop(fdt, offset, "ibm,segment-page-sizes",
                          page_sizes_prop, page_sizes_prop_size)));
    }

    /* Do the ibm,pa-features property, adjust it for ci-large-pages */
    if (env->mmu_model == POWERPC_MMU_2_06) {
        pa_features = pa_features_206;
        pa_size = sizeof(pa_features_206);
    } else /* env->mmu_model == POWERPC_MMU_2_07 */ {
        pa_features = pa_features_207;
        pa_size = sizeof(pa_features_207);
    }
    if (env->ci_large_pages) {
        pa_features[3] |= 0x20;
    }
    _FDT((fdt_setprop(fdt, offset, "ibm,pa-features", pa_features, pa_size)));

    _FDT((fdt_setprop_cell(fdt, offset, "ibm,chip-id",
                           cs->cpu_index / vcpus_per_socket)));

    _FDT((fdt_setprop(fdt, offset, "ibm,pft-size",
                      pft_size_prop, sizeof(pft_size_prop))));

    _FDT(spapr_fixup_cpu_numa_dt(fdt, offset, cs));

    _FDT(spapr_fixup_cpu_smt_dt(fdt, offset, cpu,
                                ppc_get_compat_smt_threads(cpu)));
}
|
|
|
|
|
|
|
|
/*
 * Create the /cpus container node and one child node per core,
 * delegating per-CPU property population to spapr_populate_cpu_dt().
 *
 * libfdt failures abort via the _FDT() macro.
 */
static void spapr_populate_cpus_dt_node(void *fdt, sPAPRMachineState *spapr)
{
    CPUState *cs;
    int cpus_offset;
    char *nodename;
    int smt = kvmppc_smt_threads();

    cpus_offset = fdt_add_subnode(fdt, 0, "cpus");
    _FDT(cpus_offset);
    _FDT((fdt_setprop_cell(fdt, cpus_offset, "#address-cells", 0x1)));
    _FDT((fdt_setprop_cell(fdt, cpus_offset, "#size-cells", 0x0)));

    /*
     * We walk the CPUs in reverse order to ensure that CPU DT nodes
     * created by fdt_add_subnode() end up in the right order in FDT
     * for the guest kernel to enumerate the CPUs correctly.
     */
    CPU_FOREACH_REVERSE(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        int index = ppc_get_vcpu_dt_id(cpu);
        DeviceClass *dc = DEVICE_GET_CLASS(cs);
        int offset;

        /* One node per core: skip secondary threads */
        if ((index % smt) != 0) {
            continue;
        }

        nodename = g_strdup_printf("%s@%x", dc->fw_name, index);
        offset = fdt_add_subnode(fdt, cpus_offset, nodename);
        g_free(nodename);
        _FDT(offset);
        spapr_populate_cpu_dt(cs, fdt, offset, spapr);
    }

}
|
|
|
|
|
2015-07-13 00:34:00 +00:00
|
|
|
/*
 * Adds ibm,dynamic-reconfiguration-memory node.
 * Refer to docs/specs/ppc-spapr-hotplug.txt for the documentation
 * of this device tree node.
 *
 * Returns 0 on success (including the no-op case where the machine has
 * no hotpluggable memory), or a negative libfdt error code.
 */
static int spapr_populate_drconf_memory(sPAPRMachineState *spapr, void *fdt)
{
    MachineState *machine = MACHINE(spapr);
    int ret, i, offset;
    uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
    uint32_t prop_lmb_size[] = {0, cpu_to_be32(lmb_size)};
    /* Index of the first LMB inside the hotpluggable memory region */
    uint32_t hotplug_lmb_start = spapr->hotplug_memory.base / lmb_size;
    /* Total LMB count: boot-time RAM + gap + hotpluggable region */
    uint32_t nr_lmbs = (spapr->hotplug_memory.base +
                       memory_region_size(&spapr->hotplug_memory.mr)) /
                       lmb_size;
    uint32_t *int_buf, *cur_index, buf_len;
    int nr_nodes = nb_numa_nodes ? nb_numa_nodes : 1;

    /*
     * Don't create the node if there is no hotpluggable memory
     */
    if (machine->ram_size == machine->maxram_size) {
        return 0;
    }

    /*
     * Allocate enough buffer size to fit in ibm,dynamic-memory
     * or ibm,associativity-lookup-arrays
     */
    buf_len = MAX(nr_lmbs * SPAPR_DR_LMB_LIST_ENTRY_SIZE + 1, nr_nodes * 4 + 2)
              * sizeof(uint32_t);
    cur_index = int_buf = g_malloc0(buf_len);

    offset = fdt_add_subnode(fdt, 0, "ibm,dynamic-reconfiguration-memory");

    ret = fdt_setprop(fdt, offset, "ibm,lmb-size", prop_lmb_size,
                    sizeof(prop_lmb_size));
    if (ret < 0) {
        goto out;
    }

    ret = fdt_setprop_cell(fdt, offset, "ibm,memory-flags-mask", 0xff);
    if (ret < 0) {
        goto out;
    }

    ret = fdt_setprop_cell(fdt, offset, "ibm,memory-preservation-time", 0x0);
    if (ret < 0) {
        goto out;
    }

    /* ibm,dynamic-memory: a count followed by one 6-cell entry per LMB */
    int_buf[0] = cpu_to_be32(nr_lmbs);
    cur_index++;
    for (i = 0; i < nr_lmbs; i++) {
        uint64_t addr = i * lmb_size;
        uint32_t *dynamic_memory = cur_index;

        if (i >= hotplug_lmb_start) {
            /* LMB in the hotpluggable region: has a real DRC */
            sPAPRDRConnector *drc;
            sPAPRDRConnectorClass *drck;

            drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_LMB, i);
            g_assert(drc);
            drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);

            dynamic_memory[0] = cpu_to_be32(addr >> 32);
            dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff);
            dynamic_memory[2] = cpu_to_be32(drck->get_index(drc));
            dynamic_memory[3] = cpu_to_be32(0); /* reserved */
            dynamic_memory[4] = cpu_to_be32(numa_get_node(addr, NULL));
            /* Flag the LMB as assigned only if RAM is mapped there */
            if (memory_region_present(get_system_memory(), addr)) {
                dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_ASSIGNED);
            } else {
                dynamic_memory[5] = cpu_to_be32(0);
            }
        } else {
            /*
             * LMB information for RMA, boot time RAM and gap b/n RAM and
             * hotplug memory region -- all these are marked as reserved
             * and as having no valid DRC.
             */
            dynamic_memory[0] = cpu_to_be32(addr >> 32);
            dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff);
            dynamic_memory[2] = cpu_to_be32(0);
            dynamic_memory[3] = cpu_to_be32(0); /* reserved */
            dynamic_memory[4] = cpu_to_be32(-1);
            dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_RESERVED |
                                            SPAPR_LMB_FLAGS_DRC_INVALID);
        }

        cur_index += SPAPR_DR_LMB_LIST_ENTRY_SIZE;
    }
    ret = fdt_setprop(fdt, offset, "ibm,dynamic-memory", int_buf, buf_len);
    if (ret < 0) {
        goto out;
    }

    /* ibm,associativity-lookup-arrays: int_buf is reused for this property */
    cur_index = int_buf;
    int_buf[0] = cpu_to_be32(nr_nodes);
    int_buf[1] = cpu_to_be32(4); /* Number of entries per associativity list */
    cur_index += 2;
    for (i = 0; i < nr_nodes; i++) {
        uint32_t associativity[] = {
            cpu_to_be32(0x0),
            cpu_to_be32(0x0),
            cpu_to_be32(0x0),
            cpu_to_be32(i)
        };
        memcpy(cur_index, associativity, sizeof(associativity));
        cur_index += 4;
    }
    ret = fdt_setprop(fdt, offset, "ibm,associativity-lookup-arrays", int_buf,
            (cur_index - int_buf) * sizeof(uint32_t));
out:
    g_free(int_buf);
    return ret;
}
|
|
|
|
|
|
|
|
/*
 * Build the device tree response for the ibm,client-architecture-support
 * (CAS) call and copy it, preceded by an update header, into guest memory
 * at @addr.
 *
 * @addr: guest physical address where header + FDT are written
 * @size: space available at @addr (includes the header)
 * @cpu_update: if true, refresh the CPU nodes in the tree
 * @memory_update: if true (and the machine has DR LMBs enabled),
 *                 regenerate the ibm,dynamic-reconfiguration-memory node
 *
 * Returns 0 on success, -1 if the packed tree does not fit in @size.
 */
int spapr_h_cas_compose_response(sPAPRMachineState *spapr,
                                 target_ulong addr, target_ulong size,
                                 bool cpu_update, bool memory_update)
{
    void *fdt, *fdt_skel;
    sPAPRDeviceTreeUpdateHeader hdr = { .version_id = 1 };
    sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(qdev_get_machine());

    /* The header is written in front of the tree, so it eats into @size */
    size -= sizeof(hdr);

    /* Create skeleton */
    fdt_skel = g_malloc0(size);
    _FDT((fdt_create(fdt_skel, size)));
    _FDT((fdt_begin_node(fdt_skel, "")));
    _FDT((fdt_end_node(fdt_skel)));
    _FDT((fdt_finish(fdt_skel)));
    fdt = g_malloc0(size);
    _FDT((fdt_open_into(fdt_skel, fdt, size)));
    g_free(fdt_skel);

    /* Fixup cpu nodes */
    if (cpu_update) {
        _FDT((spapr_fixup_cpu_dt(fdt, spapr)));
    }

    /* Generate ibm,dynamic-reconfiguration-memory node if required */
    if (memory_update && smc->dr_lmb_enabled) {
        _FDT((spapr_populate_drconf_memory(spapr, fdt)));
    }

    /* Pack resulting tree */
    _FDT((fdt_pack(fdt)));

    if (fdt_totalsize(fdt) + sizeof(hdr) > size) {
        trace_spapr_cas_failed(size);
        /* Bug fix: fdt was previously leaked on this error path */
        g_free(fdt);
        return -1;
    }

    cpu_physical_memory_write(addr, &hdr, sizeof(hdr));
    cpu_physical_memory_write(addr + sizeof(hdr), fdt, fdt_totalsize(fdt));
    trace_spapr_cas_continue(fdt_totalsize(fdt) + sizeof(hdr));
    g_free(fdt);

    return 0;
}
|
|
|
|
|
2015-07-02 06:23:04 +00:00
|
|
|
/*
 * Produce the final flattened device tree for the guest and copy it to
 * guest memory at @fdt_addr.
 *
 * Starts from the skeleton tree built earlier (spapr->fdt_skel) and
 * populates memory, VIO, RNG, PCI, RTAS and CPU nodes plus boot-order
 * information before packing and writing the result.  Most failures are
 * fatal (exit(1)) since the guest cannot boot without a valid tree.
 *
 * @fdt_addr: guest physical address to place the packed tree at
 * @rtas_addr, @rtas_size: RTAS blob location, advertised through the
 *                         RTAS device tree properties
 */
static void spapr_finalize_fdt(sPAPRMachineState *spapr,
                               hwaddr fdt_addr,
                               hwaddr rtas_addr,
                               hwaddr rtas_size)
{
    MachineState *machine = MACHINE(qdev_get_machine());
    MachineClass *mc = MACHINE_GET_CLASS(machine);
    sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
    const char *boot_device = machine->boot_order;
    int ret, i;
    size_t cb = 0;
    char *bootlist;
    void *fdt;
    sPAPRPHBState *phb;

    fdt = g_malloc(FDT_MAX_SIZE);

    /* open out the base tree into a temp buffer for the final tweaks */
    _FDT((fdt_open_into(spapr->fdt_skel, fdt, FDT_MAX_SIZE)));

    ret = spapr_populate_memory(spapr, fdt);
    if (ret < 0) {
        error_report("couldn't setup memory nodes in fdt");
        exit(1);
    }

    ret = spapr_populate_vdevice(spapr->vio_bus, fdt);
    if (ret < 0) {
        error_report("couldn't setup vio devices in fdt");
        exit(1);
    }

    /* Only add an RNG node when a spapr-rng device was actually created */
    if (object_resolve_path_type("", TYPE_SPAPR_RNG, NULL)) {
        ret = spapr_rng_populate_dt(fdt);
        if (ret < 0) {
            error_report("could not set up rng device in the fdt");
            exit(1);
        }
    }

    /* One PCI node per PCI host bridge on the machine */
    QLIST_FOREACH(phb, &spapr->phbs, list) {
        ret = spapr_populate_pci_dt(phb, PHANDLE_XICP, fdt);
        if (ret < 0) {
            error_report("couldn't setup PCI devices in fdt");
            exit(1);
        }
    }

    /* RTAS */
    ret = spapr_rtas_device_tree_setup(fdt, rtas_addr, rtas_size);
    if (ret < 0) {
        /* NOTE: not fatal, unlike the failures above */
        error_report("Couldn't set up RTAS device tree properties");
    }

    /* cpus */
    spapr_populate_cpus_dt_node(fdt, spapr);

    /* Flatten the boot device list into a single space-separated string */
    bootlist = get_boot_devices_list(&cb, true);
    if (cb && bootlist) {
        int offset = fdt_path_offset(fdt, "/chosen");
        if (offset < 0) {
            exit(1);
        }
        for (i = 0; i < cb; i++) {
            if (bootlist[i] == '\n') {
                bootlist[i] = ' ';
            }

        }
        ret = fdt_setprop_string(fdt, offset, "qemu,boot-list", bootlist);
    }

    if (boot_device && strlen(boot_device)) {
        int offset = fdt_path_offset(fdt, "/chosen");

        if (offset < 0) {
            exit(1);
        }
        fdt_setprop_string(fdt, offset, "qemu,boot-device", boot_device);
    }

    if (!spapr->has_graphics) {
        spapr_populate_chosen_stdout(fdt, spapr->vio_bus);
    }

    /* Advertise the LMB DR connectors when memory hotplug is enabled */
    if (smc->dr_lmb_enabled) {
        _FDT(spapr_drc_populate_dt(fdt, 0, NULL, SPAPR_DR_CONNECTOR_TYPE_LMB));
    }

    /* Advertise the CPU DR connectors when CPU hotplug is supported */
    if (mc->query_hotpluggable_cpus) {
        int offset = fdt_path_offset(fdt, "/cpus");
        ret = spapr_drc_populate_dt(fdt, offset, NULL,
                                    SPAPR_DR_CONNECTOR_TYPE_CPU);
        if (ret < 0) {
            error_report("Couldn't set up CPU DR device tree properties");
            exit(1);
        }
    }

    _FDT((fdt_pack(fdt)));

    if (fdt_totalsize(fdt) > FDT_MAX_SIZE) {
        error_report("FDT too big ! 0x%x bytes (max is 0x%x)",
                     fdt_totalsize(fdt), FDT_MAX_SIZE);
        exit(1);
    }

    /* -machine dumpdtb=... support, then copy into guest RAM */
    qemu_fdt_dumpdtb(fdt, fdt_totalsize(fdt));
    cpu_physical_memory_write(fdt_addr, fdt, fdt_totalsize(fdt));

    g_free(bootlist);
    g_free(fdt);
}
|
|
|
|
|
|
|
|
static uint64_t translate_kernel_address(void *opaque, uint64_t addr)
|
|
|
|
{
|
|
|
|
return (addr & 0x0fffffff) + KERNEL_LOAD_ADDR;
|
|
|
|
}
|
|
|
|
|
2012-05-03 04:03:45 +00:00
|
|
|
/*
 * Handle a hypercall trap from the guest.  The call number and return
 * value live in r3; further arguments start at r4.  Calls made from
 * problem state (MSR[PR]=1) are rejected with H_PRIVILEGE.
 */
static void emulate_spapr_hypercall(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    if (!msr_pr) {
        env->gpr[3] = spapr_hypercall(cpu, env->gpr[3], &env->gpr[4]);
        return;
    }

    hcall_dprintf("Hypercall made with MSR[PR]=1\n");
    env->gpr[3] = H_PRIVILEGE;
}
|
|
|
|
|
2014-11-17 04:12:30 +00:00
|
|
|
/*
 * Accessors for entries of the userspace hash page table (spapr->htab).
 * Each HPTE is a pair of 64-bit doublewords; HPTE(_table, _i) returns a
 * pointer to the i-th entry.  The dirty bit lives in the first doubleword
 * and is set/cleared here in host memory only.
 * NOTE(review): the dirty bit appears to track entries needing transfer
 * (e.g. during HPT migration) -- confirm against the save/load code.
 */
#define HPTE(_table, _i)   (void *)(((uint64_t *)(_table)) + ((_i) * 2))

#define HPTE_VALID(_hpte)  (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_VALID)
#define HPTE_DIRTY(_hpte)  (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_HPTE_DIRTY)
#define CLEAN_HPTE(_hpte)  ((*(uint64_t *)(_hpte)) &= tswap64(~HPTE64_V_HPTE_DIRTY))
#define DIRTY_HPTE(_hpte)  ((*(uint64_t *)(_hpte)) |= tswap64(HPTE64_V_HPTE_DIRTY))
|
|
|
|
|
2016-02-08 23:28:58 +00:00
|
|
|
/*
|
|
|
|
* Get the fd to access the kernel htab, re-opening it if necessary
|
|
|
|
*/
|
|
|
|
static int get_htab_fd(sPAPRMachineState *spapr)
|
|
|
|
{
|
|
|
|
if (spapr->htab_fd >= 0) {
|
|
|
|
return spapr->htab_fd;
|
|
|
|
}
|
|
|
|
|
|
|
|
spapr->htab_fd = kvmppc_get_htab_fd(false);
|
|
|
|
if (spapr->htab_fd < 0) {
|
|
|
|
error_report("Unable to open fd for reading hash table from KVM: %s",
|
|
|
|
strerror(errno));
|
|
|
|
}
|
|
|
|
|
|
|
|
return spapr->htab_fd;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Close the KVM hash-table fd, if open, and mark it invalid (-1). */
static void close_htab_fd(sPAPRMachineState *spapr)
{
    int fd = spapr->htab_fd;

    spapr->htab_fd = -1;
    if (fd >= 0) {
        close(fd);
    }
}
|
|
|
|
|
2016-02-09 00:15:12 +00:00
|
|
|
/*
 * Pick the hash page table order (log2 of its size in bytes) for a
 * given RAM size.  We aim for a table of 1/128 the RAM size rounded up;
 * PAPR actually recommends 1/64, but that is much more than Linux
 * guests need.  The result is clamped to the architected limits.
 */
static int spapr_hpt_shift_for_ramsize(uint64_t ramsize)
{
    int order = ctz64(pow2ceil(ramsize)) - 7;

    if (order < 18) {
        order = 18;     /* Minimum architected size */
    } else if (order > 46) {
        order = 46;     /* Maximum architected size */
    }

    return order;
}
|
|
|
|
|
2016-02-09 00:21:56 +00:00
|
|
|
/*
 * (Re)allocate the guest hash page table (HPT), 2^@shift bytes in size.
 *
 * Any HPT from a previous boot is released first.  A kernel/KVM-side
 * allocation is attempted via kvmppc_reset_htab(); only if the kernel
 * reports that no kernel-side HPT is needed (rc == 0) does QEMU allocate
 * the table in userspace (spapr->htab).  Failures are reported through
 * @errp; on failure spapr->htab_shift may be left at 0.
 */
static void spapr_reallocate_hpt(sPAPRMachineState *spapr, int shift,
                                 Error **errp)
{
    long rc;

    /* Clean up any HPT info from a previous boot */
    g_free(spapr->htab);
    spapr->htab = NULL;
    spapr->htab_shift = 0;
    close_htab_fd(spapr);

    rc = kvmppc_reset_htab(shift);
    if (rc < 0) {
        /* kernel-side HPT needed, but couldn't allocate one */
        error_setg_errno(errp, errno,
                         "Failed to allocate KVM HPT of order %d (try smaller maxmem?)",
                         shift);
        /* This is almost certainly fatal, but if the caller really
         * wants to carry on with shift == 0, it's welcome to try */
    } else if (rc > 0) {
        /* kernel-side HPT allocated */
        if (rc != shift) {
            /* Kernel gave us a different order than requested; report it
             * but still record the requested shift below */
            error_setg(errp,
                       "Requested order %d HPT, but kernel allocated order %ld (try smaller maxmem?)",
                       shift, rc);
        }

        /* htab stays NULL: the table lives in the kernel, not QEMU */
        spapr->htab_shift = shift;
        spapr->htab = NULL;
    } else {
        /* kernel-side HPT not needed, allocate in userspace instead */
        size_t size = 1ULL << shift;
        int i;

        spapr->htab = qemu_memalign(size, size);
        if (!spapr->htab) {
            error_setg_errno(errp, errno,
                             "Could not allocate HPT of order %d", shift);
            return;
        }

        memset(spapr->htab, 0, size);
        spapr->htab_shift = shift;

        /* Mark every entry dirty.  NOTE(review): presumably so that a
         * subsequent HPT transfer sends the whole table -- confirm
         * against the htab save/load path */
        for (i = 0; i < size / HASH_PTE_SIZE_64; i++) {
            DIRTY_HPTE(HPTE(spapr->htab, i));
        }
    }
}
|
|
|
|
|
2014-11-04 22:22:54 +00:00
|
|
|
/*
 * Callback for foreach_dynamic_sysbus_device(): abort startup if a
 * dynamically created sysbus device of an unsupported type is present.
 * Only sPAPR PCI host bridges are accepted on this machine.
 */
static int find_unknown_sysbus_device(SysBusDevice *sbdev, void *opaque)
{
    Object *obj = OBJECT(sbdev);

    if (object_dynamic_cast(obj, TYPE_SPAPR_PCI_HOST_BRIDGE) == NULL) {
        error_report("Device %s is not supported by this machine yet.",
                     qdev_fw_name(DEVICE(sbdev)));
        exit(1);
    }

    return 0;
}
|
|
|
|
|
2012-09-12 16:57:11 +00:00
|
|
|
static void ppc_spapr_reset(void)
|
Delay creation of pseries device tree until reset
At present, the 'pseries' machine creates a flattened device tree in the
machine->init function to pass to either the guest kernel or to firmware.
However, the machine->init function runs before processing of -device
command line options, which means that the device tree so created will
be (incorrectly) missing devices specified that way.
Supplying a correct device tree is, in any case, part of the required
platform entry conditions. Therefore, this patch moves the creation and
loading of the device tree from machine->init to a reset callback. The
setup of entry point address and initial register state moves with it,
which leads to a slight cleanup.
This is not, alas, quite enough to make a fully working reset for pseries.
For that we would need to reload the firmware images, which on this
machine are loaded into RAM. It's a step in the right direction, though.
Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
2011-04-05 05:12:10 +00:00
|
|
|
{
|
2016-02-09 00:21:56 +00:00
|
|
|
MachineState *machine = MACHINE(qdev_get_machine());
|
|
|
|
sPAPRMachineState *spapr = SPAPR_MACHINE(machine);
|
2013-05-29 20:29:20 +00:00
|
|
|
PowerPCCPU *first_ppc_cpu;
|
spapr: Locate RTAS and device-tree based on real RMA
We currently calculate the final RTAS and FDT location based on
the early estimate of the RMA size, cropped to 256M on KVM since
we only know the real RMA size at reset time which happens much
later in the boot process.
This means the FDT and RTAS end up right below 256M while they
could be much higher, using precious RMA space and limiting
what the OS bootloader can put there which has proved to be
a problem with some OSes (such as when using very large initrd's)
Fortunately, we do the actual copy of the device-tree into guest
memory much later, during reset, late enough to be able to do it
using the final RMA value, we just need to move the calculation
to the right place.
However, RTAS is still loaded too early, so we change the code to
load the tiny blob into qemu memory early on, and then copy it into
guest memory at reset time. It's small enough that the memory usage
doesn't matter.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
[aik: fixed errors from checkpatch.pl, defined RTAS_MAX_ADDR]
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
[agraf: fix compilation on 32bit hosts]
Signed-off-by: Alexander Graf <agraf@suse.de>
2014-07-21 03:02:04 +00:00
|
|
|
uint32_t rtas_limit;
|
2013-01-17 17:51:17 +00:00
|
|
|
|
2014-11-04 22:22:54 +00:00
|
|
|
/* Check for unknown sysbus devices */
|
|
|
|
foreach_dynamic_sysbus_device(find_unknown_sysbus_device, NULL);
|
|
|
|
|
2016-02-09 00:21:56 +00:00
|
|
|
/* Allocate and/or reset the hash page table */
|
|
|
|
spapr_reallocate_hpt(spapr,
|
|
|
|
spapr_hpt_shift_for_ramsize(machine->maxram_size),
|
|
|
|
&error_fatal);
|
|
|
|
|
|
|
|
/* Update the RMA size if necessary */
|
|
|
|
if (spapr->vrma_adjust) {
|
|
|
|
spapr->rma_size = kvmppc_rma_size(spapr_node0_size(),
|
|
|
|
spapr->htab_shift);
|
|
|
|
}
|
Delay creation of pseries device tree until reset
At present, the 'pseries' machine creates a flattened device tree in the
machine->init function to pass to either the guest kernel or to firmware.
However, the machine->init function runs before processing of -device
command line options, which means that the device tree so created will
be (incorrectly) missing devices specified that way.
Supplying a correct device tree is, in any case, part of the required
platform entry conditions. Therefore, this patch moves the creation and
loading of the device tree from machine->init to a reset callback. The
setup of entry point address and initial register state moves with it,
which leads to a slight cleanup.
This is not, alas, quite enough to make a fully working reset for pseries.
For that we would need to reload the firmware images, which on this
machine are loaded into RAM. It's a step in the right direction, though.
Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
2011-04-05 05:12:10 +00:00
|
|
|
|
2012-09-12 16:57:11 +00:00
|
|
|
qemu_devices_reset();
|
Delay creation of pseries device tree until reset
At present, the 'pseries' machine creates a flattened device tree in the
machine->init function to pass to either the guest kernel or to firmware.
However, the machine->init function runs before processing of -device
command line options, which means that the device tree so created will
be (incorrectly) missing devices specified that way.
Supplying a correct device tree is, in any case, part of the required
platform entry conditions. Therefore, this patch moves the creation and
loading of the device tree from machine->init to a reset callback. The
setup of entry point address and initial register state moves with it,
which leads to a slight cleanup.
This is not, alas, quite enough to make a fully working reset for pseries.
For that we would need to reload the firmware images, which on this
machine are loaded into RAM. It's a step in the right direction, though.
Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
2011-04-05 05:12:10 +00:00
|
|
|
|
spapr: Locate RTAS and device-tree based on real RMA
We currently calculate the final RTAS and FDT location based on
the early estimate of the RMA size, cropped to 256M on KVM since
we only know the real RMA size at reset time which happens much
later in the boot process.
This means the FDT and RTAS end up right below 256M while they
could be much higher, using precious RMA space and limiting
what the OS bootloader can put there which has proved to be
a problem with some OSes (such as when using very large initrd's)
Fortunately, we do the actual copy of the device-tree into guest
memory much later, during reset, late enough to be able to do it
using the final RMA value, we just need to move the calculation
to the right place.
However, RTAS is still loaded too early, so we change the code to
load the tiny blob into qemu memory early on, and then copy it into
guest memory at reset time. It's small enough that the memory usage
doesn't matter.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
[aik: fixed errors from checkpatch.pl, defined RTAS_MAX_ADDR]
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
[agraf: fix compilation on 32bit hosts]
Signed-off-by: Alexander Graf <agraf@suse.de>
2014-07-21 03:02:04 +00:00
|
|
|
/*
|
|
|
|
* We place the device tree and RTAS just below either the top of the RMA,
|
|
|
|
* or just below 2GB, whichever is lowere, so that it can be
|
|
|
|
* processed with 32-bit real mode code if necessary
|
|
|
|
*/
|
|
|
|
rtas_limit = MIN(spapr->rma_size, RTAS_MAX_ADDR);
|
|
|
|
spapr->rtas_addr = rtas_limit - RTAS_MAX_SIZE;
|
|
|
|
spapr->fdt_addr = spapr->rtas_addr - FDT_MAX_SIZE;
|
|
|
|
|
Delay creation of pseries device tree until reset
At present, the 'pseries' machine creates a flattened device tree in the
machine->init function to pass to either the guest kernel or to firmware.
However, the machine->init function runs before processing of -device
command line options, which means that the device tree so created will
be (incorrectly) missing devices specified that way.
Supplying a correct device tree is, in any case, part of the required
platform entry conditions. Therefore, this patch moves the creation and
loading of the device tree from machine->init to a reset callback. The
setup of entry point address and initial register state moves with it,
which leads to a slight cleanup.
This is not, alas, quite enough to make a fully working reset for pseries.
For that we would need to reload the firmware images, which on this
machine are loaded into RAM. It's a step in the right direction, though.
Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
2011-04-05 05:12:10 +00:00
|
|
|
/* Load the fdt */
|
|
|
|
spapr_finalize_fdt(spapr, spapr->fdt_addr, spapr->rtas_addr,
|
|
|
|
spapr->rtas_size);
|
|
|
|
|
spapr: Locate RTAS and device-tree based on real RMA
We currently calculate the final RTAS and FDT location based on
the early estimate of the RMA size, cropped to 256M on KVM since
we only know the real RMA size at reset time which happens much
later in the boot process.
This means the FDT and RTAS end up right below 256M while they
could be much higher, using precious RMA space and limiting
what the OS bootloader can put there which has proved to be
a problem with some OSes (such as when using very large initrd's)
Fortunately, we do the actual copy of the device-tree into guest
memory much later, during reset, late enough to be able to do it
using the final RMA value, we just need to move the calculation
to the right place.
However, RTAS is still loaded too early, so we change the code to
load the tiny blob into qemu memory early on, and then copy it into
guest memory at reset time. It's small enough that the memory usage
doesn't matter.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
[aik: fixed errors from checkpatch.pl, defined RTAS_MAX_ADDR]
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
[agraf: fix compilation on 32bit hosts]
Signed-off-by: Alexander Graf <agraf@suse.de>
2014-07-21 03:02:04 +00:00
|
|
|
/* Copy RTAS over */
|
|
|
|
cpu_physical_memory_write(spapr->rtas_addr, spapr->rtas_blob,
|
|
|
|
spapr->rtas_size);
|
|
|
|
|
Delay creation of pseries device tree until reset
At present, the 'pseries' machine creates a flattened device tree in the
machine->init function to pass to either the guest kernel or to firmware.
However, the machine->init function runs before processing of -device
command line options, which means that the device tree so created will
be (incorrectly) missing devices specified that way.
Supplying a correct device tree is, in any case, part of the required
platform entry conditions. Therefore, this patch moves the creation and
loading of the device tree from machine->init to a reset callback. The
setup of entry point address and initial register state moves with it,
which leads to a slight cleanup.
This is not, alas, quite enough to make a fully working reset for pseries.
For that we would need to reload the firmware images, which on this
machine are loaded into RAM. It's a step in the right direction, though.
Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
2011-04-05 05:12:10 +00:00
|
|
|
/* Set up the entry state */
|
2013-05-29 20:29:20 +00:00
|
|
|
first_ppc_cpu = POWERPC_CPU(first_cpu);
|
|
|
|
first_ppc_cpu->env.gpr[3] = spapr->fdt_addr;
|
|
|
|
first_ppc_cpu->env.gpr[5] = 0;
|
|
|
|
first_cpu->halted = 0;
|
2015-07-02 06:23:06 +00:00
|
|
|
first_ppc_cpu->env.nip = SPAPR_ENTRY_POINT;
|
Delay creation of pseries device tree until reset
At present, the 'pseries' machine creates a flattened device tree in the
machine->init function to pass to either the guest kernel or to firmware.
However, the machine->init function runs before processing of -device
command line options, which means that the device tree so created will
be (incorrectly) missing devices specified that way.
Supplying a correct device tree is, in any case, part of the required
platform entry conditions. Therefore, this patch moves the creation and
loading of the device tree from machine->init to a reset callback. The
setup of entry point address and initial register state moves with it,
which leads to a slight cleanup.
This is not, alas, quite enough to make a fully working reset for pseries.
For that we would need to reload the firmware images, which on this
machine are loaded into RAM. It's a step in the right direction, though.
Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
2011-04-05 05:12:10 +00:00
|
|
|
|
|
|
|
}
|
|
|
|
|
2015-07-02 06:23:04 +00:00
|
|
|
/* Instantiate the PAPR NVRAM device on the VIO bus, optionally backed
 * by the first -pflash drive for persistence across runs. */
static void spapr_create_nvram(sPAPRMachineState *spapr)
{
    DriveInfo *backing = drive_get(IF_PFLASH, 0, 0);
    DeviceState *nvram_dev;

    nvram_dev = qdev_create(&spapr->vio_bus->bus, "spapr-nvram");
    if (backing != NULL) {
        /* Persist NVRAM contents in the user-supplied pflash backend */
        qdev_prop_set_drive(nvram_dev, "drive", blk_by_legacy_dinfo(backing),
                            &error_fatal);
    }
    qdev_init_nofail(nvram_dev);

    spapr->nvram = (struct sPAPRNVRAM *)nvram_dev;
}
|
|
|
|
|
2015-07-02 06:23:04 +00:00
|
|
|
/* Create the sPAPR RTC device and publish its "date" property at the
 * machine level under the name "rtc-time". */
static void spapr_rtc_create(sPAPRMachineState *spapr)
{
    DeviceState *rtc_dev = qdev_create(NULL, TYPE_SPAPR_RTC);

    qdev_init_nofail(rtc_dev);
    spapr->rtc = rtc_dev;

    /* Alias the RTC's "date" property so tools can query machine/rtc-time */
    object_property_add_alias(qdev_get_machine(), "rtc-time",
                              OBJECT(rtc_dev), "date", NULL);
}
|
|
|
|
|
2012-08-14 11:11:49 +00:00
|
|
|
/* Returns whether we want to use VGA or not */
|
2016-01-20 01:58:39 +00:00
|
|
|
/* Decide whether a VGA adapter should be used based on the global
 * vga_interface_type, instantiating a PCI VGA device when appropriate.
 * Returns true if VGA is in use; sets *errp for unsupported modes. */
static bool spapr_vga_init(PCIBus *pci_bus, Error **errp)
{
    if (vga_interface_type == VGA_NONE) {
        return false;
    }
    if (vga_interface_type == VGA_DEVICE) {
        /* A VGA device was given explicitly on the command line */
        return true;
    }
    if (vga_interface_type == VGA_STD || vga_interface_type == VGA_VIRTIO) {
        return pci_vga_init(pci_bus) != NULL;
    }
    error_setg(errp,
               "Unsupported VGA mode, only -vga std or -vga virtio is supported");
    return false;
}
|
|
|
|
|
2015-02-06 03:55:52 +00:00
|
|
|
static int spapr_post_load(void *opaque, int version_id)
|
|
|
|
{
|
2015-07-02 06:23:04 +00:00
|
|
|
sPAPRMachineState *spapr = (sPAPRMachineState *)opaque;
|
2015-02-06 03:55:52 +00:00
|
|
|
int err = 0;
|
|
|
|
|
2015-04-09 18:32:39 +00:00
|
|
|
/* In earlier versions, there was no separate qdev for the PAPR
|
2015-02-06 03:55:52 +00:00
|
|
|
* RTC, so the RTC offset was stored directly in sPAPREnvironment.
|
|
|
|
* So when migrating from those versions, poke the incoming offset
|
|
|
|
* value into the RTC device */
|
|
|
|
if (version_id < 3) {
|
|
|
|
err = spapr_rtc_import_offset(spapr->rtc, spapr->rtc_offset);
|
|
|
|
}
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* VMState field predicate: true for streams recorded before section
 * version 3 (opaque is unused, required by the callback signature). */
static bool version_before_3(void *opaque, int version_id)
{
    const int first_modern_version = 3;

    return version_id < first_modern_version;
}
|
|
|
|
|
2013-07-18 19:33:01 +00:00
|
|
|
/* Top-level migration description for the spapr machine state.
 * Version 3 moved the RTC offset into the separate RTC device (see
 * spapr_post_load); version 1 streams are still accepted. */
static const VMStateDescription vmstate_spapr = {
    .name = "spapr",
    .version_id = 3,
    .minimum_version_id = 1,
    .post_load = spapr_post_load,
    .fields = (VMStateField[]) {
        /* used to be @next_irq */
        VMSTATE_UNUSED_BUFFER(version_before_3, 0, 4),

        /* RTC offset: only present in pre-v3 streams; newer streams carry
         * it inside the RTC device's own section */
        VMSTATE_UINT64_TEST(rtc_offset, sPAPRMachineState, version_before_3),

        VMSTATE_PPC_TIMEBASE_V(tb, sPAPRMachineState, 2),
        VMSTATE_END_OF_LIST()
    },
};
|
|
|
|
|
|
|
|
/* SaveVMHandlers .save_live_setup hook for the hash page table (HPT).
 * Writes the table size header and primes the iterator state. */
static int htab_save_setup(QEMUFile *f, void *opaque)
{
    sPAPRMachineState *spapr = opaque;

    /* Stream header: advertise the guest hash table size (shift) */
    qemu_put_be32(f, spapr->htab_shift);

    if (!spapr->htab) {
        /* No qemu-side table: the HPT lives in the kernel (HV KVM) */
        assert(kvm_enabled());
    } else {
        spapr->htab_save_index = 0;
        spapr->htab_first_pass = true;
    }

    return 0;
}
|
|
|
|
|
2015-07-02 06:23:04 +00:00
|
|
|
/*
 * First pass of HPT migration: walk the table streaming out runs of
 * valid hash PTEs, clearing each entry's dirty bit as it is scanned.
 * Invalid entries are skipped on the wire (the destination table starts
 * zeroed). max_ns == -1 means no time budget (used at completion time);
 * otherwise the walk is bounded by max_ns and the rate limiter.
 *
 * Fix: the original cleaned HPTEs *after* incrementing the index
 * ("index++; CLEAN_HPTE(..., index)"), which cleared the dirty bit of
 * the wrong (following) slot and could touch one slot past the range
 * being scanned. Clean the current slot first, then advance.
 */
static void htab_save_first_pass(QEMUFile *f, sPAPRMachineState *spapr,
                                 int64_t max_ns)
{
    bool has_timeout = max_ns != -1;
    int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
    int index = spapr->htab_save_index;
    int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    assert(spapr->htab_first_pass);

    do {
        int chunkstart;

        /* Consume invalid HPTEs */
        while ((index < htabslots)
               && !HPTE_VALID(HPTE(spapr->htab, index))) {
            CLEAN_HPTE(HPTE(spapr->htab, index));
            index++;
        }

        /* Consume valid HPTEs (bounded so n_valid fits in a be16) */
        chunkstart = index;
        while ((index < htabslots) && (index - chunkstart < USHRT_MAX)
               && HPTE_VALID(HPTE(spapr->htab, index))) {
            CLEAN_HPTE(HPTE(spapr->htab, index));
            index++;
        }

        if (index > chunkstart) {
            int n_valid = index - chunkstart;

            /* Chunk header: start slot, #valid entries, #invalid entries */
            qemu_put_be32(f, chunkstart);
            qemu_put_be16(f, n_valid);
            qemu_put_be16(f, 0);
            qemu_put_buffer(f, HPTE(spapr->htab, chunkstart),
                            HASH_PTE_SIZE_64 * n_valid);

            if (has_timeout &&
                (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
                break;
            }
        }
    } while ((index < htabslots) && !qemu_file_rate_limit(f));

    if (index >= htabslots) {
        assert(index == htabslots);
        /* Table fully scanned: wrap and switch to the dirty-only passes */
        index = 0;
        spapr->htab_first_pass = false;
    }
    spapr->htab_save_index = index;
}
|
|
|
|
|
2015-07-02 06:23:04 +00:00
|
|
|
/*
 * Later (dirty-tracking) passes of HPT migration: resend only entries
 * dirtied since the previous pass. Each streamed chunk is a run of
 * dirty-valid entries followed by a run of dirty-invalid entries.
 * max_ns < 0 means "final pass": ignore the time budget and the rate
 * limiter and drain everything. Returns 1 when the whole table was
 * examined and nothing needed sending (i.e. migration of the HPT is
 * complete), 0 otherwise.
 */
static int htab_save_later_pass(QEMUFile *f, sPAPRMachineState *spapr,
                                int64_t max_ns)
{
    bool final = max_ns < 0;
    int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
    int examined = 0, sent = 0;
    int index = spapr->htab_save_index;
    int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    assert(!spapr->htab_first_pass);

    do {
        int chunkstart, invalidstart;

        /* Consume non-dirty HPTEs */
        while ((index < htabslots)
               && !HPTE_DIRTY(HPTE(spapr->htab, index))) {
            index++;
            examined++;
        }

        chunkstart = index;
        /* Consume valid dirty HPTEs (bounded so n_valid fits in a be16) */
        while ((index < htabslots) && (index - chunkstart < USHRT_MAX)
               && HPTE_DIRTY(HPTE(spapr->htab, index))
               && HPTE_VALID(HPTE(spapr->htab, index))) {
            CLEAN_HPTE(HPTE(spapr->htab, index));
            index++;
            examined++;
        }

        invalidstart = index;
        /* Consume invalid dirty HPTEs (sent as a count only, no data) */
        while ((index < htabslots) && (index - invalidstart < USHRT_MAX)
               && HPTE_DIRTY(HPTE(spapr->htab, index))
               && !HPTE_VALID(HPTE(spapr->htab, index))) {
            CLEAN_HPTE(HPTE(spapr->htab, index));
            index++;
            examined++;
        }

        if (index > chunkstart) {
            int n_valid = invalidstart - chunkstart;
            int n_invalid = index - invalidstart;

            /* Chunk header: start slot, #valid, #invalid; only the valid
             * entries carry payload bytes */
            qemu_put_be32(f, chunkstart);
            qemu_put_be16(f, n_valid);
            qemu_put_be16(f, n_invalid);
            qemu_put_buffer(f, HPTE(spapr->htab, chunkstart),
                            HASH_PTE_SIZE_64 * n_valid);
            sent += index - chunkstart;

            if (!final && (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
                break;
            }
        }

        if (examined >= htabslots) {
            break;
        }

        /* Wrap around so a pass interrupted mid-table resumes correctly */
        if (index >= htabslots) {
            assert(index == htabslots);
            index = 0;
        }
    } while ((examined < htabslots) && (!qemu_file_rate_limit(f) || final));

    if (index >= htabslots) {
        assert(index == htabslots);
        index = 0;
    }

    spapr->htab_save_index = index;

    return (examined >= htabslots) && (sent == 0) ? 1 : 0;
}
|
|
|
|
|
2013-07-18 19:33:03 +00:00
|
|
|
#define MAX_ITERATION_NS 5000000 /* 5 ms */
|
|
|
|
#define MAX_KVM_BUF_SIZE 2048
|
|
|
|
|
2013-07-18 19:33:01 +00:00
|
|
|
/* SaveVMHandlers .save_live_iterate hook: emit one time-bounded batch of
 * HPT entries, either from the qemu-side table or via the KVM htab fd. */
static int htab_save_iterate(QEMUFile *f, void *opaque)
{
    sPAPRMachineState *spapr = opaque;
    int rc = 0;

    /* Iteration header */
    qemu_put_be32(f, 0);

    if (spapr->htab) {
        if (spapr->htab_first_pass) {
            htab_save_first_pass(f, spapr, MAX_ITERATION_NS);
        } else {
            rc = htab_save_later_pass(f, spapr, MAX_ITERATION_NS);
        }
    } else {
        int fd;

        /* The HPT is managed by the kernel; let KVM stream it for us */
        assert(kvm_enabled());

        fd = get_htab_fd(spapr);
        if (fd < 0) {
            return fd;
        }

        rc = kvmppc_save_htab(f, fd, MAX_KVM_BUF_SIZE, MAX_ITERATION_NS);
        if (rc < 0) {
            return rc;
        }
    }

    /* End marker */
    qemu_put_be32(f, 0);
    qemu_put_be16(f, 0);
    qemu_put_be16(f, 0);

    return rc;
}
|
|
|
|
|
|
|
|
/* SaveVMHandlers completion hook: flush the entire remaining HPT with no
 * time budget so the destination has a complete table. */
static int htab_save_complete(QEMUFile *f, void *opaque)
{
    sPAPRMachineState *spapr = opaque;

    /* Iteration header */
    qemu_put_be32(f, 0);

    if (spapr->htab) {
        /* Finish the initial sweep if still in progress, then drain every
         * remaining dirty entry (-1 == unlimited time) */
        if (spapr->htab_first_pass) {
            htab_save_first_pass(f, spapr, -1);
        }
        htab_save_later_pass(f, spapr, -1);
    } else {
        int fd, rc;

        assert(kvm_enabled());

        fd = get_htab_fd(spapr);
        if (fd < 0) {
            return fd;
        }

        rc = kvmppc_save_htab(f, fd, MAX_KVM_BUF_SIZE, -1);
        if (rc < 0) {
            return rc;
        }
    }

    /* End marker */
    qemu_put_be32(f, 0);
    qemu_put_be16(f, 0);
    qemu_put_be16(f, 0);

    return 0;
}
|
|
|
|
|
|
|
|
/*
 * SaveVMHandlers .load_state hook for the HPT stream.
 * A non-zero first word is a size header (reallocate the table and
 * return); a zero header is followed by chunks of PTEs terminated by an
 * all-zero chunk header.
 *
 * Fixes: (1) a failure to open the KVM htab fd previously only printed a
 * message and fell through to abort later on assert(fd >= 0) -- now it
 * returns the error to the migration core; (2) the fd was leaked on the
 * bad-index and kvmppc_load_htab_chunk error paths -- now it is closed.
 */
static int htab_load(QEMUFile *f, void *opaque, int version_id)
{
    sPAPRMachineState *spapr = opaque;
    uint32_t section_hdr;
    int fd = -1;

    if (version_id < 1 || version_id > 1) {
        error_report("htab_load() bad version");
        return -EINVAL;
    }

    section_hdr = qemu_get_be32(f);

    if (section_hdr) {
        Error *local_err = NULL;

        /* First section gives the htab size */
        spapr_reallocate_hpt(spapr, section_hdr, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -EINVAL;
        }
        return 0;
    }

    if (!spapr->htab) {
        /* Kernel-managed HPT: restore through the KVM htab fd */
        assert(kvm_enabled());

        fd = kvmppc_get_htab_fd(true);
        if (fd < 0) {
            error_report("Unable to open fd to restore KVM hash table: %s",
                         strerror(errno));
            return fd;
        }
    }

    while (true) {
        uint32_t index;
        uint16_t n_valid, n_invalid;

        index = qemu_get_be32(f);
        n_valid = qemu_get_be16(f);
        n_invalid = qemu_get_be16(f);

        if ((index == 0) && (n_valid == 0) && (n_invalid == 0)) {
            /* End of Stream */
            break;
        }

        if ((index + n_valid + n_invalid) >
            (HTAB_SIZE(spapr) / HASH_PTE_SIZE_64)) {
            /* Bad index in stream */
            error_report(
                "htab_load() bad index %d (%hd+%hd entries) in htab stream (htab_shift=%d)",
                index, n_valid, n_invalid, spapr->htab_shift);
            if (fd >= 0) {
                close(fd); /* don't leak the KVM htab fd on error */
            }
            return -EINVAL;
        }

        if (spapr->htab) {
            if (n_valid) {
                qemu_get_buffer(f, HPTE(spapr->htab, index),
                                HASH_PTE_SIZE_64 * n_valid);
            }
            if (n_invalid) {
                memset(HPTE(spapr->htab, index + n_valid), 0,
                       HASH_PTE_SIZE_64 * n_invalid);
            }
        } else {
            int rc;

            assert(fd >= 0);

            rc = kvmppc_load_htab_chunk(f, fd, index, n_valid, n_invalid);
            if (rc < 0) {
                close(fd); /* fd is valid here; avoid leaking it */
                return rc;
            }
        }
    }

    if (!spapr->htab) {
        assert(fd >= 0);
        close(fd);
    }

    return 0;
}
|
|
|
|
|
2016-07-21 09:21:34 +00:00
|
|
|
static void htab_cleanup(void *opaque)
|
|
|
|
{
|
|
|
|
sPAPRMachineState *spapr = opaque;
|
|
|
|
|
|
|
|
close_htab_fd(spapr);
|
|
|
|
}
|
|
|
|
|
2013-07-18 19:33:01 +00:00
|
|
|
/* Live-migration handler table for the hash page table ("spapr/htab"
 * section): setup/iterate/complete on the source, load on the target,
 * cleanup closes the cached KVM htab fd. */
static SaveVMHandlers savevm_htab_handlers = {
    .save_live_setup = htab_save_setup,
    .save_live_iterate = htab_save_iterate,
    .save_live_complete_precopy = htab_save_complete,
    .cleanup = htab_cleanup,
    .load_state = htab_load,
};
|
|
|
|
|
2015-03-18 12:30:44 +00:00
|
|
|
static void spapr_boot_set(void *opaque, const char *boot_device,
|
|
|
|
Error **errp)
|
|
|
|
{
|
|
|
|
MachineState *machine = MACHINE(qdev_get_machine());
|
|
|
|
machine->boot_order = g_strdup(boot_device);
|
|
|
|
}
|
|
|
|
|
2015-08-12 03:16:48 +00:00
|
|
|
/*
|
|
|
|
* Reset routine for LMB DR devices.
|
|
|
|
*
|
|
|
|
* Unlike PCI DR devices, LMB DR devices explicitly register this reset
|
|
|
|
* routine. Reset for PCI DR devices will be handled by PHB reset routine
|
|
|
|
* when it walks all its children devices. LMB devices reset occurs
|
|
|
|
* as part of spapr_ppc_reset().
|
|
|
|
*/
|
|
|
|
static void spapr_drc_reset(void *opaque)
|
|
|
|
{
|
|
|
|
sPAPRDRConnector *drc = opaque;
|
|
|
|
DeviceState *d = DEVICE(drc);
|
|
|
|
|
|
|
|
if (d) {
|
|
|
|
device_reset(d);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Create one LMB DR connector per SPAPR_MEMORY_BLOCK_SIZE block of
 * hotpluggable memory (maxram - ram), each registered for reset. */
static void spapr_create_lmb_dr_connectors(sPAPRMachineState *spapr)
{
    MachineState *machine = MACHINE(spapr);
    const uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
    uint32_t nr_lmbs = (machine->maxram_size - machine->ram_size) / lmb_size;
    uint32_t n;

    for (n = 0; n < nr_lmbs; n++) {
        /* Connector index is the block's LMB number within guest memory */
        uint64_t addr = spapr->hotplug_memory.base + n * lmb_size;
        sPAPRDRConnector *drc =
            spapr_dr_connector_new(OBJECT(spapr), SPAPR_DR_CONNECTOR_TYPE_LMB,
                                   addr / lmb_size);

        qemu_register_reset(spapr_drc_reset, drc);
    }
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If RAM size, maxmem size and individual node mem sizes aren't aligned
|
|
|
|
* to SPAPR_MEMORY_BLOCK_SIZE(256MB), then refuse to start the guest
|
|
|
|
* since we can't support such unaligned sizes with DRCONF_MEMORY.
|
|
|
|
*/
|
2016-01-25 11:46:47 +00:00
|
|
|
/*
 * Refuse to start if RAM size, maxmem size, or any NUMA node size is not
 * a multiple of SPAPR_MEMORY_BLOCK_SIZE (256MB), since DRCONF_MEMORY
 * cannot describe unaligned sizes.
 *
 * Fix: the maxmem-alignment error message printed machine->ram_size
 * instead of the offending machine->maxram_size.
 */
static void spapr_validate_node_memory(MachineState *machine, Error **errp)
{
    int i;

    if (machine->ram_size % SPAPR_MEMORY_BLOCK_SIZE) {
        error_setg(errp, "Memory size 0x" RAM_ADDR_FMT
                   " is not aligned to %llu MiB",
                   machine->ram_size,
                   SPAPR_MEMORY_BLOCK_SIZE / M_BYTE);
        return;
    }

    if (machine->maxram_size % SPAPR_MEMORY_BLOCK_SIZE) {
        error_setg(errp, "Maximum memory size 0x" RAM_ADDR_FMT
                   " is not aligned to %llu MiB",
                   machine->maxram_size,
                   SPAPR_MEMORY_BLOCK_SIZE / M_BYTE);
        return;
    }

    for (i = 0; i < nb_numa_nodes; i++) {
        if (numa_info[i].node_mem % SPAPR_MEMORY_BLOCK_SIZE) {
            error_setg(errp,
                       "Node %d memory size 0x%" PRIx64
                       " is not aligned to %llu MiB",
                       i, numa_info[i].node_mem,
                       SPAPR_MEMORY_BLOCK_SIZE / M_BYTE);
            return;
        }
    }
}
|
|
|
|
|
2011-04-01 04:15:20 +00:00
|
|
|
/* pSeries LPAR / sPAPR hardware init */
|
2014-05-07 14:42:57 +00:00
|
|
|
static void ppc_spapr_init(MachineState *machine)
|
2011-04-01 04:15:20 +00:00
|
|
|
{
|
2015-07-02 06:23:04 +00:00
|
|
|
sPAPRMachineState *spapr = SPAPR_MACHINE(machine);
|
2016-08-05 06:25:33 +00:00
|
|
|
MachineClass *mc = MACHINE_GET_CLASS(machine);
|
2015-08-12 03:16:48 +00:00
|
|
|
sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
|
2014-05-07 14:42:57 +00:00
|
|
|
const char *kernel_filename = machine->kernel_filename;
|
|
|
|
const char *kernel_cmdline = machine->kernel_cmdline;
|
|
|
|
const char *initrd_filename = machine->initrd_filename;
|
2012-08-20 17:08:05 +00:00
|
|
|
PCIHostState *phb;
|
2011-04-01 04:15:20 +00:00
|
|
|
int i;
|
2011-10-03 10:56:38 +00:00
|
|
|
MemoryRegion *sysmem = get_system_memory();
|
|
|
|
MemoryRegion *ram = g_new(MemoryRegion, 1);
|
2014-07-10 15:03:41 +00:00
|
|
|
MemoryRegion *rma_region;
|
|
|
|
void *rma = NULL;
|
2012-10-23 10:30:10 +00:00
|
|
|
hwaddr rma_alloc_size;
|
2014-07-03 03:10:06 +00:00
|
|
|
hwaddr node0_size = spapr_node0_size();
|
2012-01-11 19:46:28 +00:00
|
|
|
uint32_t initrd_base = 0;
|
|
|
|
long kernel_size = 0, initrd_size = 0;
|
spapr: Locate RTAS and device-tree based on real RMA
We currently calculate the final RTAS and FDT location based on
the early estimate of the RMA size, cropped to 256M on KVM since
we only know the real RMA size at reset time which happens much
later in the boot process.
This means the FDT and RTAS end up right below 256M while they
could be much higher, using precious RMA space and limiting
what the OS bootloader can put there which has proved to be
a problem with some OSes (such as when using very large initrd's)
Fortunately, we do the actual copy of the device-tree into guest
memory much later, during reset, late enough to be able to do it
using the final RMA value, we just need to move the calculation
to the right place.
However, RTAS is still loaded too early, so we change the code to
load the tiny blob into qemu memory early on, and then copy it into
guest memory at reset time. It's small enough that the memory usage
doesn't matter.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
[aik: fixed errors from checkpatch.pl, defined RTAS_MAX_ADDR]
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
[agraf: fix compilation on 32bit hosts]
Signed-off-by: Alexander Graf <agraf@suse.de>
2014-07-21 03:02:04 +00:00
|
|
|
long load_limit, fw_size;
|
2013-09-25 07:40:15 +00:00
|
|
|
bool kernel_le = false;
|
2011-04-01 04:15:23 +00:00
|
|
|
char *filename;
|
2016-06-10 00:59:03 +00:00
|
|
|
int smt = kvmppc_smt_threads();
|
|
|
|
int spapr_cores = smp_cpus / smp_threads;
|
|
|
|
int spapr_max_cores = max_cpus / smp_threads;
|
|
|
|
|
2016-08-05 06:25:33 +00:00
|
|
|
if (mc->query_hotpluggable_cpus) {
|
2016-06-10 00:59:03 +00:00
|
|
|
if (smp_cpus % smp_threads) {
|
|
|
|
error_report("smp_cpus (%u) must be multiple of threads (%u)",
|
|
|
|
smp_cpus, smp_threads);
|
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
if (max_cpus % smp_threads) {
|
|
|
|
error_report("max_cpus (%u) must be multiple of threads (%u)",
|
|
|
|
max_cpus, smp_threads);
|
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
}
|
2011-04-01 04:15:20 +00:00
|
|
|
|
2016-03-04 09:24:28 +00:00
|
|
|
msi_nonbroken = true;
|
2012-08-07 16:10:37 +00:00
|
|
|
|
2011-11-01 16:49:05 +00:00
|
|
|
QLIST_INIT(&spapr->phbs);
|
|
|
|
|
2011-04-01 04:15:20 +00:00
|
|
|
cpu_ppc_hypercall = emulate_spapr_hypercall;
|
|
|
|
|
2011-09-29 21:39:11 +00:00
|
|
|
/* Allocate RMA if necessary */
|
2014-07-10 15:03:41 +00:00
|
|
|
rma_alloc_size = kvmppc_alloc_rma(&rma);
|
2011-09-29 21:39:11 +00:00
|
|
|
|
|
|
|
if (rma_alloc_size == -1) {
|
2015-05-07 05:33:41 +00:00
|
|
|
error_report("Unable to create RMA");
|
2011-09-29 21:39:11 +00:00
|
|
|
exit(1);
|
|
|
|
}
|
2012-09-12 16:57:12 +00:00
|
|
|
|
2013-11-25 03:14:50 +00:00
|
|
|
if (rma_alloc_size && (rma_alloc_size < node0_size)) {
|
2012-09-12 16:57:12 +00:00
|
|
|
spapr->rma_size = rma_alloc_size;
|
2011-09-29 21:39:11 +00:00
|
|
|
} else {
|
2013-11-25 03:14:50 +00:00
|
|
|
spapr->rma_size = node0_size;
|
2012-09-12 16:57:12 +00:00
|
|
|
|
|
|
|
/* With KVM, we don't actually know whether KVM supports an
|
|
|
|
* unbounded RMA (PR KVM) or is limited by the hash table size
|
|
|
|
* (HV KVM using VRMA), so we always assume the latter
|
|
|
|
*
|
|
|
|
* In that case, we also limit the initial allocations for RTAS
|
|
|
|
* etc... to 256M since we have no way to know what the VRMA size
|
|
|
|
* is going to be as it depends on the size of the hash table
|
|
|
|
* isn't determined yet.
|
|
|
|
*/
|
|
|
|
if (kvm_enabled()) {
|
|
|
|
spapr->vrma_adjust = 1;
|
|
|
|
spapr->rma_size = MIN(spapr->rma_size, 0x10000000);
|
|
|
|
}
|
2016-07-04 21:37:08 +00:00
|
|
|
|
|
|
|
/* Actually we don't support unbounded RMA anymore since we
|
|
|
|
* added proper emulation of HV mode. The max we can get is
|
|
|
|
* 16G which also happens to be what we configure for PAPR
|
|
|
|
* mode so make sure we don't do anything bigger than that
|
|
|
|
*/
|
|
|
|
spapr->rma_size = MIN(spapr->rma_size, 0x400000000ull);
|
2011-09-29 21:39:11 +00:00
|
|
|
}
|
|
|
|
|
2013-11-25 03:14:50 +00:00
|
|
|
if (spapr->rma_size > node0_size) {
|
2016-01-20 01:58:55 +00:00
|
|
|
error_report("Numa node 0 has to span the RMA (%#08"HWADDR_PRIx")",
|
|
|
|
spapr->rma_size);
|
2013-11-25 03:14:50 +00:00
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
|
spapr: Locate RTAS and device-tree based on real RMA
We currently calculate the final RTAS and FDT location based on
the early estimate of the RMA size, cropped to 256M on KVM since
we only know the real RMA size at reset time which happens much
later in the boot process.
This means the FDT and RTAS end up right below 256M while they
could be much higher, using precious RMA space and limiting
what the OS bootloader can put there which has proved to be
a problem with some OSes (such as when using very large initrd's)
Fortunately, we do the actual copy of the device-tree into guest
memory much later, during reset, late enough to be able to do it
using the final RMA value, we just need to move the calculation
to the right place.
However, RTAS is still loaded too early, so we change the code to
load the tiny blob into qemu memory early on, and then copy it into
guest memory at reset time. It's small enough that the memory usage
doesn't matter.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
[aik: fixed errors from checkpatch.pl, defined RTAS_MAX_ADDR]
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
[agraf: fix compilation on 32bit hosts]
Signed-off-by: Alexander Graf <agraf@suse.de>
2014-07-21 03:02:04 +00:00
|
|
|
/* Setup a load limit for the ramdisk leaving room for SLOF and FDT */
|
|
|
|
load_limit = MIN(spapr->rma_size, RTAS_MAX_ADDR) - FW_OVERHEAD;
|
2011-04-01 04:15:20 +00:00
|
|
|
|
2013-03-13 15:53:28 +00:00
|
|
|
/* Set up Interrupt Controller before we create the VCPUs */
|
2016-06-28 19:05:15 +00:00
|
|
|
spapr->xics = xics_system_init(machine,
|
|
|
|
DIV_ROUND_UP(max_cpus * smt, smp_threads),
|
|
|
|
XICS_IRQS_SPAPR, &error_fatal);
|
2013-03-13 15:53:28 +00:00
|
|
|
|
2015-08-12 03:16:48 +00:00
|
|
|
if (smc->dr_lmb_enabled) {
|
2016-01-25 11:46:47 +00:00
|
|
|
spapr_validate_node_memory(machine, &error_fatal);
|
2015-08-12 03:16:48 +00:00
|
|
|
}
|
|
|
|
|
2011-04-01 04:15:20 +00:00
|
|
|
/* init CPUs */
|
2015-07-02 06:23:19 +00:00
|
|
|
if (machine->cpu_model == NULL) {
|
|
|
|
machine->cpu_model = kvm_enabled() ? "host" : "POWER7";
|
2011-04-01 04:15:20 +00:00
|
|
|
}
|
2016-06-10 00:59:03 +00:00
|
|
|
|
2016-08-10 19:08:01 +00:00
|
|
|
ppc_cpu_parse_features(machine->cpu_model);
|
|
|
|
|
2016-08-05 06:25:33 +00:00
|
|
|
if (mc->query_hotpluggable_cpus) {
|
2016-06-10 00:59:03 +00:00
|
|
|
char *type = spapr_get_cpu_core_type(machine->cpu_model);
|
|
|
|
|
2016-08-09 16:59:59 +00:00
|
|
|
if (type == NULL) {
|
2016-08-09 16:59:57 +00:00
|
|
|
error_report("Unable to find sPAPR CPU Core definition");
|
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
|
2016-06-10 00:59:03 +00:00
|
|
|
spapr->cores = g_new0(Object *, spapr_max_cores);
|
2016-06-10 00:59:04 +00:00
|
|
|
for (i = 0; i < spapr_max_cores; i++) {
|
2016-07-22 11:10:36 +00:00
|
|
|
int core_id = i * smp_threads;
|
2016-06-10 00:59:04 +00:00
|
|
|
sPAPRDRConnector *drc =
|
|
|
|
spapr_dr_connector_new(OBJECT(spapr),
|
2016-07-22 11:10:36 +00:00
|
|
|
SPAPR_DR_CONNECTOR_TYPE_CPU,
|
|
|
|
(core_id / smp_threads) * smt);
|
2016-06-10 00:59:04 +00:00
|
|
|
|
|
|
|
qemu_register_reset(spapr_drc_reset, drc);
|
|
|
|
|
|
|
|
if (i < spapr_cores) {
|
2016-08-09 16:59:57 +00:00
|
|
|
Object *core = object_new(type);
|
2016-06-10 00:59:04 +00:00
|
|
|
object_property_set_int(core, smp_threads, "nr-threads",
|
|
|
|
&error_fatal);
|
2016-07-22 11:10:36 +00:00
|
|
|
object_property_set_int(core, core_id, CPU_CORE_PROP_CORE_ID,
|
2016-06-10 00:59:04 +00:00
|
|
|
&error_fatal);
|
|
|
|
object_property_set_bool(core, true, "realized", &error_fatal);
|
2016-06-10 00:59:03 +00:00
|
|
|
}
|
2011-04-01 04:15:20 +00:00
|
|
|
}
|
2016-06-10 00:59:03 +00:00
|
|
|
g_free(type);
|
|
|
|
} else {
|
|
|
|
for (i = 0; i < smp_cpus; i++) {
|
|
|
|
PowerPCCPU *cpu = cpu_ppc_init(machine->cpu_model);
|
|
|
|
if (cpu == NULL) {
|
|
|
|
error_report("Unable to find PowerPC CPU definition");
|
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
spapr_cpu_init(spapr, cpu, &error_fatal);
|
|
|
|
}
|
2011-04-01 04:15:20 +00:00
|
|
|
}
|
|
|
|
|
2015-05-07 05:33:59 +00:00
|
|
|
if (kvm_enabled()) {
|
|
|
|
/* Enable H_LOGICAL_CI_* so SLOF can talk to in-kernel devices */
|
|
|
|
kvmppc_enable_logical_ci_hcalls();
|
2015-09-08 01:25:13 +00:00
|
|
|
kvmppc_enable_set_mode_hcall();
|
2015-05-07 05:33:59 +00:00
|
|
|
}
|
|
|
|
|
2011-04-01 04:15:20 +00:00
|
|
|
/* allocate RAM */
|
2014-07-10 15:03:42 +00:00
|
|
|
memory_region_allocate_system_memory(ram, NULL, "ppc_spapr.ram",
|
2015-07-02 06:23:05 +00:00
|
|
|
machine->ram_size);
|
2014-07-10 15:03:42 +00:00
|
|
|
memory_region_add_subregion(sysmem, 0, ram);
|
2011-04-01 04:15:20 +00:00
|
|
|
|
2014-07-10 15:03:41 +00:00
|
|
|
if (rma_alloc_size && rma) {
|
|
|
|
rma_region = g_new(MemoryRegion, 1);
|
|
|
|
memory_region_init_ram_ptr(rma_region, NULL, "ppc_spapr.rma",
|
|
|
|
rma_alloc_size, rma);
|
|
|
|
vmstate_register_ram_global(rma_region);
|
|
|
|
memory_region_add_subregion(sysmem, 0, rma_region);
|
|
|
|
}
|
|
|
|
|
2015-06-29 08:44:27 +00:00
|
|
|
/* initialize hotplug memory address space */
|
|
|
|
if (machine->ram_size < machine->maxram_size) {
|
|
|
|
ram_addr_t hotplug_mem_size = machine->maxram_size - machine->ram_size;
|
2016-06-02 14:07:37 +00:00
|
|
|
/*
|
|
|
|
* Limit the number of hotpluggable memory slots to half the number
|
|
|
|
* slots that KVM supports, leaving the other half for PCI and other
|
|
|
|
* devices. However ensure that number of slots doesn't drop below 32.
|
|
|
|
*/
|
|
|
|
int max_memslots = kvm_enabled() ? kvm_get_max_memslots() / 2 :
|
|
|
|
SPAPR_MAX_RAM_SLOTS;
|
2015-06-29 08:44:27 +00:00
|
|
|
|
2016-06-02 14:07:37 +00:00
|
|
|
if (max_memslots < SPAPR_MAX_RAM_SLOTS) {
|
|
|
|
max_memslots = SPAPR_MAX_RAM_SLOTS;
|
|
|
|
}
|
|
|
|
if (machine->ram_slots > max_memslots) {
|
2016-01-20 01:58:55 +00:00
|
|
|
error_report("Specified number of memory slots %"
|
|
|
|
PRIu64" exceeds max supported %d",
|
2016-06-02 14:07:37 +00:00
|
|
|
machine->ram_slots, max_memslots);
|
2016-01-20 01:58:55 +00:00
|
|
|
exit(1);
|
2015-06-29 08:44:27 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
spapr->hotplug_memory.base = ROUND_UP(machine->ram_size,
|
|
|
|
SPAPR_HOTPLUG_MEM_ALIGN);
|
|
|
|
memory_region_init(&spapr->hotplug_memory.mr, OBJECT(spapr),
|
|
|
|
"hotplug-memory", hotplug_mem_size);
|
|
|
|
memory_region_add_subregion(sysmem, spapr->hotplug_memory.base,
|
|
|
|
&spapr->hotplug_memory.mr);
|
|
|
|
}
|
|
|
|
|
2015-08-12 03:16:48 +00:00
|
|
|
if (smc->dr_lmb_enabled) {
|
|
|
|
spapr_create_lmb_dr_connectors(spapr);
|
|
|
|
}
|
|
|
|
|
2011-04-01 04:15:23 +00:00
|
|
|
filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, "spapr-rtas.bin");
|
2015-03-14 15:29:09 +00:00
|
|
|
if (!filename) {
|
2015-05-07 05:33:41 +00:00
|
|
|
error_report("Could not find LPAR rtas '%s'", "spapr-rtas.bin");
|
2015-03-14 15:29:09 +00:00
|
|
|
exit(1);
|
|
|
|
}
|
spapr: Locate RTAS and device-tree based on real RMA
We currently calculate the final RTAS and FDT location based on
the early estimate of the RMA size, cropped to 256M on KVM since
we only know the real RMA size at reset time which happens much
later in the boot process.
This means the FDT and RTAS end up right below 256M while they
could be much higher, using precious RMA space and limiting
what the OS bootloader can put there which has proved to be
a problem with some OSes (such as when using very large initrd's)
Fortunately, we do the actual copy of the device-tree into guest
memory much later, during reset, late enough to be able to do it
using the final RMA value, we just need to move the calculation
to the right place.
However, RTAS is still loaded too early, so we change the code to
load the tiny blob into qemu memory early on, and then copy it into
guest memory at reset time. It's small enough that the memory usage
doesn't matter.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
[aik: fixed errors from checkpatch.pl, defined RTAS_MAX_ADDR]
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
[agraf: fix compilation on 32bit hosts]
Signed-off-by: Alexander Graf <agraf@suse.de>
2014-07-21 03:02:04 +00:00
|
|
|
spapr->rtas_size = get_image_size(filename);
|
2016-04-25 15:36:06 +00:00
|
|
|
if (spapr->rtas_size < 0) {
|
|
|
|
error_report("Could not get size of LPAR rtas '%s'", filename);
|
|
|
|
exit(1);
|
|
|
|
}
|
spapr: Locate RTAS and device-tree based on real RMA
We currently calculate the final RTAS and FDT location based on
the early estimate of the RMA size, cropped to 256M on KVM since
we only know the real RMA size at reset time which happens much
later in the boot process.
This means the FDT and RTAS end up right below 256M while they
could be much higher, using precious RMA space and limiting
what the OS bootloader can put there which has proved to be
a problem with some OSes (such as when using very large initrd's)
Fortunately, we do the actual copy of the device-tree into guest
memory much later, during reset, late enough to be able to do it
using the final RMA value, we just need to move the calculation
to the right place.
However, RTAS is still loaded too early, so we change the code to
load the tiny blob into qemu memory early on, and then copy it into
guest memory at reset time. It's small enough that the memory usage
doesn't matter.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
[aik: fixed errors from checkpatch.pl, defined RTAS_MAX_ADDR]
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
[agraf: fix compilation on 32bit hosts]
Signed-off-by: Alexander Graf <agraf@suse.de>
2014-07-21 03:02:04 +00:00
|
|
|
spapr->rtas_blob = g_malloc(spapr->rtas_size);
|
|
|
|
if (load_image_size(filename, spapr->rtas_blob, spapr->rtas_size) < 0) {
|
2015-05-07 05:33:41 +00:00
|
|
|
error_report("Could not load LPAR rtas '%s'", filename);
|
2011-04-01 04:15:23 +00:00
|
|
|
exit(1);
|
|
|
|
}
|
2012-01-11 19:46:28 +00:00
|
|
|
if (spapr->rtas_size > RTAS_MAX_SIZE) {
|
2015-05-07 05:33:41 +00:00
|
|
|
error_report("RTAS too big ! 0x%zx bytes (max is 0x%x)",
|
|
|
|
(size_t)spapr->rtas_size, RTAS_MAX_SIZE);
|
2012-01-11 19:46:28 +00:00
|
|
|
exit(1);
|
|
|
|
}
|
2011-08-21 03:09:37 +00:00
|
|
|
g_free(filename);
|
2011-04-01 04:15:23 +00:00
|
|
|
|
2012-10-08 18:17:39 +00:00
|
|
|
/* Set up EPOW events infrastructure */
|
|
|
|
spapr_events_init(spapr);
|
|
|
|
|
2015-02-06 03:55:47 +00:00
|
|
|
/* Set up the RTC RTAS interfaces */
|
2015-02-06 03:55:51 +00:00
|
|
|
spapr_rtc_create(spapr);
|
2015-02-06 03:55:47 +00:00
|
|
|
|
Implement the PAPR (pSeries) virtualized interrupt controller (xics)
PAPR defines an interrupt control architecture which is logically divided
into ICP (Interrupt Control Presentation, each unit is responsible for
presenting interrupts to a particular "interrupt server", i.e. CPU) and
ICS (Interrupt Control Source, each unit responsible for one or more
hardware interrupts as numbered globally across the system). All PAPR
virtual IO devices expect to deliver interrupts via this mechanism. In
Linux, this interrupt controller system is handled by the "xics" driver.
On pSeries systems, access to the interrupt controller is virtualized via
hypercalls and RTAS methods. However, the virtualized interface is very
similar to the underlying interrupt controller hardware, and similar PICs
exist un-virtualized in some other systems.
This patch implements both the ICP and ICS sides of the PAPR interrupt
controller. For now, only the hypercall virtualized interface is provided,
however it would be relatively straightforward to graft an emulated
register interface onto the underlying interrupt logic if we want to add
a machine with a hardware ICS/ICP system in the future.
There are some limitations in this implementation: it is assumed for now
that only one instance of the ICS exists, although a full xics system can
have several, each responsible for a different group of hardware irqs.
ICP/ICS can handle both level-sensitve (LSI) and message signalled (MSI)
interrupt inputs. For now, this implementation supports only MSI
interrupts, since that is used by PAPR virtual IO devices.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
2011-04-01 04:15:25 +00:00
|
|
|
/* Set up VIO bus */
|
2011-04-01 04:15:21 +00:00
|
|
|
spapr->vio_bus = spapr_vio_bus_init();
|
|
|
|
|
2011-05-26 09:52:44 +00:00
|
|
|
for (i = 0; i < MAX_SERIAL_PORTS; i++) {
|
2011-04-01 04:15:21 +00:00
|
|
|
if (serial_hds[i]) {
|
2012-04-25 17:55:41 +00:00
|
|
|
spapr_vty_create(spapr->vio_bus, serial_hds[i]);
|
2011-04-01 04:15:21 +00:00
|
|
|
}
|
|
|
|
}
|
2011-04-01 04:15:20 +00:00
|
|
|
|
2012-11-12 16:46:57 +00:00
|
|
|
/* We always have at least the nvram device on VIO */
|
|
|
|
spapr_create_nvram(spapr);
|
|
|
|
|
2011-10-30 17:16:46 +00:00
|
|
|
/* Set up PCI */
|
2012-08-07 16:10:33 +00:00
|
|
|
spapr_pci_rtas_init();
|
|
|
|
|
2013-03-13 15:53:25 +00:00
|
|
|
phb = spapr_create_phb(spapr, 0);
|
2011-10-30 17:16:46 +00:00
|
|
|
|
2011-05-26 09:52:44 +00:00
|
|
|
for (i = 0; i < nb_nics; i++) {
|
2011-04-01 04:15:29 +00:00
|
|
|
NICInfo *nd = &nd_table[i];
|
|
|
|
|
|
|
|
if (!nd->model) {
|
2011-08-21 03:09:37 +00:00
|
|
|
nd->model = g_strdup("ibmveth");
|
2011-04-01 04:15:29 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (strcmp(nd->model, "ibmveth") == 0) {
|
2012-04-25 17:55:41 +00:00
|
|
|
spapr_vlan_create(spapr->vio_bus, nd);
|
2011-04-01 04:15:29 +00:00
|
|
|
} else {
|
2013-06-06 08:48:51 +00:00
|
|
|
pci_nic_init_nofail(&nd_table[i], phb->bus, nd->model, NULL);
|
2011-04-01 04:15:29 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-04-01 04:15:31 +00:00
|
|
|
for (i = 0; i <= drive_get_max_bus(IF_SCSI); i++) {
|
2012-04-25 17:55:41 +00:00
|
|
|
spapr_vscsi_create(spapr->vio_bus);
|
2011-04-01 04:15:31 +00:00
|
|
|
}
|
|
|
|
|
2012-08-06 16:42:00 +00:00
|
|
|
/* Graphics */
|
2016-01-20 01:58:39 +00:00
|
|
|
if (spapr_vga_init(phb->bus, &error_fatal)) {
|
2012-08-14 11:22:13 +00:00
|
|
|
spapr->has_graphics = true;
|
2015-03-23 17:05:28 +00:00
|
|
|
machine->usb |= defaults_enabled() && !machine->usb_disabled;
|
2012-08-06 16:42:00 +00:00
|
|
|
}
|
|
|
|
|
2015-01-06 13:29:16 +00:00
|
|
|
if (machine->usb) {
|
2015-12-09 12:34:13 +00:00
|
|
|
if (smc->use_ohci_by_default) {
|
|
|
|
pci_create_simple(phb->bus, -1, "pci-ohci");
|
|
|
|
} else {
|
|
|
|
pci_create_simple(phb->bus, -1, "nec-usb-xhci");
|
|
|
|
}
|
2015-02-04 12:28:14 +00:00
|
|
|
|
2012-08-16 02:03:56 +00:00
|
|
|
if (spapr->has_graphics) {
|
2015-02-04 12:28:14 +00:00
|
|
|
USBBus *usb_bus = usb_bus_find(-1);
|
|
|
|
|
|
|
|
usb_create_simple(usb_bus, "usb-kbd");
|
|
|
|
usb_create_simple(usb_bus, "usb-mouse");
|
2012-08-16 02:03:56 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-09-12 16:57:12 +00:00
|
|
|
if (spapr->rma_size < (MIN_RMA_SLOF << 20)) {
|
2016-01-20 01:58:55 +00:00
|
|
|
error_report(
|
|
|
|
"pSeries SLOF firmware requires >= %ldM guest RMA (Real Mode Area memory)",
|
|
|
|
MIN_RMA_SLOF);
|
2012-01-11 19:46:28 +00:00
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
|
2011-04-01 04:15:20 +00:00
|
|
|
if (kernel_filename) {
|
|
|
|
uint64_t lowaddr = 0;
|
|
|
|
|
|
|
|
kernel_size = load_elf(kernel_filename, translate_kernel_address, NULL,
|
2016-03-04 11:30:21 +00:00
|
|
|
NULL, &lowaddr, NULL, 1, PPC_ELF_MACHINE,
|
|
|
|
0, 0);
|
2014-02-04 04:04:19 +00:00
|
|
|
if (kernel_size == ELF_LOAD_WRONG_ENDIAN) {
|
2013-09-25 07:40:15 +00:00
|
|
|
kernel_size = load_elf(kernel_filename,
|
|
|
|
translate_kernel_address, NULL,
|
2016-03-04 11:30:21 +00:00
|
|
|
NULL, &lowaddr, NULL, 0, PPC_ELF_MACHINE,
|
|
|
|
0, 0);
|
2013-09-25 07:40:15 +00:00
|
|
|
kernel_le = kernel_size > 0;
|
|
|
|
}
|
2011-04-01 04:15:20 +00:00
|
|
|
if (kernel_size < 0) {
|
2016-01-20 01:58:55 +00:00
|
|
|
error_report("error loading %s: %s",
|
|
|
|
kernel_filename, load_elf_strerror(kernel_size));
|
2011-04-01 04:15:20 +00:00
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* load initrd */
|
|
|
|
if (initrd_filename) {
|
2012-01-11 19:46:28 +00:00
|
|
|
/* Try to locate the initrd in the gap between the kernel
|
|
|
|
* and the firmware. Add a bit of space just in case
|
|
|
|
*/
|
|
|
|
initrd_base = (KERNEL_LOAD_ADDR + kernel_size + 0x1ffff) & ~0xffff;
|
2011-04-01 04:15:20 +00:00
|
|
|
initrd_size = load_image_targphys(initrd_filename, initrd_base,
|
2012-01-11 19:46:28 +00:00
|
|
|
load_limit - initrd_base);
|
2011-04-01 04:15:20 +00:00
|
|
|
if (initrd_size < 0) {
|
2016-01-20 01:58:55 +00:00
|
|
|
error_report("could not load initial ram disk '%s'",
|
|
|
|
initrd_filename);
|
2011-04-01 04:15:20 +00:00
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
initrd_base = 0;
|
|
|
|
initrd_size = 0;
|
|
|
|
}
|
2012-01-11 19:46:28 +00:00
|
|
|
}
|
Delay creation of pseries device tree until reset
At present, the 'pseries' machine creates a flattened device tree in the
machine->init function to pass to either the guest kernel or to firmware.
However, the machine->init function runs before processing of -device
command line options, which means that the device tree so created will
be (incorrectly) missing devices specified that way.
Supplying a correct device tree is, in any case, part of the required
platform entry conditions. Therefore, this patch moves the creation and
loading of the device tree from machine->init to a reset callback. The
setup of entry point address and initial register state moves with it,
which leads to a slight cleanup.
This is not, alas, quite enough to make a fully working reset for pseries.
For that we would need to reload the firmware images, which on this
machine are loaded into RAM. It's a step in the right direction, though.
Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
2011-04-05 05:12:10 +00:00
|
|
|
|
2013-07-03 19:26:50 +00:00
|
|
|
if (bios_name == NULL) {
|
|
|
|
bios_name = FW_FILE_NAME;
|
|
|
|
}
|
|
|
|
filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
|
2015-03-14 15:29:09 +00:00
|
|
|
if (!filename) {
|
2015-05-07 05:33:40 +00:00
|
|
|
error_report("Could not find LPAR firmware '%s'", bios_name);
|
2015-03-14 15:29:09 +00:00
|
|
|
exit(1);
|
|
|
|
}
|
2012-01-11 19:46:28 +00:00
|
|
|
fw_size = load_image_targphys(filename, 0, FW_MAX_SIZE);
|
2015-05-07 05:33:40 +00:00
|
|
|
if (fw_size <= 0) {
|
|
|
|
error_report("Could not load LPAR firmware '%s'", filename);
|
2012-01-11 19:46:28 +00:00
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
g_free(filename);
|
|
|
|
|
2015-07-02 06:23:04 +00:00
|
|
|
/* FIXME: Should register things through the MachineState's qdev
|
|
|
|
* interface, this is a legacy from the sPAPREnvironment structure
|
|
|
|
* which predated MachineState but had a similar function */
|
2013-07-18 19:33:01 +00:00
|
|
|
vmstate_register(NULL, 0, &vmstate_spapr, spapr);
|
|
|
|
register_savevm_live(NULL, "spapr/htab", -1, 1,
|
|
|
|
&savevm_htab_handlers, spapr);
|
|
|
|
|
2011-04-01 04:15:20 +00:00
|
|
|
/* Prepare the device tree */
|
2013-10-15 16:33:37 +00:00
|
|
|
spapr->fdt_skel = spapr_create_fdt_skel(initrd_base, initrd_size,
|
2013-09-25 07:40:15 +00:00
|
|
|
kernel_size, kernel_le,
|
2015-05-07 05:33:49 +00:00
|
|
|
kernel_cmdline,
|
|
|
|
spapr->check_exception_irq);
|
Delay creation of pseries device tree until reset
At present, the 'pseries' machine creates a flattened device tree in the
machine->init function to pass to either the guest kernel or to firmware.
However, the machine->init function runs before processing of -device
command line options, which means that the device tree so created will
be (incorrectly) missing devices specified that way.
Supplying a correct device tree is, in any case, part of the required
platform entry conditions. Therefore, this patch moves the creation and
loading of the device tree from machine->init to a reset callback. The
setup of entry point address and initial register state moves with it,
which leads to a slight cleanup.
This is not, alas, quite enough to make a fully working reset for pseries.
For that we would need to reload the firmware images, which on this
machine are loaded into RAM. It's a step in the right direction, though.
Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
2011-04-05 05:12:10 +00:00
|
|
|
assert(spapr->fdt_skel != NULL);
|
2015-03-18 12:30:44 +00:00
|
|
|
|
2015-05-07 05:33:48 +00:00
|
|
|
/* used by RTAS */
|
|
|
|
QTAILQ_INIT(&spapr->ccs_list);
|
|
|
|
qemu_register_reset(spapr_ccs_reset_hook, spapr);
|
|
|
|
|
2015-03-18 12:30:44 +00:00
|
|
|
qemu_register_boot_set(spapr_boot_set, spapr);
|
2011-04-01 04:15:20 +00:00
|
|
|
}
|
|
|
|
|
2013-12-23 15:40:40 +00:00
|
|
|
/*
 * Map the machine's "kvm-type" option string onto the numeric VM type
 * code handed to KVM: 0 = default, 1 = "HV", 2 = "PR".
 * Any other non-NULL value is a fatal configuration error.
 */
static int spapr_kvm_type(const char *vm_type)
{
    int type;

    if (vm_type == NULL) {
        type = 0;                       /* no explicit type requested */
    } else if (strcmp(vm_type, "HV") == 0) {
        type = 1;
    } else if (strcmp(vm_type, "PR") == 0) {
        type = 2;
    } else {
        error_report("Unknown kvm-type specified '%s'", vm_type);
        exit(1);
    }

    return type;
}
|
|
|
|
|
2014-03-17 02:40:27 +00:00
|
|
|
/*
 * Implementation of an interface to adjust firmware path
 * for the bootindex property handling.
 *
 * Returns a newly-allocated path component for @dev (caller frees via
 * the FWPathProvider machinery), or NULL to fall back to the default
 * qdev firmware path.
 */
static char *spapr_get_fw_dev_path(FWPathProvider *p, BusState *bus,
                                   DeviceState *dev)
{
/* Downcast helper: NULL if @obj is not an instance of QOM type @name. */
#define CAST(type, obj, name) \
    ((type *)object_dynamic_cast(OBJECT(obj), (name)))
    SCSIDevice *d = CAST(SCSIDevice, dev, TYPE_SCSI_DEVICE);
    sPAPRPHBState *phb = CAST(sPAPRPHBState, dev, TYPE_SPAPR_PCI_HOST_BRIDGE);

    if (d) {
        /* Which kind of SCSI controller is this disk sitting on? */
        void *spapr = CAST(void, bus->parent, "spapr-vscsi");
        VirtIOSCSI *virtio = CAST(VirtIOSCSI, bus->parent, TYPE_VIRTIO_SCSI);
        USBDevice *usb = CAST(USBDevice, bus->parent, TYPE_USB_DEVICE);

        if (spapr) {
            /*
             * Replace "channel@0/disk@0,0" with "disk@8000000000000000":
             * We use SRP luns of the form 8000 | (bus << 8) | (id << 5) | lun
             * in the top 16 bits of the 64-bit LUN
             */
            unsigned id = 0x8000 | (d->id << 8) | d->lun;
            return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
                                   (uint64_t)id << 48);
        } else if (virtio) {
            /*
             * We use SRP luns of the form 01000000 | (target << 8) | lun
             * in the top 32 bits of the 64-bit LUN
             * Note: the quote above is from SLOF and it is wrong,
             * the actual binding is:
             * swap 0100 or 10 << or 20 << ( target lun-id -- srplun )
             */
            unsigned id = 0x1000000 | (d->id << 16) | d->lun;
            return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
                                   (uint64_t)id << 32);
        } else if (usb) {
            /*
             * We use SRP luns of the form 01000000 | (usb-port << 16) | lun
             * in the top 32 bits of the 64-bit LUN
             */
            /* NOTE(review): assumes usb->port->path begins with a decimal
             * port number parseable by atoi — TODO confirm for hubs */
            unsigned usb_port = atoi(usb->port->path);
            unsigned id = 0x1000000 | (usb_port << 16) | d->lun;
            return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
                                   (uint64_t)id << 32);
        }
    }

    if (phb) {
        /* Replace "pci" with "pci@800000020000000" */
        return g_strdup_printf("pci@%"PRIX64, phb->buid);
    }

    /* Not a device we special-case: let generic qdev naming apply. */
    return NULL;
}
|
|
|
|
|
2014-05-30 21:24:32 +00:00
|
|
|
/* QOM getter for the "kvm-type" string property; returns a fresh copy. */
static char *spapr_get_kvm_type(Object *obj, Error **errp)
{
    return g_strdup(SPAPR_MACHINE(obj)->kvm_type);
}
|
|
|
|
|
|
|
|
/* QOM setter for the "kvm-type" string property (replaces any old value). */
static void spapr_set_kvm_type(Object *obj, const char *value, Error **errp)
{
    sPAPRMachineState *sm = SPAPR_MACHINE(obj);

    g_free(sm->kvm_type);
    sm->kvm_type = g_strdup(value);
}
|
|
|
|
|
|
|
|
/* Per-instance init: set defaults and register machine properties. */
static void spapr_machine_initfn(Object *obj)
{
    sPAPRMachineState *sms = SPAPR_MACHINE(obj);

    /* -1 marks "no HPT migration fd open yet" */
    sms->htab_fd = -1;

    object_property_add_str(obj, "kvm-type", spapr_get_kvm_type,
                            spapr_set_kvm_type, NULL);
    object_property_set_description(obj, "kvm-type",
                                    "Specifies the KVM virtualization mode (HV, PR)",
                                    NULL);
}
|
|
|
|
|
2015-12-28 06:38:26 +00:00
|
|
|
/* Per-instance teardown: release the heap-allocated kvm-type string. */
static void spapr_machine_finalizefn(Object *obj)
{
    g_free(SPAPR_MACHINE(obj)->kvm_type);
}
|
|
|
|
|
2014-08-20 12:16:36 +00:00
|
|
|
static void ppc_cpu_do_nmi_on_cpu(void *arg)
|
|
|
|
{
|
|
|
|
CPUState *cs = arg;
|
|
|
|
|
|
|
|
cpu_synchronize_state(cs);
|
|
|
|
ppc_cpu_do_system_reset(cs);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* NMI monitor command: queue a system reset on every vCPU. */
static void spapr_nmi(NMIState *n, int cpu_index, Error **errp)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        async_run_on_cpu(cpu, ppc_cpu_do_nmi_on_cpu, cpu);
    }
}
|
|
|
|
|
2015-09-01 01:22:35 +00:00
|
|
|
/*
 * Attach @size bytes of memory starting at @addr as a sequence of LMBs
 * (logical memory blocks) to their DR connectors, one FDT fragment per
 * block. Notifies the guest only for genuine hotplug (not cold plug).
 */
static void spapr_add_lmbs(DeviceState *dev, uint64_t addr, uint64_t size,
                           uint32_t node, Error **errp)
{
    uint32_t nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE;
    int i;

    for (i = 0; i < nr_lmbs; i++) {
        sPAPRDRConnector *drc;
        sPAPRDRConnectorClass *drck;
        int fdt_offset, fdt_size;
        void *fdt;

        /* LMB DRC index is the block number of its start address */
        drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_LMB,
                                       addr / SPAPR_MEMORY_BLOCK_SIZE);
        g_assert(drc);

        fdt = create_device_tree(&fdt_size);
        fdt_offset = spapr_populate_memory_node(fdt, node, addr,
                                                SPAPR_MEMORY_BLOCK_SIZE);

        drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
        drck->attach(drc, dev, fdt, fdt_offset, !dev->hotplugged, errp);

        addr += SPAPR_MEMORY_BLOCK_SIZE;
    }

    /* send hotplug notification to the
     * guest only in case of hotplugged memory
     */
    if (dev->hotplugged) {
        spapr_hotplug_req_add_by_count(SPAPR_DR_CONNECTOR_TYPE_LMB, nr_lmbs);
    }
}
|
|
|
|
|
|
|
|
/*
 * Hotplug handler for pc-dimm devices: validate the size, map the DIMM
 * into the hotplug memory region, then attach its LMBs to NUMA node
 * @node. Errors are reported through @errp; on address-lookup failure
 * the DIMM mapping is rolled back.
 */
static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                              uint32_t node, Error **errp)
{
    Error *local_err = NULL;
    sPAPRMachineState *ms = SPAPR_MACHINE(hotplug_dev);
    PCDIMMDevice *dimm = PC_DIMM(dev);
    PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
    MemoryRegion *mr = ddc->get_memory_region(dimm);
    uint64_t align = memory_region_get_alignment(mr);
    uint64_t size = memory_region_size(mr);
    uint64_t addr;

    if (size % SPAPR_MEMORY_BLOCK_SIZE) {
        /* Fix: "%lld" did not match the type of SPAPR_MEMORY_BLOCK_SIZE /
         * M_BYTE (a mismatched printf specifier is undefined behaviour);
         * cast to uint64_t and use PRIu64 so the types always agree. */
        error_setg(&local_err, "Hotplugged memory size must be a multiple of "
                   "%" PRIu64 " MB",
                   (uint64_t)(SPAPR_MEMORY_BLOCK_SIZE / M_BYTE));
        goto out;
    }

    pc_dimm_memory_plug(dev, &ms->hotplug_memory, mr, align, &local_err);
    if (local_err) {
        goto out;
    }

    addr = object_property_get_int(OBJECT(dimm), PC_DIMM_ADDR_PROP, &local_err);
    if (local_err) {
        /* roll back the mapping established above */
        pc_dimm_memory_unplug(dev, &ms->hotplug_memory, mr);
        goto out;
    }

    /* size/addr were validated above, so LMB attach must not fail */
    spapr_add_lmbs(dev, addr, size, node, &error_abort);

out:
    error_propagate(errp, local_err);
}
|
|
|
|
|
2016-06-10 00:59:04 +00:00
|
|
|
void *spapr_populate_hotplug_cpu_dt(CPUState *cs, int *fdt_offset,
|
|
|
|
sPAPRMachineState *spapr)
|
|
|
|
{
|
|
|
|
PowerPCCPU *cpu = POWERPC_CPU(cs);
|
|
|
|
DeviceClass *dc = DEVICE_GET_CLASS(cs);
|
|
|
|
int id = ppc_get_vcpu_dt_id(cpu);
|
|
|
|
void *fdt;
|
|
|
|
int offset, fdt_size;
|
|
|
|
char *nodename;
|
|
|
|
|
|
|
|
fdt = create_device_tree(&fdt_size);
|
|
|
|
nodename = g_strdup_printf("%s@%x", dc->fw_name, id);
|
|
|
|
offset = fdt_add_subnode(fdt, 0, nodename);
|
|
|
|
|
|
|
|
spapr_populate_cpu_dt(cs, fdt, offset, spapr);
|
|
|
|
g_free(nodename);
|
|
|
|
|
|
|
|
*fdt_offset = offset;
|
|
|
|
return fdt;
|
|
|
|
}
|
|
|
|
|
2015-09-01 01:22:35 +00:00
|
|
|
static void spapr_machine_device_plug(HotplugHandler *hotplug_dev,
|
|
|
|
DeviceState *dev, Error **errp)
|
|
|
|
{
|
|
|
|
sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(qdev_get_machine());
|
|
|
|
|
|
|
|
if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
|
2015-06-29 08:44:32 +00:00
|
|
|
int node;
|
2015-09-01 01:22:35 +00:00
|
|
|
|
|
|
|
if (!smc->dr_lmb_enabled) {
|
|
|
|
error_setg(errp, "Memory hotplug not supported for this machine");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
node = object_property_get_int(OBJECT(dev), PC_DIMM_NODE_PROP, errp);
|
|
|
|
if (*errp) {
|
|
|
|
return;
|
|
|
|
}
|
2016-03-03 09:43:42 +00:00
|
|
|
if (node < 0 || node >= MAX_NODES) {
|
|
|
|
error_setg(errp, "Invaild node %d", node);
|
|
|
|
return;
|
|
|
|
}
|
2015-09-01 01:22:35 +00:00
|
|
|
|
2015-06-29 08:44:32 +00:00
|
|
|
/*
|
|
|
|
* Currently PowerPC kernel doesn't allow hot-adding memory to
|
|
|
|
* memory-less node, but instead will silently add the memory
|
|
|
|
* to the first node that has some memory. This causes two
|
|
|
|
* unexpected behaviours for the user.
|
|
|
|
*
|
|
|
|
* - Memory gets hotplugged to a different node than what the user
|
|
|
|
* specified.
|
|
|
|
* - Since pc-dimm subsystem in QEMU still thinks that memory belongs
|
|
|
|
* to memory-less node, a reboot will set things accordingly
|
|
|
|
* and the previously hotplugged memory now ends in the right node.
|
|
|
|
* This appears as if some memory moved from one node to another.
|
|
|
|
*
|
|
|
|
* So until kernel starts supporting memory hotplug to memory-less
|
|
|
|
* nodes, just prevent such attempts upfront in QEMU.
|
|
|
|
*/
|
|
|
|
if (nb_numa_nodes && !numa_info[node].node_mem) {
|
|
|
|
error_setg(errp, "Can't hotplug memory to memory-less node %d",
|
|
|
|
node);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2015-09-01 01:22:35 +00:00
|
|
|
spapr_memory_plug(hotplug_dev, dev, node, errp);
|
2016-06-10 00:59:04 +00:00
|
|
|
} else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
|
|
|
|
spapr_core_plug(hotplug_dev, dev, errp);
|
2015-09-01 01:22:35 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void spapr_machine_device_unplug(HotplugHandler *hotplug_dev,
|
|
|
|
DeviceState *dev, Error **errp)
|
|
|
|
{
|
2016-08-05 06:25:33 +00:00
|
|
|
MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
|
2016-06-10 00:59:05 +00:00
|
|
|
|
2015-09-01 01:22:35 +00:00
|
|
|
if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
|
|
|
|
error_setg(errp, "Memory hot unplug not supported by sPAPR");
|
2016-06-10 00:59:05 +00:00
|
|
|
} else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
|
2016-08-05 06:25:33 +00:00
|
|
|
if (!mc->query_hotpluggable_cpus) {
|
2016-06-10 00:59:05 +00:00
|
|
|
error_setg(errp, "CPU hot unplug not supported on this machine");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
spapr_core_unplug(hotplug_dev, dev, errp);
|
2015-09-01 01:22:35 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-06-10 00:59:03 +00:00
|
|
|
static void spapr_machine_device_pre_plug(HotplugHandler *hotplug_dev,
|
|
|
|
DeviceState *dev, Error **errp)
|
|
|
|
{
|
|
|
|
if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
|
|
|
|
spapr_core_pre_plug(hotplug_dev, dev, errp);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-09-01 01:22:35 +00:00
|
|
|
static HotplugHandler *spapr_get_hotpug_handler(MachineState *machine,
|
|
|
|
DeviceState *dev)
|
|
|
|
{
|
2016-06-10 00:59:03 +00:00
|
|
|
if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) ||
|
|
|
|
object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
|
2015-09-01 01:22:35 +00:00
|
|
|
return HOTPLUG_HANDLER(machine);
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2015-09-08 01:21:52 +00:00
|
|
|
static unsigned spapr_cpu_index_to_socket_id(unsigned cpu_index)
|
|
|
|
{
|
|
|
|
/* Allocate to NUMA nodes on a "socket" basis (not that concept of
|
|
|
|
* socket means much for the paravirtualized PAPR platform) */
|
|
|
|
return cpu_index / smp_threads / smp_cores;
|
|
|
|
}
|
|
|
|
|
2016-06-10 00:59:08 +00:00
|
|
|
/*
 * Build the list answered to "query-hotpluggable-cpus": one entry per
 * possible core slot (max_cpus / smp_threads), each carrying its core-id
 * and, when the slot is occupied, the QOM path of the core object.
 * Entries are prepended, so the list comes out in descending core order.
 */
static HotpluggableCPUList *spapr_query_hotpluggable_cpus(MachineState *machine)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(machine);
    int spapr_max_cores = max_cpus / smp_threads;
    HotpluggableCPUList *head = NULL;
    int core;

    for (core = 0; core < spapr_max_cores; core++) {
        HotpluggableCPUList *entry = g_new0(HotpluggableCPUList, 1);
        HotpluggableCPU *item = g_new0(HotpluggableCPU, 1);
        CpuInstanceProperties *props = g_new0(CpuInstanceProperties, 1);

        item->type = spapr_get_cpu_core_type(machine->cpu_model);
        item->vcpus_count = smp_threads;
        props->has_core_id = true;
        props->core_id = core * smp_threads;
        /* TODO: add 'has_node/node' here to describe
           to which node core belongs */
        item->props = props;

        if (spapr->cores[core]) {
            item->has_qom_path = true;
            item->qom_path = object_get_canonical_path(spapr->cores[core]);
        }

        entry->value = item;
        entry->next = head;
        head = entry;
    }

    return head;
}
|
|
|
|
|
2014-03-17 02:40:26 +00:00
|
|
|
/* Class init for the abstract sPAPR machine base type. */
static void spapr_machine_class_init(ObjectClass *oc, void *data)
{
    MachineClass *mc = MACHINE_CLASS(oc);
    sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(oc);
    FWPathProviderClass *fwc = FW_PATH_PROVIDER_CLASS(oc);
    NMIClass *nc = NMI_CLASS(oc);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc);

    /*
     * We set up the default / latest behaviour here. The class_init
     * functions for the specific versioned machine types can override
     * these details for backwards compatibility
     */
    mc->desc = "pSeries Logical Partition (PAPR compliant)";

    /* Core machine callbacks and limits */
    mc->init = ppc_spapr_init;
    mc->reset = ppc_spapr_reset;
    mc->kvm_type = spapr_kvm_type;
    mc->max_cpus = MAX_CPUMASK_BITS;
    mc->cpu_index_to_socket_id = spapr_cpu_index_to_socket_id;

    /* Defaults for devices and memory */
    mc->block_default_type = IF_SCSI;
    mc->no_parallel = 1;
    mc->default_boot_order = "";
    mc->default_ram_size = 512 * M_BYTE;
    mc->has_dynamic_sysbus = true;
    mc->pci_allow_0_address = true;

    /* Hotplug wiring */
    mc->get_hotplug_handler = spapr_get_hotpug_handler;
    mc->query_hotpluggable_cpus = spapr_query_hotpluggable_cpus;
    hc->pre_plug = spapr_machine_device_pre_plug;
    hc->plug = spapr_machine_device_plug;
    hc->unplug = spapr_machine_device_unplug;
    smc->dr_lmb_enabled = true;

    /* Interface implementations */
    fwc->get_dev_path = spapr_get_fw_dev_path;
    nc->nmi_monitor_handler = spapr_nmi;
}
|
|
|
|
|
|
|
|
/* Abstract QOM base type for all versioned pseries machine types;
 * concrete versions are generated by DEFINE_SPAPR_MACHINE below. */
static const TypeInfo spapr_machine_info = {
    .name          = TYPE_SPAPR_MACHINE,
    .parent        = TYPE_MACHINE,
    .abstract      = true,
    .instance_size = sizeof(sPAPRMachineState),
    .instance_init = spapr_machine_initfn,
    .instance_finalize = spapr_machine_finalizefn,
    .class_size    = sizeof(sPAPRMachineClass),
    .class_init    = spapr_machine_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_FW_PATH_PROVIDER },
        { TYPE_NMI },
        { TYPE_HOTPLUG_HANDLER },
        { }
    },
};
|
|
|
|
|
2015-12-07 03:25:50 +00:00
|
|
|
/*
 * Generate the boilerplate for one versioned "pseries-<verstr>" machine
 * type: a class_init that applies spapr_machine_<suffix>_class_options()
 * (marking the type as the "pseries" alias/default when @latest is true),
 * an instance_init applying spapr_machine_<suffix>_instance_options(),
 * the TypeInfo, and its type_init registration hook. Callers must define
 * both _class_options and _instance_options before invoking this macro.
 */
#define DEFINE_SPAPR_MACHINE(suffix, verstr, latest)                 \
    static void spapr_machine_##suffix##_class_init(ObjectClass *oc, \
                                                    void *data)      \
    {                                                                \
        MachineClass *mc = MACHINE_CLASS(oc);                        \
        spapr_machine_##suffix##_class_options(mc);                  \
        if (latest) {                                                \
            mc->alias = "pseries";                                   \
            mc->is_default = 1;                                      \
        }                                                            \
    }                                                                \
    static void spapr_machine_##suffix##_instance_init(Object *obj)  \
    {                                                                \
        MachineState *machine = MACHINE(obj);                        \
        spapr_machine_##suffix##_instance_options(machine);          \
    }                                                                \
    static const TypeInfo spapr_machine_##suffix##_info = {          \
        .name = MACHINE_TYPE_NAME("pseries-" verstr),                \
        .parent = TYPE_SPAPR_MACHINE,                                \
        .class_init = spapr_machine_##suffix##_class_init,           \
        .instance_init = spapr_machine_##suffix##_instance_init,     \
    };                                                               \
    static void spapr_machine_register_##suffix(void)                \
    {                                                                \
        type_register(&spapr_machine_##suffix##_info);               \
    }                                                                \
    type_init(spapr_machine_register_##suffix)
|
2015-12-07 03:23:20 +00:00
|
|
|
|
2016-06-03 05:49:42 +00:00
|
|
|
/*
 * pseries-2.7
 */
static void spapr_machine_2_7_instance_options(MachineState *machine)
{
    /* Latest version: no per-instance overrides needed. */
}

static void spapr_machine_2_7_class_options(MachineClass *mc)
{
    /* Defaults for the latest behaviour inherited from the base class */
}

/* latest=true: this type is also registered as the "pseries" alias/default. */
DEFINE_SPAPR_MACHINE(2_7, "2.7", true);
|
|
|
|
|
2015-12-07 03:28:15 +00:00
|
|
|
/*
|
|
|
|
* pseries-2.6
|
|
|
|
*/
|
2016-06-03 05:49:42 +00:00
|
|
|
#define SPAPR_COMPAT_2_6 \
|
2016-07-04 03:33:07 +00:00
|
|
|
HW_COMPAT_2_6 \
|
|
|
|
{ \
|
|
|
|
.driver = TYPE_SPAPR_PCI_HOST_BRIDGE,\
|
|
|
|
.property = "ddw",\
|
|
|
|
.value = stringify(off),\
|
|
|
|
},
|
2016-06-03 05:49:42 +00:00
|
|
|
|
2015-12-07 03:28:15 +00:00
|
|
|
static void spapr_machine_2_6_instance_options(MachineState *machine)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
static void spapr_machine_2_6_class_options(MachineClass *mc)
|
|
|
|
{
|
2016-06-03 05:49:42 +00:00
|
|
|
spapr_machine_2_7_class_options(mc);
|
2016-08-05 06:25:33 +00:00
|
|
|
mc->query_hotpluggable_cpus = NULL;
|
2016-06-03 05:49:42 +00:00
|
|
|
SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_6);
|
2015-12-07 03:28:15 +00:00
|
|
|
}
|
|
|
|
|
2016-06-03 05:49:42 +00:00
|
|
|
DEFINE_SPAPR_MACHINE(2_6, "2.6", false);
|
2015-12-07 03:28:15 +00:00
|
|
|
|
2015-12-03 06:34:10 +00:00
|
|
|
/*
|
|
|
|
* pseries-2.5
|
|
|
|
*/
|
2015-12-07 03:28:15 +00:00
|
|
|
#define SPAPR_COMPAT_2_5 \
|
2016-03-21 16:25:24 +00:00
|
|
|
HW_COMPAT_2_5 \
|
|
|
|
{ \
|
|
|
|
.driver = "spapr-vlan", \
|
|
|
|
.property = "use-rx-buffer-pools", \
|
|
|
|
.value = "off", \
|
|
|
|
},
|
2015-12-07 03:28:15 +00:00
|
|
|
|
2015-12-07 03:23:20 +00:00
|
|
|
static void spapr_machine_2_5_instance_options(MachineState *machine)
|
2015-12-03 06:34:10 +00:00
|
|
|
{
|
2015-12-07 03:23:20 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void spapr_machine_2_5_class_options(MachineClass *mc)
|
|
|
|
{
|
2015-12-09 12:34:13 +00:00
|
|
|
sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
|
|
|
|
|
2015-12-07 03:28:15 +00:00
|
|
|
spapr_machine_2_6_class_options(mc);
|
2015-12-09 12:34:13 +00:00
|
|
|
smc->use_ohci_by_default = true;
|
2015-12-07 03:28:15 +00:00
|
|
|
SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_5);
|
2015-12-03 06:34:10 +00:00
|
|
|
}
|
|
|
|
|
2015-12-07 03:28:15 +00:00
|
|
|
DEFINE_SPAPR_MACHINE(2_5, "2.5", false);
|
2015-12-03 06:34:10 +00:00
|
|
|
|
|
|
|
/*
 * pseries-2.4
 */
#define SPAPR_COMPAT_2_4 \
    HW_COMPAT_2_4

static void spapr_machine_2_4_instance_options(MachineState *machine)
{
    /* Inherit the newer version's instance options. */
    spapr_machine_2_5_instance_options(machine);
}

static void spapr_machine_2_4_class_options(MachineClass *mc)
{
    sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_2_5_class_options(mc);
    /* Dynamic-reconfiguration (hotplug) memory is disabled on 2.4 and older. */
    smc->dr_lmb_enabled = false;
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_4);
}

DEFINE_SPAPR_MACHINE(2_4, "2.4", false);
|
2015-12-03 06:34:10 +00:00
|
|
|
|
|
|
|
/*
 * pseries-2.3
 */
#define SPAPR_COMPAT_2_3 \
    HW_COMPAT_2_3 \
    {\
        .driver   = "spapr-pci-host-bridge",\
        .property = "dynamic-reconfiguration",\
        .value    = "off",\
    },

static void spapr_machine_2_3_instance_options(MachineState *machine)
{
    spapr_machine_2_4_instance_options(machine);
    /*
     * Keep the migration stream format compatible with 2.3 and older:
     * no section footers, optional global state, no configuration
     * section.  NOTE(review): presumably these stream features were
     * introduced after 2.3 — confirm against migration/ history.
     */
    savevm_skip_section_footers();
    global_state_set_optional();
    savevm_skip_configuration();
}

static void spapr_machine_2_3_class_options(MachineClass *mc)
{
    spapr_machine_2_4_class_options(mc);
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_3);
}
DEFINE_SPAPR_MACHINE(2_3, "2.3", false);
|
2014-06-25 04:08:45 +00:00
|
|
|
|
2015-12-03 06:34:10 +00:00
|
|
|
/*
 * pseries-2.2
 */

#define SPAPR_COMPAT_2_2 \
    HW_COMPAT_2_2 \
    {\
        .driver   = TYPE_SPAPR_PCI_HOST_BRIDGE,\
        .property = "mem_win_size",\
        .value    = "0x20000000",\
    },

static void spapr_machine_2_2_instance_options(MachineState *machine)
{
    spapr_machine_2_3_instance_options(machine);
    /* 2.2 and older don't send the self-describing vmdesc migration blob. */
    machine->suppress_vmdesc = true;
}

static void spapr_machine_2_2_class_options(MachineClass *mc)
{
    spapr_machine_2_3_class_options(mc);
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_2);
}
DEFINE_SPAPR_MACHINE(2_2, "2.2", false);
|
2014-09-08 05:30:31 +00:00
|
|
|
|
2015-12-03 06:34:10 +00:00
|
|
|
/*
 * pseries-2.1
 */
#define SPAPR_COMPAT_2_1 \
    HW_COMPAT_2_1

static void spapr_machine_2_1_instance_options(MachineState *machine)
{
    /* No 2.1-specific instance tweaks beyond what 2.2 applies. */
    spapr_machine_2_2_instance_options(machine);
}

static void spapr_machine_2_1_class_options(MachineClass *mc)
{
    spapr_machine_2_2_class_options(mc);
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_1);
}
DEFINE_SPAPR_MACHINE(2_1, "2.1", false);
|
2015-08-12 03:15:56 +00:00
|
|
|
|
2014-03-17 02:40:26 +00:00
|
|
|
/*
 * Register the abstract base machine type.  The concrete versioned
 * types register themselves via the type_init hooks emitted by each
 * DEFINE_SPAPR_MACHINE invocation above.
 */
static void spapr_machine_register_types(void)
{
    type_register_static(&spapr_machine_info);
}

type_init(spapr_machine_register_types)
|