Merge remote-tracking branch 'remotes/borntraeger/tags/kvm-s390-20140131' into staging

This patch set contains the sclp defines and events for cpu hotplug,
the initial sclp defines (without code yet) for standby memory (some
sort of memory hotplug) as well as a cleanup of the kvm register
synchronization.

# gpg: Signature made Fri 31 Jan 2014 08:54:29 GMT using RSA key ID B5A61C7C
# gpg: Can't check signature: public key not found

* remotes/borntraeger/tags/kvm-s390-20140131:
  s390x/kvm: cleanup partial register handling
  sclp-s390: Define new SCLP codes and structures
  s390-sclp: SCLP Event integration
  s390-sclp: SCLP CPU Info
  s390-sclp: Define New SCLP Codes

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell 2014-02-04 18:46:33 +00:00
commit 2b2449f7e4
8 changed files with 320 additions and 117 deletions
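The CPU hotplug patches in this pull only add the SCLP event plumbing; none of the hunks below contain a caller of the new raise_irq_cpu_hotplug() yet. As a rough sketch of the intended flow (the wrapper below is hypothetical; only the functions and defines it mentions come from the series):

#include "hw/s390x/sclp.h"

/* Hypothetical notifier, not part of this series: a future hotplug path
 * would call raise_irq_cpu_hotplug() (hw/s390x/sclpcpu.c). That raises the
 * qemu_irq, trigger_signal() marks the event pending and posts an SCLP
 * service interrupt, the guest then issues Read Event Data, receives a
 * Configuration Management Data buffer with EVENT_QUAL_CPU_CHANGE, and
 * re-runs Read CPU Info to pick up the changed CPU list.
 */
static void s390_cpu_config_changed(void)
{
    raise_irq_cpu_hotplug();
}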


@@ -3,6 +3,7 @@ obj-y += s390-virtio-hcall.o
obj-y += sclp.o
obj-y += event-facility.o
obj-y += sclpquiesce.o
obj-y += sclpcpu.o
obj-y += ipl.o
obj-y += css.o
obj-y += s390-virtio-ccw.o


@@ -32,6 +32,8 @@ struct SCLPEventFacility {
unsigned int receive_mask;
};
SCLPEvent cpu_hotplug;
/* return true if any child has event pending set */
static bool event_pending(SCLPEventFacility *ef)
{
@@ -335,6 +337,10 @@ static int init_event_facility(S390SCLPDevice *sdev)
}
qdev_init_nofail(quiesce);
object_initialize(&cpu_hotplug, sizeof(cpu_hotplug), TYPE_SCLP_CPU_HOTPLUG);
qdev_set_parent_bus(DEVICE(&cpu_hotplug), BUS(&event_facility->sbus));
object_property_set_bool(OBJECT(&cpu_hotplug), true, "realized", NULL);
return 0;
}


@@ -15,6 +15,7 @@
#include "cpu.h"
#include "sysemu/kvm.h"
#include "exec/memory.h"
#include "sysemu/sysemu.h"
#include "hw/s390x/sclp.h"
@@ -31,7 +32,26 @@ static inline S390SCLPDevice *get_event_facility(void)
static void read_SCP_info(SCCB *sccb)
{
ReadInfo *read_info = (ReadInfo *) sccb;
CPUState *cpu;
int shift = 0;
int cpu_count = 0;
int i = 0;
CPU_FOREACH(cpu) {
cpu_count++;
}
/* CPU information */
read_info->entries_cpu = cpu_to_be16(cpu_count);
read_info->offset_cpu = cpu_to_be16(offsetof(ReadInfo, entries));
read_info->highest_cpu = cpu_to_be16(max_cpus);
for (i = 0; i < cpu_count; i++) {
read_info->entries[i].address = i;
read_info->entries[i].type = 0;
}
read_info->facilities = cpu_to_be64(SCLP_HAS_CPU_INFO);
while ((ram_size >> (20 + shift)) > 65535) {
shift++;
@@ -41,15 +61,46 @@ static void read_SCP_info(SCCB *sccb)
sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION);
}
/* Provide information about the CPU */
static void sclp_read_cpu_info(SCCB *sccb)
{
ReadCpuInfo *cpu_info = (ReadCpuInfo *) sccb;
CPUState *cpu;
int cpu_count = 0;
int i = 0;
CPU_FOREACH(cpu) {
cpu_count++;
}
cpu_info->nr_configured = cpu_to_be16(cpu_count);
cpu_info->offset_configured = cpu_to_be16(offsetof(ReadCpuInfo, entries));
cpu_info->nr_standby = cpu_to_be16(0);
/* The standby offset is 16-byte for each CPU */
cpu_info->offset_standby = cpu_to_be16(cpu_info->offset_configured
+ cpu_info->nr_configured*sizeof(CPUEntry));
for (i = 0; i < cpu_count; i++) {
cpu_info->entries[i].address = i;
cpu_info->entries[i].type = 0;
}
sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION);
}
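For reference, a minimal sketch of how the Read CPU Info response filled in above would be walked by a consumer (a test helper, say). The function name is made up; the layout and big-endian byte order follow the ReadCpuInfo and CPUEntry definitions added to hw/s390x/sclp.h further down, and be16_to_cpu() is QEMU's usual byte-swap helper:

#include <stdio.h>
/* Sketch only; assumes the QEMU headers defining SCCB, ReadCpuInfo,
 * CPUEntry and be16_to_cpu() are included. offset_configured is relative
 * to the start of the SCCB, and each CPUEntry is 16 bytes. */
static void dump_cpu_entries(const SCCB *sccb)
{
    const ReadCpuInfo *info = (const ReadCpuInfo *) sccb;
    uint16_t nr = be16_to_cpu(info->nr_configured);
    const CPUEntry *e = (const CPUEntry *)((const uint8_t *) sccb +
                                           be16_to_cpu(info->offset_configured));
    uint16_t i;

    for (i = 0; i < nr; i++) {
        printf("cpu %d: address %d, type %d\n", i, e[i].address, e[i].type);
    }
}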
static void sclp_execute(SCCB *sccb, uint64_t code)
{
S390SCLPDevice *sdev = get_event_facility();
switch (code) {
switch (code & SCLP_CMD_CODE_MASK) {
case SCLP_CMDW_READ_SCP_INFO:
case SCLP_CMDW_READ_SCP_INFO_FORCED:
read_SCP_info(sccb);
break;
case SCLP_CMDW_READ_CPU_INFO:
sclp_read_cpu_info(sccb);
break;
default:
sdev->sclp_command_handler(sdev->ef, sccb, code);
break;

hw/s390x/sclpcpu.c (new file, 112 lines)

@@ -0,0 +1,112 @@
/*
* SCLP event type
* Signal CPU - Trigger SCLP interrupt for system CPU configure or
* de-configure
*
* Copyright IBM, Corp. 2013
*
* Authors:
* Thang Pham <thang.pham@us.ibm.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or (at your
* option) any later version. See the COPYING file in the top-level directory.
*
*/
#include "sysemu/sysemu.h"
#include "hw/s390x/sclp.h"
#include "hw/s390x/event-facility.h"
#include "cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/kvm.h"
typedef struct ConfigMgtData {
EventBufferHeader ebh;
uint8_t reserved;
uint8_t event_qualifier;
} QEMU_PACKED ConfigMgtData;
static qemu_irq irq_cpu_hotplug; /* Only used in this file */
#define EVENT_QUAL_CPU_CHANGE 1
void raise_irq_cpu_hotplug(void)
{
qemu_irq_raise(irq_cpu_hotplug);
}
static unsigned int send_mask(void)
{
return SCLP_EVENT_MASK_CONFIG_MGT_DATA;
}
static unsigned int receive_mask(void)
{
return 0;
}
static int read_event_data(SCLPEvent *event, EventBufferHeader *evt_buf_hdr,
int *slen)
{
ConfigMgtData *cdata = (ConfigMgtData *) evt_buf_hdr;
if (*slen < sizeof(ConfigMgtData)) {
return 0;
}
/* Event is no longer pending */
if (!event->event_pending) {
return 0;
}
event->event_pending = false;
/* Event header data */
cdata->ebh.length = cpu_to_be16(sizeof(ConfigMgtData));
cdata->ebh.type = SCLP_EVENT_CONFIG_MGT_DATA;
cdata->ebh.flags |= SCLP_EVENT_BUFFER_ACCEPTED;
/* Trigger a rescan of CPUs by setting event qualifier */
cdata->event_qualifier = EVENT_QUAL_CPU_CHANGE;
*slen -= sizeof(ConfigMgtData);
return 1;
}
static void trigger_signal(void *opaque, int n, int level)
{
SCLPEvent *event = opaque;
event->event_pending = true;
/* Trigger SCLP read operation */
sclp_service_interrupt(0);
}
static int irq_cpu_hotplug_init(SCLPEvent *event)
{
irq_cpu_hotplug = *qemu_allocate_irqs(trigger_signal, event, 1);
return 0;
}
static void cpu_class_init(ObjectClass *oc, void *data)
{
SCLPEventClass *k = SCLP_EVENT_CLASS(oc);
k->init = irq_cpu_hotplug_init;
k->get_send_mask = send_mask;
k->get_receive_mask = receive_mask;
k->read_event_data = read_event_data;
k->write_event_data = NULL;
}
static const TypeInfo sclp_cpu_info = {
.name = "sclp-cpu-hotplug",
.parent = TYPE_SCLP_EVENT,
.instance_size = sizeof(SCLPEvent),
.class_init = cpu_class_init,
.class_size = sizeof(SCLPEventClass),
};
static void sclp_cpu_register_types(void)
{
type_register_static(&sclp_cpu_info);
}
type_init(sclp_cpu_register_types)
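As a usage note, the ConfigMgtData buffer produced by read_event_data() above is what the guest finds in its Read Event Data SCCB once the service interrupt fires. A minimal, hypothetical check on the receiving side (ConfigMgtData and EVENT_QUAL_CPU_CHANGE are private to this file, so a real consumer would carry its own copy of the layout; assumes <stdbool.h> and the definitions above are visible):

/* Sketch only: recognise the CPU-change event in a received buffer. */
static bool is_cpu_rescan_request(const ConfigMgtData *buf)
{
    return buf->ebh.type == SCLP_EVENT_CONFIG_MGT_DATA &&
           buf->event_qualifier == EVENT_QUAL_CPU_CHANGE;
}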


@@ -17,10 +17,12 @@
#include <hw/qdev.h>
#include "qemu/thread.h"
#include "hw/s390x/sclp.h"
/* SCLP event types */
#define SCLP_EVENT_OPRTNS_COMMAND 0x01
#define SCLP_EVENT_MESSAGE 0x02
#define SCLP_EVENT_CONFIG_MGT_DATA 0x04
#define SCLP_EVENT_PMSGCMD 0x09
#define SCLP_EVENT_ASCII_CONSOLE_DATA 0x1a
#define SCLP_EVENT_SIGNAL_QUIESCE 0x1d
@@ -28,6 +30,7 @@
/* SCLP event masks */
#define SCLP_EVENT_MASK_SIGNAL_QUIESCE 0x00000008
#define SCLP_EVENT_MASK_MSG_ASCII 0x00000040
#define SCLP_EVENT_MASK_CONFIG_MGT_DATA 0x10000000
#define SCLP_EVENT_MASK_OP_CMD 0x80000000
#define SCLP_EVENT_MASK_MSG 0x40000000
#define SCLP_EVENT_MASK_PMSGCMD 0x00800000
@@ -43,6 +46,8 @@
#define SCLP_EVENT_GET_CLASS(obj) \
OBJECT_GET_CLASS(SCLPEventClass, (obj), TYPE_SCLP_EVENT)
#define TYPE_SCLP_CPU_HOTPLUG "sclp-cpu-hotplug"
typedef struct WriteEventMask {
SCCBHeader h;
uint16_t _reserved;
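The new mask value fits the existing pattern in this header: event type n owns bit n of the 32-bit send/receive mask, counted from the most significant bit, so type 0x04 (Configuration Management Data) maps to 0x10000000 just as type 0x01 maps to 0x80000000 and type 0x1d maps to 0x00000008. A small illustrative helper (not part of the series):

/* Illustration only: derive the SCLP event mask bit from an event type. */
static inline uint32_t sclp_mask_for_type(unsigned int type)
{
    return 1u << (32 - type);
}
/* sclp_mask_for_type(SCLP_EVENT_CONFIG_MGT_DATA) == SCLP_EVENT_MASK_CONFIG_MGT_DATA */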


@@ -17,21 +17,41 @@
#include <hw/sysbus.h>
#include <hw/qdev.h>
#define SCLP_CMD_CODE_MASK 0xffff00ff
/* SCLP command codes */
#define SCLP_CMDW_READ_SCP_INFO 0x00020001
#define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001
#define SCLP_READ_STORAGE_ELEMENT_INFO 0x00040001
#define SCLP_ATTACH_STORAGE_ELEMENT 0x00080001
#define SCLP_ASSIGN_STORAGE 0x000D0001
#define SCLP_UNASSIGN_STORAGE 0x000C0001
#define SCLP_CMD_READ_EVENT_DATA 0x00770005
#define SCLP_CMD_WRITE_EVENT_DATA 0x00760005
#define SCLP_CMD_READ_EVENT_DATA 0x00770005
#define SCLP_CMD_WRITE_EVENT_DATA 0x00760005
#define SCLP_CMD_WRITE_EVENT_MASK 0x00780005
/* SCLP Memory hotplug codes */
#define SCLP_FC_ASSIGN_ATTACH_READ_STOR 0xE00000000000ULL
#define SCLP_STARTING_SUBINCREMENT_ID 0x10001
#define SCLP_INCREMENT_UNIT 0x10000
#define MAX_AVAIL_SLOTS 32
/* CPU hotplug SCLP codes */
#define SCLP_HAS_CPU_INFO 0x0C00000000000000ULL
#define SCLP_CMDW_READ_CPU_INFO 0x00010001
#define SCLP_CMDW_CONFIGURE_CPU 0x00110001
#define SCLP_CMDW_DECONFIGURE_CPU 0x00100001
/* SCLP response codes */
#define SCLP_RC_NORMAL_READ_COMPLETION 0x0010
#define SCLP_RC_NORMAL_COMPLETION 0x0020
#define SCLP_RC_SCCB_BOUNDARY_VIOLATION 0x0100
#define SCLP_RC_INVALID_SCLP_COMMAND 0x01f0
#define SCLP_RC_CONTAINED_EQUIPMENT_CHECK 0x0340
#define SCLP_RC_INSUFFICIENT_SCCB_LENGTH 0x0300
#define SCLP_RC_STANDBY_READ_COMPLETION 0x0410
#define SCLP_RC_INVALID_FUNCTION 0x40f0
#define SCLP_RC_NO_EVENT_BUFFERS_STORED 0x60f0
#define SCLP_RC_INVALID_SELECTION_MASK 0x70f0
@@ -71,12 +91,66 @@ typedef struct SCCBHeader {
#define SCCB_DATA_LEN (SCCB_SIZE - sizeof(SCCBHeader))
/* CPU information */
typedef struct CPUEntry {
uint8_t address;
uint8_t reserved0[13];
uint8_t type;
uint8_t reserved1;
} QEMU_PACKED CPUEntry;
typedef struct ReadInfo {
SCCBHeader h;
uint16_t rnmax;
uint8_t rnsize;
uint8_t _reserved1[16 - 11]; /* 11-15 */
uint16_t entries_cpu; /* 16-17 */
uint16_t offset_cpu; /* 18-19 */
uint8_t _reserved2[24 - 20]; /* 20-23 */
uint8_t loadparm[8]; /* 24-31 */
uint8_t _reserved3[48 - 32]; /* 32-47 */
uint64_t facilities; /* 48-55 */
uint8_t _reserved0[100 - 56];
uint32_t rnsize2;
uint64_t rnmax2;
uint8_t _reserved4[120-112]; /* 112-119 */
uint16_t highest_cpu;
uint8_t _reserved5[128 - 122]; /* 122-127 */
struct CPUEntry entries[0];
} QEMU_PACKED ReadInfo;
typedef struct ReadCpuInfo {
SCCBHeader h;
uint16_t nr_configured; /* 8-9 */
uint16_t offset_configured; /* 10-11 */
uint16_t nr_standby; /* 12-13 */
uint16_t offset_standby; /* 14-15 */
uint8_t reserved0[24-16]; /* 16-23 */
struct CPUEntry entries[0];
} QEMU_PACKED ReadCpuInfo;
typedef struct ReadStorageElementInfo {
SCCBHeader h;
uint16_t max_id;
uint16_t assigned;
uint16_t standby;
uint8_t _reserved0[16 - 14]; /* 14-15 */
uint32_t entries[0];
} QEMU_PACKED ReadStorageElementInfo;
typedef struct AttachStorageElement {
SCCBHeader h;
uint8_t _reserved0[10 - 8]; /* 8-9 */
uint16_t assigned;
uint8_t _reserved1[16 - 12]; /* 12-15 */
uint32_t entries[0];
} QEMU_PACKED AttachStorageElement;
typedef struct AssignStorage {
SCCBHeader h;
uint16_t rn;
} QEMU_PACKED AssignStorage;
typedef struct SCCB {
SCCBHeader h;
char data[SCCB_DATA_LEN];
@@ -114,5 +188,6 @@ typedef struct S390SCLPDeviceClass {
void s390_sclp_init(void);
void sclp_service_interrupt(uint32_t sccb);
void raise_irq_cpu_hotplug(void);
#endif
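The byte-offset comments in ReadInfo and ReadCpuInfo, and the 16-byte CPUEntry stride used for offset_standby in sclp.c, can be checked mechanically. A sketch using standard C11 _Static_assert (not part of the patch; it assumes the packed definitions above are in scope):

#include <stddef.h>

_Static_assert(sizeof(CPUEntry) == 16, "CPU entries are 16 bytes each");
_Static_assert(offsetof(ReadInfo, entries_cpu) == 16, "matches the 16-17 annotation");
_Static_assert(offsetof(ReadInfo, facilities) == 48, "matches the 48-55 annotation");
_Static_assert(offsetof(ReadCpuInfo, nr_configured) == 8, "matches the 8-9 annotation");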


@@ -78,11 +78,6 @@ typedef struct MchkQueue {
uint16_t type;
} MchkQueue;
/* Defined values for CPUS390XState.runtime_reg_dirty_mask */
#define KVM_S390_RUNTIME_DIRTY_NONE 0
#define KVM_S390_RUNTIME_DIRTY_PARTIAL 1
#define KVM_S390_RUNTIME_DIRTY_FULL 2
typedef struct CPUS390XState {
uint64_t regs[16]; /* GP registers */
CPU_DoubleU fregs[16]; /* FP registers */
@@ -126,13 +121,6 @@ typedef struct CPUS390XState {
uint64_t cputm;
uint32_t todpr;
/* on S390 the runtime register set has two dirty states:
* a partial dirty state in which only the registers that
* are needed all the time are fetched. And a fully dirty
* state in which all runtime registers are fetched.
*/
uint32_t runtime_reg_dirty_mask;
CPU_COMMON
/* reset does memset(0) up to here */
@@ -1076,7 +1064,6 @@ void kvm_s390_io_interrupt(S390CPU *cpu, uint16_t subchannel_id,
uint32_t io_int_word);
void kvm_s390_crw_mchk(S390CPU *cpu);
void kvm_s390_enable_css_support(S390CPU *cpu);
int kvm_s390_get_registers_partial(CPUState *cpu);
int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch,
int vq, bool assign);
int kvm_s390_cpu_restart(S390CPU *cpu);
@@ -1094,10 +1081,6 @@ static inline void kvm_s390_crw_mchk(S390CPU *cpu)
static inline void kvm_s390_enable_css_support(S390CPU *cpu)
{
}
static inline int kvm_s390_get_registers_partial(CPUState *cpu)
{
return -ENOSYS;
}
static inline int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier,
uint32_t sch, int vq,
bool assign)


@@ -152,35 +152,32 @@ int kvm_arch_put_registers(CPUState *cs, int level)
}
}
if (env->runtime_reg_dirty_mask == KVM_S390_RUNTIME_DIRTY_FULL) {
reg.id = KVM_REG_S390_CPU_TIMER;
reg.addr = (__u64)&(env->cputm);
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
if (ret < 0) {
return ret;
}
reg.id = KVM_REG_S390_CLOCK_COMP;
reg.addr = (__u64)&(env->ckc);
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
if (ret < 0) {
return ret;
}
reg.id = KVM_REG_S390_TODPR;
reg.addr = (__u64)&(env->todpr);
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
if (ret < 0) {
return ret;
}
}
env->runtime_reg_dirty_mask = KVM_S390_RUNTIME_DIRTY_NONE;
/* Do we need to save more than that? */
if (level == KVM_PUT_RUNTIME_STATE) {
return 0;
}
reg.id = KVM_REG_S390_CPU_TIMER;
reg.addr = (__u64)&(env->cputm);
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
if (ret < 0) {
return ret;
}
reg.id = KVM_REG_S390_CLOCK_COMP;
reg.addr = (__u64)&(env->ckc);
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
if (ret < 0) {
return ret;
}
reg.id = KVM_REG_S390_TODPR;
reg.addr = (__u64)&(env->todpr);
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
if (ret < 0) {
return ret;
}
if (cap_sync_regs &&
cs->kvm_run->kvm_valid_regs & KVM_SYNC_ACRS &&
cs->kvm_run->kvm_valid_regs & KVM_SYNC_CRS) {
@@ -216,13 +213,54 @@ int kvm_arch_get_registers(CPUState *cs)
S390CPU *cpu = S390_CPU(cs);
CPUS390XState *env = &cpu->env;
struct kvm_one_reg reg;
int r;
struct kvm_sregs sregs;
struct kvm_regs regs;
int i, r;
r = kvm_s390_get_registers_partial(cs);
if (r < 0) {
return r;
/* get the PSW */
env->psw.addr = cs->kvm_run->psw_addr;
env->psw.mask = cs->kvm_run->psw_mask;
/* the GPRS */
if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_GPRS) {
for (i = 0; i < 16; i++) {
env->regs[i] = cs->kvm_run->s.regs.gprs[i];
}
} else {
r = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
if (r < 0) {
return r;
}
for (i = 0; i < 16; i++) {
env->regs[i] = regs.gprs[i];
}
}
/* The ACRS and CRS */
if (cap_sync_regs &&
cs->kvm_run->kvm_valid_regs & KVM_SYNC_ACRS &&
cs->kvm_run->kvm_valid_regs & KVM_SYNC_CRS) {
for (i = 0; i < 16; i++) {
env->aregs[i] = cs->kvm_run->s.regs.acrs[i];
env->cregs[i] = cs->kvm_run->s.regs.crs[i];
}
} else {
r = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
if (r < 0) {
return r;
}
for (i = 0; i < 16; i++) {
env->aregs[i] = sregs.acrs[i];
env->cregs[i] = sregs.crs[i];
}
}
/* The prefix */
if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_PREFIX) {
env->psa = cs->kvm_run->s.regs.prefix;
}
/* One Regs */
reg.id = KVM_REG_S390_CPU_TIMER;
reg.addr = (__u64)&(env->cputm);
r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
@@ -244,69 +282,6 @@ int kvm_arch_get_registers(CPUState *cs)
return r;
}
env->runtime_reg_dirty_mask = KVM_S390_RUNTIME_DIRTY_FULL;
return 0;
}
int kvm_s390_get_registers_partial(CPUState *cs)
{
S390CPU *cpu = S390_CPU(cs);
CPUS390XState *env = &cpu->env;
struct kvm_sregs sregs;
struct kvm_regs regs;
int ret;
int i;
if (env->runtime_reg_dirty_mask) {
return 0;
}
/* get the PSW */
env->psw.addr = cs->kvm_run->psw_addr;
env->psw.mask = cs->kvm_run->psw_mask;
/* the GPRS */
if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_GPRS) {
for (i = 0; i < 16; i++) {
env->regs[i] = cs->kvm_run->s.regs.gprs[i];
}
} else {
ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
if (ret < 0) {
return ret;
}
for (i = 0; i < 16; i++) {
env->regs[i] = regs.gprs[i];
}
}
/* The ACRS and CRS */
if (cap_sync_regs &&
cs->kvm_run->kvm_valid_regs & KVM_SYNC_ACRS &&
cs->kvm_run->kvm_valid_regs & KVM_SYNC_CRS) {
for (i = 0; i < 16; i++) {
env->aregs[i] = cs->kvm_run->s.regs.acrs[i];
env->cregs[i] = cs->kvm_run->s.regs.crs[i];
}
} else {
ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
if (ret < 0) {
return ret;
}
for (i = 0; i < 16; i++) {
env->aregs[i] = sregs.acrs[i];
env->cregs[i] = sregs.crs[i];
}
}
/* Finally the prefix */
if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_PREFIX) {
env->psa = cs->kvm_run->s.regs.prefix;
} else {
/* no prefix without sync regs */
}
env->runtime_reg_dirty_mask = KVM_S390_RUNTIME_DIRTY_PARTIAL;
return 0;
}
@@ -442,15 +417,13 @@ static int kvm_handle_css_inst(S390CPU *cpu, struct kvm_run *run,
uint8_t ipa0, uint8_t ipa1, uint8_t ipb)
{
CPUS390XState *env = &cpu->env;
CPUState *cs = CPU(cpu);
if (ipa0 != 0xb2) {
/* Not handled for now. */
return -1;
}
kvm_s390_get_registers_partial(cs);
cs->kvm_vcpu_dirty = true;
cpu_synchronize_state(CPU(cpu));
switch (ipa1) {
case PRIV_XSCH:
@@ -537,11 +510,9 @@ static int handle_priv(S390CPU *cpu, struct kvm_run *run,
static int handle_hypercall(S390CPU *cpu, struct kvm_run *run)
{
CPUState *cs = CPU(cpu);
CPUS390XState *env = &cpu->env;
kvm_s390_get_registers_partial(cs);
cs->kvm_vcpu_dirty = true;
cpu_synchronize_state(CPU(cpu));
env->regs[2] = s390_virtio_hypercall(env);
return 0;
@@ -767,8 +738,7 @@ static int handle_tsch(S390CPU *cpu)
struct kvm_run *run = cs->kvm_run;
int ret;
kvm_s390_get_registers_partial(cs);
cs->kvm_vcpu_dirty = true;
cpu_synchronize_state(cs);
ret = ioinst_handle_tsch(env, env->regs[1], run->s390_tsch.ipb);
if (ret >= 0) {
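For the register-synchronization cleanup, the handlers above show the new pattern: the removed kvm_s390_get_registers_partial() plus manual kvm_vcpu_dirty assignment is replaced by cpu_synchronize_state(), which fetches the full register set through the rewritten kvm_arch_get_registers() and marks the vCPU dirty itself. A minimal sketch with a hypothetical helper (not from the patch; assumes the usual QEMU s390x/KVM headers):

/* Sketch only: any handler that needs guest registers synchronizes first
 * and then reads env directly; no partial/full dirty tracking remains. */
static uint64_t guest_gpr(S390CPU *cpu, int n)
{
    cpu_synchronize_state(CPU(cpu));    /* full fetch via kvm_arch_get_registers() */
    return cpu->env.regs[n];
}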