vhost: adapt vhost_verify_ring_mappings() to virtio 1 ring layout

With virtio 1, the vring layout is split into 3 separate regions of
contiguous memory for the descriptor table, the available ring and the
used ring, as opposed to legacy virtio, which uses a single region.

When guest memory is re-mapped, the code must ensure the change does not
affect the vring mapping. This is done in vhost_verify_ring_mappings(),
which currently assumes the device is legacy.

This patch changes vhost_verify_ring_mappings() to check the mapping of
each part of the vring separately. This also works for legacy mappings.

Cc: qemu-stable@nongnu.org
Signed-off-by: Greg Kurz <groug@kaod.org>
Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

commit f1f9e6c596
parent cb88ebd754
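As background, the following is a minimal standalone sketch (not part of the
patch; ranges_overlap() is re-implemented locally with the same semantics as
QEMU's helper, and struct split_vring / vring_touched_by_remap() are made-up
names) of why a single overlap check per ring no longer suffices once the
three parts of a virtio 1 vring live at independent guest-physical addresses:

/* Sketch only: with legacy virtio the descriptor table, available ring and
 * used ring sit back-to-back in one region, so a single overlap check
 * against [ring_phys, ring_phys + ring_size) covers all of them.  With
 * virtio 1 the driver hands the device three independent addresses, so each
 * part has to be checked on its own, which is what the patch below does.
 */
#include <stdint.h>
#include <stdbool.h>

/* Local stand-in mirroring the semantics of QEMU's ranges_overlap(). */
static bool ranges_overlap(uint64_t first1, uint64_t len1,
                           uint64_t first2, uint64_t len2)
{
    return first1 < first2 + len2 && first2 < first1 + len1;
}

/* Hypothetical container for the three parts of a virtio 1 (split) vring. */
struct split_vring {
    uint64_t desc_phys,  desc_size;
    uint64_t avail_phys, avail_size;
    uint64_t used_phys,  used_size;
};

/* True if a re-mapped region [start_addr, start_addr + size) touches any
 * part of the vring -- the per-part test the patch introduces. */
static bool vring_touched_by_remap(const struct split_vring *vr,
                                   uint64_t start_addr, uint64_t size)
{
    return ranges_overlap(start_addr, size, vr->desc_phys,  vr->desc_size) ||
           ranges_overlap(start_addr, size, vr->avail_phys, vr->avail_size) ||
           ranges_overlap(start_addr, size, vr->used_phys,  vr->used_size);
}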
hw/virtio/vhost.c

@@ -421,32 +421,73 @@ static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
     dev->log_size = size;
 }
 
+static int vhost_verify_ring_part_mapping(void *part,
+                                          uint64_t part_addr,
+                                          uint64_t part_size,
+                                          uint64_t start_addr,
+                                          uint64_t size)
+{
+    hwaddr l;
+    void *p;
+    int r = 0;
+
+    if (!ranges_overlap(start_addr, size, part_addr, part_size)) {
+        return 0;
+    }
+    l = part_size;
+    p = cpu_physical_memory_map(part_addr, &l, 1);
+    if (!p || l != part_size) {
+        r = -ENOMEM;
+    }
+    if (p != part) {
+        r = -EBUSY;
+    }
+    cpu_physical_memory_unmap(p, l, 0, 0);
+    return r;
+}
+
 static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                       uint64_t start_addr,
                                       uint64_t size)
 {
-    int i;
+    int i, j;
     int r = 0;
+    const char *part_name[] = {
+        "descriptor table",
+        "available ring",
+        "used ring"
+    };
 
-    for (i = 0; !r && i < dev->nvqs; ++i) {
+    for (i = 0; i < dev->nvqs; ++i) {
         struct vhost_virtqueue *vq = dev->vqs + i;
-        hwaddr l;
-        void *p;
 
-        if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) {
-            continue;
+        j = 0;
+        r = vhost_verify_ring_part_mapping(vq->desc, vq->desc_phys,
+                                           vq->desc_size, start_addr, size);
+        if (!r) {
+            break;
         }
-        l = vq->ring_size;
-        p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
-        if (!p || l != vq->ring_size) {
-            error_report("Unable to map ring buffer for ring %d", i);
-            r = -ENOMEM;
+
+        j++;
+        r = vhost_verify_ring_part_mapping(vq->avail, vq->avail_phys,
+                                           vq->avail_size, start_addr, size);
+        if (!r) {
+            break;
         }
-        if (p != vq->ring) {
-            error_report("Ring buffer relocated for ring %d", i);
-            r = -EBUSY;
+
+        j++;
+        r = vhost_verify_ring_part_mapping(vq->used, vq->used_phys,
+                                           vq->used_size, start_addr, size);
+        if (!r) {
+            break;
         }
-        cpu_physical_memory_unmap(p, l, 0, 0);
     }
+
+    if (r == -ENOMEM) {
+        error_report("Unable to map %s for ring %d", part_name[j], i);
+    } else if (r == -EBUSY) {
+        error_report("%s relocated for ring %d", part_name[j], i);
+    }
+
     return r;
 }
@@ -860,15 +901,15 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
         }
     }
 
-    s = l = virtio_queue_get_desc_size(vdev, idx);
-    a = virtio_queue_get_desc_addr(vdev, idx);
+    vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
+    vq->desc_phys = a = virtio_queue_get_desc_addr(vdev, idx);
     vq->desc = cpu_physical_memory_map(a, &l, 0);
     if (!vq->desc || l != s) {
         r = -ENOMEM;
         goto fail_alloc_desc;
     }
-    s = l = virtio_queue_get_avail_size(vdev, idx);
-    a = virtio_queue_get_avail_addr(vdev, idx);
+    vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
+    vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
     vq->avail = cpu_physical_memory_map(a, &l, 0);
     if (!vq->avail || l != s) {
         r = -ENOMEM;
include/hw/virtio/vhost.h

@@ -14,6 +14,10 @@ struct vhost_virtqueue {
     void *avail;
     void *used;
     int num;
+    unsigned long long desc_phys;
+    unsigned desc_size;
+    unsigned long long avail_phys;
+    unsigned avail_size;
     unsigned long long used_phys;
     unsigned used_size;
     void *ring;