virtio: fix reachable assertion due to stale value of cached region size
In virtqueue_{split,packed}_get_avail_bytes(), descriptors are read in a
loop via MemoryRegionCache regions and calls to
vring_{split,packed}_desc_read(), which take a region cache and the
index of the descriptor to be read.
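The read path has roughly this shape (an abridged sketch of the split-ring
variant, not the exact upstream code; remaining byte swaps and helpers are
omitted): the descriptor index is turned into a byte offset into the cached
region, and the cached read asserts that the access fits within that region.

    /* Abridged sketch; the real definitions live in hw/virtio/virtio.c
     * and the memory API. */
    static void vring_split_desc_read(VirtIODevice *vdev, VRingDesc *desc,
                                      MemoryRegionCache *cache, int i)
    {
        /* address_space_read_cached() asserts that the access lies inside
         * the cached region, so i must stay below the number of
         * descriptors the cache actually covers. */
        address_space_read_cached(cache, i * sizeof(VRingDesc),
                                  desc, sizeof(VRingDesc));
        virtio_tswap64s(vdev, &desc->addr);
        /* ... byte-swap len, flags and next ... */
    }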
For direct descriptors we use a cache provided by the caller, whose
size matches that of the virtqueue vring. We limit the number of
descriptors we can read by the size of that vring:
    max = vq->vring.num;
    ...
    MemoryRegionCache *desc_cache = &caches->desc;
For indirect descriptors, we initialize a new cache and limit the
number of descriptors by the length of the indirect descriptor, i.e.
the size of the table it points to:
    len = address_space_cache_init(&indirect_desc_cache,
                                   vdev->dma_as,
                                   desc.addr, desc.len, false);
    desc_cache = &indirect_desc_cache;
    ...
    max = desc.len / sizeof(VRingDesc);
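As a concrete, purely hypothetical illustration of why this bound can exceed
the vring size: a split-ring descriptor is 16 bytes, so a 4 KiB indirect
table yields a limit of 256 entries even when the vring itself has only 128.
A self-contained sketch of the arithmetic (the struct and numbers below are
illustrative, not QEMU's types):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-in for VRingDesc: per the virtio spec a split-ring
     * descriptor is 16 bytes (64-bit addr, 32-bit len, 16-bit flags,
     * 16-bit next). */
    struct vring_desc_example {
        uint64_t addr;
        uint32_t len;
        uint16_t flags;
        uint16_t next;
    };

    int main(void)
    {
        uint32_t vring_num = 128;      /* hypothetical queue size           */
        uint32_t indirect_len = 4096;  /* hypothetical indirect table bytes */
        uint32_t max = indirect_len / sizeof(struct vring_desc_example);

        /* Prints: vring.num = 128, indirect max = 256 */
        printf("vring.num = %u, indirect max = %u\n", vring_num, max);
        return 0;
    }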
However, the first initialization of `max` is done outside the loop
where we process guest descriptors, while the second one is done
inside. This means that a sequence of an indirect descriptor followed
by a direct one will leave a stale value in `max`. If the second
descriptor's `next` field is smaller than the stale value, but
greater than the size of the virtqueue ring (and thus the cached
region), an assertion failure will be triggered in
address_space_read_cached() down the call chain.
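The failure mode can be modelled with a small, self-contained program (this
is not QEMU code; the names and numbers are made up to mirror the control
flow described above):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Toy model of the buggy control flow: max is set once before the loop
     * and only refreshed when an indirect chain is seen, so a direct chain
     * processed afterwards is validated against the stale, larger bound. */

    enum { VRING_NUM = 128, INDIRECT_ENTRIES = 256 };

    /* Stand-in for address_space_read_cached(): the real function asserts
     * that the access lies within the cached region. */
    static void read_cached(unsigned int idx, unsigned int cached_entries)
    {
        assert(idx < cached_entries);
    }

    int main(void)
    {
        unsigned int max = VRING_NUM;  /* initialized once, outside the loop */
        bool chain_is_indirect[] = { true, false };

        for (unsigned int d = 0; d < 2; d++) {
            unsigned int cached_entries = VRING_NUM; /* direct: vring-sized */

            if (chain_is_indirect[d]) {
                max = INDIRECT_ENTRIES;            /* stays stale afterwards */
                cached_entries = INDIRECT_ENTRIES; /* indirect table cache   */
            }

            /* A guest-chosen next of 200 passes the < max check on the
             * second (direct) pass because max is still 256, but overruns
             * the 128-entry vring cache, tripping the assertion. */
            unsigned int next = 200;
            if (next < max) {
                read_cached(next, cached_entries);
            }
        }
        printf("unreachable with assertions enabled\n");
        return 0;
    }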
Fix this by initializing `max` inside the loop in both functions.
Fixes: 9796d0ac8f ("virtio: use address_space_map/unmap to access descriptors")
Signed-off-by: Carlos López <clopez@suse.de>
Message-Id: <20230302100358.3613-1-clopez@suse.de>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
(cherry picked from commit bbc1c327d7974261c61566cdb950cc5fa0196b41)
Signed-off-by: Michael Tokarev <mjt@tls.msk.ru>
commit 2a0afe1bde
parent a641521e50
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -1478,7 +1478,7 @@ static void virtqueue_split_get_avail_bytes(VirtQueue *vq,
                                             VRingMemoryRegionCaches *caches)
 {
     VirtIODevice *vdev = vq->vdev;
-    unsigned int max, idx;
+    unsigned int idx;
     unsigned int total_bufs, in_total, out_total;
     MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
     int64_t len = 0;
@@ -1487,13 +1487,12 @@ static void virtqueue_split_get_avail_bytes(VirtQueue *vq,
     idx = vq->last_avail_idx;
     total_bufs = in_total = out_total = 0;
 
-    max = vq->vring.num;
-
     while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
         MemoryRegionCache *desc_cache = &caches->desc;
         unsigned int num_bufs;
         VRingDesc desc;
         unsigned int i;
+        unsigned int max = vq->vring.num;
 
         num_bufs = total_bufs;
 
@@ -1615,7 +1614,7 @@ static void virtqueue_packed_get_avail_bytes(VirtQueue *vq,
                                              VRingMemoryRegionCaches *caches)
 {
     VirtIODevice *vdev = vq->vdev;
-    unsigned int max, idx;
+    unsigned int idx;
     unsigned int total_bufs, in_total, out_total;
     MemoryRegionCache *desc_cache;
     MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
@@ -1627,14 +1626,14 @@ static void virtqueue_packed_get_avail_bytes(VirtQueue *vq,
     wrap_counter = vq->last_avail_wrap_counter;
     total_bufs = in_total = out_total = 0;
 
-    max = vq->vring.num;
-
     for (;;) {
         unsigned int num_bufs = total_bufs;
         unsigned int i = idx;
         int rc;
+        unsigned int max = vq->vring.num;
 
         desc_cache = &caches->desc;
+
         vring_packed_desc_read(vdev, &desc, desc_cache, idx, true);
         if (!is_desc_avail(desc.flags, wrap_counter)) {
             break;