virtio: event suppression support for packed ring
This patch implements event suppression through the device/driver areas.
Please refer to the virtio specification for more information.

Signed-off-by: Wei Xu <wexu@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
Message-Id: <20191025083527.30803-7-eperezma@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
parent 86044b24e8
commit 683f766567
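For context, the helpers added below read and write the packed ring's event suppression areas. A minimal sketch of the layout they assume is shown here; the field and flag names are chosen to mirror QEMU's VRingPackedDescEvent and VRING_PACKED_EVENT_FLAG_* values rather than quoted verbatim from the virtio 1.1 specification:

#include <stdint.h>

/* Sketch of one packed-ring event suppression area (driver or device area):
 * two little-endian 16-bit words, matching VRingPackedDescEvent in QEMU. */
struct packed_event_suppression {
    uint16_t off_wrap; /* bits 0..14: descriptor ring offset to notify at,
                          bit 15: expected wrap counter at that offset */
    uint16_t flags;    /* 0x0 ENABLE  - always notify
                          0x1 DISABLE - never notify
                          0x2 DESC    - notify once off_wrap is reached
                                        (requires VIRTIO_RING_F_EVENT_IDX) */
};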
@@ -240,6 +240,44 @@ static void vring_split_desc_read(VirtIODevice *vdev, VRingDesc *desc,
     virtio_tswap16s(vdev, &desc->next);
 }
 
+static void vring_packed_event_read(VirtIODevice *vdev,
+                                    MemoryRegionCache *cache,
+                                    VRingPackedDescEvent *e)
+{
+    hwaddr off_off = offsetof(VRingPackedDescEvent, off_wrap);
+    hwaddr off_flags = offsetof(VRingPackedDescEvent, flags);
+
+    address_space_read_cached(cache, off_flags, &e->flags,
+                              sizeof(e->flags));
+    /* Make sure flags is seen before off_wrap */
+    smp_rmb();
+    address_space_read_cached(cache, off_off, &e->off_wrap,
+                              sizeof(e->off_wrap));
+    virtio_tswap16s(vdev, &e->off_wrap);
+    virtio_tswap16s(vdev, &e->flags);
+}
+
+static void vring_packed_off_wrap_write(VirtIODevice *vdev,
+                                        MemoryRegionCache *cache,
+                                        uint16_t off_wrap)
+{
+    hwaddr off = offsetof(VRingPackedDescEvent, off_wrap);
+
+    virtio_tswap16s(vdev, &off_wrap);
+    address_space_write_cached(cache, off, &off_wrap, sizeof(off_wrap));
+    address_space_cache_invalidate(cache, off, sizeof(off_wrap));
+}
+
+static void vring_packed_flags_write(VirtIODevice *vdev,
+                                     MemoryRegionCache *cache, uint16_t flags)
+{
+    hwaddr off = offsetof(VRingPackedDescEvent, flags);
+
+    virtio_tswap16s(vdev, &flags);
+    address_space_write_cached(cache, off, &flags, sizeof(flags));
+    address_space_cache_invalidate(cache, off, sizeof(flags));
+}
+
 /* Called within rcu_read_lock(). */
 static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq)
 {
@@ -347,14 +385,8 @@ static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
     address_space_cache_invalidate(&caches->used, pa, sizeof(val));
 }
 
-void virtio_queue_set_notification(VirtQueue *vq, int enable)
+static void virtio_queue_split_set_notification(VirtQueue *vq, int enable)
 {
-    vq->notification = enable;
-
-    if (!vq->vring.desc) {
-        return;
-    }
-
     rcu_read_lock();
     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
         vring_set_avail_event(vq, vring_avail_idx(vq));
@@ -370,6 +402,51 @@ void virtio_queue_set_notification(VirtQueue *vq, int enable)
     rcu_read_unlock();
 }
 
+static void virtio_queue_packed_set_notification(VirtQueue *vq, int enable)
+{
+    uint16_t off_wrap;
+    VRingPackedDescEvent e;
+    VRingMemoryRegionCaches *caches;
+
+    rcu_read_lock();
+    caches = vring_get_region_caches(vq);
+    vring_packed_event_read(vq->vdev, &caches->used, &e);
+
+    if (!enable) {
+        e.flags = VRING_PACKED_EVENT_FLAG_DISABLE;
+    } else if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
+        off_wrap = vq->shadow_avail_idx | vq->shadow_avail_wrap_counter << 15;
+        vring_packed_off_wrap_write(vq->vdev, &caches->used, off_wrap);
+        /* Make sure off_wrap is wrote before flags */
+        smp_wmb();
+        e.flags = VRING_PACKED_EVENT_FLAG_DESC;
+    } else {
+        e.flags = VRING_PACKED_EVENT_FLAG_ENABLE;
+    }
+
+    vring_packed_flags_write(vq->vdev, &caches->used, e.flags);
+    if (enable) {
+        /* Expose avail event/used flags before caller checks the avail idx. */
+        smp_mb();
+    }
+    rcu_read_unlock();
+}
+
+void virtio_queue_set_notification(VirtQueue *vq, int enable)
+{
+    vq->notification = enable;
+
+    if (!vq->vring.desc) {
+        return;
+    }
+
+    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
+        virtio_queue_packed_set_notification(vq, enable);
+    } else {
+        virtio_queue_split_set_notification(vq, enable);
+    }
+}
+
 int virtio_queue_ready(VirtQueue *vq)
 {
     return vq->vring.avail != 0;
@@ -2287,8 +2364,7 @@ static void virtio_set_isr(VirtIODevice *vdev, int value)
     }
 }
 
-/* Called within rcu_read_lock(). */
-static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
+static bool virtio_split_should_notify(VirtIODevice *vdev, VirtQueue *vq)
 {
     uint16_t old, new;
     bool v;
@@ -2311,6 +2387,54 @@ static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
     return !v || vring_need_event(vring_get_used_event(vq), new, old);
 }
 
+static bool vring_packed_need_event(VirtQueue *vq, bool wrap,
+                                    uint16_t off_wrap, uint16_t new,
+                                    uint16_t old)
+{
+    int off = off_wrap & ~(1 << 15);
+
+    if (wrap != off_wrap >> 15) {
+        off -= vq->vring.num;
+    }
+
+    return vring_need_event(off, new, old);
+}
+
+static bool virtio_packed_should_notify(VirtIODevice *vdev, VirtQueue *vq)
+{
+    VRingPackedDescEvent e;
+    uint16_t old, new;
+    bool v;
+    VRingMemoryRegionCaches *caches;
+
+    caches = vring_get_region_caches(vq);
+    vring_packed_event_read(vdev, &caches->avail, &e);
+
+    old = vq->signalled_used;
+    new = vq->signalled_used = vq->used_idx;
+    v = vq->signalled_used_valid;
+    vq->signalled_used_valid = true;
+
+    if (e.flags == VRING_PACKED_EVENT_FLAG_DISABLE) {
+        return false;
+    } else if (e.flags == VRING_PACKED_EVENT_FLAG_ENABLE) {
+        return true;
+    }
+
+    return !v || vring_packed_need_event(vq, vq->used_wrap_counter,
+                                         e.off_wrap, new, old);
+}
+
+/* Called within rcu_read_lock(). */
+static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
+{
+    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
+        return virtio_packed_should_notify(vdev, vq);
+    } else {
+        return virtio_split_should_notify(vdev, vq);
+    }
+}
+
 void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
 {
     bool should_notify;
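As a usage note, the notification decision added above reduces to the classic event-index test once the DESC event point is translated into the current lap of the ring. The self-contained sketch below illustrates that computation; the helper names and the explicit ring_size parameter are illustrative stand-ins for the QEMU symbols, and need_event() uses the same formula the split ring relies on:

#include <stdbool.h>
#include <stdint.h>

/* Split-ring style test: did the used index cross event_idx while moving
 * from old_idx to new_idx (free-running uint16_t arithmetic)? */
static bool need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old_idx)
{
    return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old_idx);
}

/* Packed-ring variant: off_wrap carries the event offset in bits 0..14 and the
 * expected wrap counter in bit 15.  If the current wrap counter differs, the
 * event point belongs to the previous lap, so shift it back by the ring size
 * before applying the same test.
 *
 * Example: ring_size=256, used_wrap=true, off_wrap=0x8005 (wrap=1, offset 5),
 * old_idx=4, new_idx=6 -> the event point 5 was crossed, so notify. */
static bool packed_need_event(uint16_t ring_size, bool used_wrap,
                              uint16_t off_wrap, uint16_t new_idx,
                              uint16_t old_idx)
{
    int event = off_wrap & ~(1 << 15);

    if (used_wrap != (off_wrap >> 15)) {
        event -= ring_size;
    }
    return need_event((uint16_t)event, new_idx, old_idx);
}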