mirror of
https://github.com/xemu-project/xemu.git
synced 2024-11-28 14:00:44 +00:00
virtio: Use auto rcu_read macros
Use RCU_READ_LOCK_GUARD and WITH_RCU_READ_LOCK_GUARD to replace the manual rcu_read_(un)lock calls. I think the only change is virtio_load which was missing unlocks in error paths; those end up being fatal errors so it's not that important anyway. Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com> Message-Id: <20191028161109.60205-1-dgilbert@redhat.com> Reviewed-by: Michael S. Tsirkin <mst@redhat.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
This commit is contained in:
parent
068ddfa970
commit
b5f53d04a5
@@ -387,7 +387,8 @@ static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
|
||||
|
||||
static void virtio_queue_split_set_notification(VirtQueue *vq, int enable)
|
||||
{
|
||||
rcu_read_lock();
|
||||
RCU_READ_LOCK_GUARD();
|
||||
|
||||
if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
|
||||
vring_set_avail_event(vq, vring_avail_idx(vq));
|
||||
} else if (enable) {
|
||||
@@ -399,7 +400,6 @@ static void virtio_queue_split_set_notification(VirtQueue *vq, int enable)
|
||||
/* Expose avail event/used flags before caller checks the avail idx. */
|
||||
smp_mb();
|
||||
}
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
static void virtio_queue_packed_set_notification(VirtQueue *vq, int enable)
|
||||
@@ -408,7 +408,7 @@ static void virtio_queue_packed_set_notification(VirtQueue *vq, int enable)
|
||||
VRingPackedDescEvent e;
|
||||
VRingMemoryRegionCaches *caches;
|
||||
|
||||
rcu_read_lock();
|
||||
RCU_READ_LOCK_GUARD();
|
||||
caches = vring_get_region_caches(vq);
|
||||
vring_packed_event_read(vq->vdev, &caches->used, &e);
|
||||
|
||||
@@ -429,7 +429,6 @@ static void virtio_queue_packed_set_notification(VirtQueue *vq, int enable)
|
||||
/* Expose avail event/used flags before caller checks the avail idx. */
|
||||
smp_mb();
|
||||
}
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
void virtio_queue_set_notification(VirtQueue *vq, int enable)
|
||||
@@ -577,9 +576,8 @@ static int virtio_queue_split_empty(VirtQueue *vq)
|
||||
return 0;
|
||||
}
|
||||
|
||||
rcu_read_lock();
|
||||
RCU_READ_LOCK_GUARD();
|
||||
empty = vring_avail_idx(vq) == vq->last_avail_idx;
|
||||
rcu_read_unlock();
|
||||
return empty;
|
||||
}
|
||||
|
||||
@@ -601,12 +599,8 @@ static int virtio_queue_packed_empty_rcu(VirtQueue *vq)
|
||||
|
||||
/*
 * Return nonzero if the packed-layout queue has no available buffers.
 *
 * Post-commit form: RCU_READ_LOCK_GUARD() holds the RCU read lock for the
 * rest of the scope and releases it automatically on return, so the local
 * 'empty' temporary and the manual rcu_read_unlock() are no longer needed —
 * the result of the _rcu helper is returned directly.
 */
static int virtio_queue_packed_empty(VirtQueue *vq)
{
    RCU_READ_LOCK_GUARD();
    return virtio_queue_packed_empty_rcu(vq);
}
|
||||
|
||||
int virtio_queue_empty(VirtQueue *vq)
|
||||
@@ -859,10 +853,9 @@ void virtqueue_flush(VirtQueue *vq, unsigned int count)
|
||||
/*
 * Publish a completed element to the guest: record @elem in the used ring
 * with @len bytes written (virtqueue_fill), then flush one entry so the
 * guest observes the updated used index.
 *
 * Post-commit form: the auto RCU guard replaces the manual
 * rcu_read_lock()/rcu_read_unlock() pair and protects the vring memory
 * region caches for the whole fill+flush sequence.
 */
void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    RCU_READ_LOCK_GUARD();
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}
|
||||
|
||||
/* Called within rcu_read_lock(). */
|
||||
@@ -943,7 +936,8 @@ static void virtqueue_split_get_avail_bytes(VirtQueue *vq,
|
||||
int64_t len = 0;
|
||||
int rc;
|
||||
|
||||
rcu_read_lock();
|
||||
RCU_READ_LOCK_GUARD();
|
||||
|
||||
idx = vq->last_avail_idx;
|
||||
total_bufs = in_total = out_total = 0;
|
||||
|
||||
@@ -1033,7 +1027,6 @@ done:
|
||||
if (out_bytes) {
|
||||
*out_bytes = out_total;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
return;
|
||||
|
||||
err:
|
||||
@@ -1083,7 +1076,7 @@ static void virtqueue_packed_get_avail_bytes(VirtQueue *vq,
|
||||
VRingPackedDesc desc;
|
||||
bool wrap_counter;
|
||||
|
||||
rcu_read_lock();
|
||||
RCU_READ_LOCK_GUARD();
|
||||
idx = vq->last_avail_idx;
|
||||
wrap_counter = vq->last_avail_wrap_counter;
|
||||
total_bufs = in_total = out_total = 0;
|
||||
@@ -1176,7 +1169,6 @@ done:
|
||||
if (out_bytes) {
|
||||
*out_bytes = out_total;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
return;
|
||||
|
||||
err:
|
||||
@@ -1360,7 +1352,7 @@ static void *virtqueue_split_pop(VirtQueue *vq, size_t sz)
|
||||
VRingDesc desc;
|
||||
int rc;
|
||||
|
||||
rcu_read_lock();
|
||||
RCU_READ_LOCK_GUARD();
|
||||
if (virtio_queue_empty_rcu(vq)) {
|
||||
goto done;
|
||||
}
|
||||
@@ -1469,7 +1461,6 @@ static void *virtqueue_split_pop(VirtQueue *vq, size_t sz)
|
||||
trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
|
||||
done:
|
||||
address_space_cache_destroy(&indirect_desc_cache);
|
||||
rcu_read_unlock();
|
||||
|
||||
return elem;
|
||||
|
||||
@@ -1494,7 +1485,7 @@ static void *virtqueue_packed_pop(VirtQueue *vq, size_t sz)
|
||||
uint16_t id;
|
||||
int rc;
|
||||
|
||||
rcu_read_lock();
|
||||
RCU_READ_LOCK_GUARD();
|
||||
if (virtio_queue_packed_empty_rcu(vq)) {
|
||||
goto done;
|
||||
}
|
||||
@@ -1600,7 +1591,6 @@ static void *virtqueue_packed_pop(VirtQueue *vq, size_t sz)
|
||||
trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
|
||||
done:
|
||||
address_space_cache_destroy(&indirect_desc_cache);
|
||||
rcu_read_unlock();
|
||||
|
||||
return elem;
|
||||
|
||||
@@ -2437,13 +2427,10 @@ static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
|
||||
|
||||
void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
|
||||
{
|
||||
bool should_notify;
|
||||
rcu_read_lock();
|
||||
should_notify = virtio_should_notify(vdev, vq);
|
||||
rcu_read_unlock();
|
||||
|
||||
if (!should_notify) {
|
||||
return;
|
||||
WITH_RCU_READ_LOCK_GUARD() {
|
||||
if (!virtio_should_notify(vdev, vq)) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
trace_virtio_notify_irqfd(vdev, vq);
|
||||
@@ -2475,13 +2462,10 @@ static void virtio_irq(VirtQueue *vq)
|
||||
|
||||
void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
|
||||
{
|
||||
bool should_notify;
|
||||
rcu_read_lock();
|
||||
should_notify = virtio_should_notify(vdev, vq);
|
||||
rcu_read_unlock();
|
||||
|
||||
if (!should_notify) {
|
||||
return;
|
||||
WITH_RCU_READ_LOCK_GUARD() {
|
||||
if (!virtio_should_notify(vdev, vq)) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
trace_virtio_notify(vdev, vq);
|
||||
@@ -3032,7 +3016,7 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
|
||||
vdev->start_on_kick = true;
|
||||
}
|
||||
|
||||
rcu_read_lock();
|
||||
RCU_READ_LOCK_GUARD();
|
||||
for (i = 0; i < num; i++) {
|
||||
if (vdev->vq[i].vring.desc) {
|
||||
uint16_t nheads;
|
||||
@@ -3087,7 +3071,6 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
|
||||
}
|
||||
}
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
if (vdc->post_load) {
|
||||
ret = vdc->post_load(vdev);
|
||||
@@ -3297,12 +3280,11 @@ static void virtio_queue_packed_restore_last_avail_idx(VirtIODevice *vdev,
|
||||
/*
 * Restore queue @n's last_avail_idx (and its shadow copy) from the used
 * ring index of a split-layout vring — used when resuming after the device
 * has processed all in-flight requests.
 *
 * Post-commit form: RCU_READ_LOCK_GUARD() covers the vring access and
 * unlocks on return; the guard is a no-op if the queue has no descriptor
 * ring configured (vring.desc == 0 is checked before touching it).
 */
static void virtio_queue_split_restore_last_avail_idx(VirtIODevice *vdev,
                                                      int n)
{
    RCU_READ_LOCK_GUARD();
    if (vdev->vq[n].vring.desc) {
        vdev->vq[n].last_avail_idx = vring_used_idx(&vdev->vq[n]);
        vdev->vq[n].shadow_avail_idx = vdev->vq[n].last_avail_idx;
    }
}
|
||||
|
||||
void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n)
|
||||
@@ -3322,11 +3304,10 @@ static void virtio_queue_packed_update_used_idx(VirtIODevice *vdev, int n)
|
||||
|
||||
/*
 * Refresh the cached used_idx for queue @n from guest memory (split
 * layout), skipping queues that have no descriptor ring configured.
 *
 * Post-commit form: the manual rcu_read_lock()/rcu_read_unlock() pair is
 * replaced by RCU_READ_LOCK_GUARD(), which releases the RCU read lock
 * automatically at end of scope.
 */
static void virtio_split_packed_update_used_idx(VirtIODevice *vdev, int n)
{
    RCU_READ_LOCK_GUARD();
    if (vdev->vq[n].vring.desc) {
        vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]);
    }
}
|
||||
|
||||
void virtio_queue_update_used_idx(VirtIODevice *vdev, int n)
|
||||
|
Loading…
Reference in New Issue
Block a user