From: "Dr. David Alan Gilbert (git)" <dgilbert@redhat.com>
To: qemu-devel@nongnu.org, mst@redhat.com
Subject: [PATCH v2] virtio: Use auto rcu_read macros
Date: Mon, 28 Oct 2019 16:11:09 +0000
Message-ID: <20191028161109.60205-1-dgilbert@redhat.com>
From: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
Use RCU_READ_LOCK_GUARD and WITH_RCU_READ_LOCK_GUARD
to replace the manual rcu_read_(un)lock calls.
I think the only functional change is in virtio_load(), which was
missing unlocks in its error paths; since those paths all end in
fatal errors, the leaked read lock was not a real problem anyway.
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
---
hw/virtio/virtio.c | 65 ++++++++++++++++------------------------------
1 file changed, 23 insertions(+), 42 deletions(-)
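A quick note for reviewers unfamiliar with the macros (not part of the
patch): both guards are built on the compiler "cleanup" attribute, so the
read lock is released automatically whenever the guard variable goes out
of scope, including via goto or an early return, which is what fixes the
virtio_load() error paths. WITH_RCU_READ_LOCK_GUARD() wraps the same idea
in a one-iteration for loop so the lock covers only the attached block,
and a return from inside that block still runs the cleanup. The real
definitions live in include/qemu/rcu.h and use g_autoptr(); the standalone
sketch below uses made-up names and stubbed lock functions just to show
the pattern:

/* Illustrative sketch only: a scope-based RCU read guard built on
 * __attribute__((cleanup)). Names and stubs are invented for the
 * example; see include/qemu/rcu.h for QEMU's actual macros. */
#include <stdio.h>

static void rcu_read_lock(void)   { puts("lock");   }  /* stub */
static void rcu_read_unlock(void) { puts("unlock"); }  /* stub */

static void rcu_guard_cleanup(int *unused)
{
    rcu_read_unlock();      /* runs whenever the guard leaves scope */
}

#define RCU_READ_LOCK_GUARD_SKETCH() \
    __attribute__((cleanup(rcu_guard_cleanup), unused)) \
    int rcu_guard_ = (rcu_read_lock(), 0)

static int load_example(int fail)
{
    RCU_READ_LOCK_GUARD_SKETCH();
    if (fail) {
        return -1;          /* early return: unlock still runs */
    }
    return 0;               /* normal return: unlock runs here too */
}

int main(void)
{
    load_example(1);
    load_example(0);
    return 0;
}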
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 2e91dec567..7cb67702d0 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -387,7 +387,8 @@ static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
static void virtio_queue_split_set_notification(VirtQueue *vq, int enable)
{
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
+
if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
vring_set_avail_event(vq, vring_avail_idx(vq));
} else if (enable) {
@@ -399,7 +400,6 @@ static void virtio_queue_split_set_notification(VirtQueue *vq, int enable)
/* Expose avail event/used flags before caller checks the avail idx. */
smp_mb();
}
- rcu_read_unlock();
}
static void virtio_queue_packed_set_notification(VirtQueue *vq, int enable)
@@ -408,7 +408,7 @@ static void virtio_queue_packed_set_notification(VirtQueue *vq, int enable)
VRingPackedDescEvent e;
VRingMemoryRegionCaches *caches;
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
caches = vring_get_region_caches(vq);
vring_packed_event_read(vq->vdev, &caches->used, &e);
@@ -429,7 +429,6 @@ static void virtio_queue_packed_set_notification(VirtQueue *vq, int enable)
/* Expose avail event/used flags before caller checks the avail idx. */
smp_mb();
}
- rcu_read_unlock();
}
void virtio_queue_set_notification(VirtQueue *vq, int enable)
@@ -577,9 +576,8 @@ static int virtio_queue_split_empty(VirtQueue *vq)
return 0;
}
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
empty = vring_avail_idx(vq) == vq->last_avail_idx;
- rcu_read_unlock();
return empty;
}
@@ -601,12 +599,8 @@ static int virtio_queue_packed_empty_rcu(VirtQueue *vq)
static int virtio_queue_packed_empty(VirtQueue *vq)
{
- bool empty;
-
- rcu_read_lock();
- empty = virtio_queue_packed_empty_rcu(vq);
- rcu_read_unlock();
- return empty;
+ RCU_READ_LOCK_GUARD();
+ return virtio_queue_packed_empty_rcu(vq);
}
int virtio_queue_empty(VirtQueue *vq)
@@ -859,10 +853,9 @@ void virtqueue_flush(VirtQueue *vq, unsigned int count)
void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
unsigned int len)
{
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
virtqueue_fill(vq, elem, len, 0);
virtqueue_flush(vq, 1);
- rcu_read_unlock();
}
/* Called within rcu_read_lock(). */
@@ -943,7 +936,8 @@ static void virtqueue_split_get_avail_bytes(VirtQueue *vq,
int64_t len = 0;
int rc;
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
+
idx = vq->last_avail_idx;
total_bufs = in_total = out_total = 0;
@@ -1033,7 +1027,6 @@ done:
if (out_bytes) {
*out_bytes = out_total;
}
- rcu_read_unlock();
return;
err:
@@ -1083,7 +1076,7 @@ static void virtqueue_packed_get_avail_bytes(VirtQueue *vq,
VRingPackedDesc desc;
bool wrap_counter;
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
idx = vq->last_avail_idx;
wrap_counter = vq->last_avail_wrap_counter;
total_bufs = in_total = out_total = 0;
@@ -1176,7 +1169,6 @@ done:
if (out_bytes) {
*out_bytes = out_total;
}
- rcu_read_unlock();
return;
err:
@@ -1360,7 +1352,7 @@ static void *virtqueue_split_pop(VirtQueue *vq, size_t sz)
VRingDesc desc;
int rc;
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
if (virtio_queue_empty_rcu(vq)) {
goto done;
}
@@ -1469,7 +1461,6 @@ static void *virtqueue_split_pop(VirtQueue *vq, size_t sz)
trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
done:
address_space_cache_destroy(&indirect_desc_cache);
- rcu_read_unlock();
return elem;
@@ -1494,7 +1485,7 @@ static void *virtqueue_packed_pop(VirtQueue *vq, size_t sz)
uint16_t id;
int rc;
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
if (virtio_queue_packed_empty_rcu(vq)) {
goto done;
}
@@ -1600,7 +1591,6 @@ static void *virtqueue_packed_pop(VirtQueue *vq, size_t sz)
trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
done:
address_space_cache_destroy(&indirect_desc_cache);
- rcu_read_unlock();
return elem;
@@ -2437,13 +2427,10 @@ static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
{
- bool should_notify;
- rcu_read_lock();
- should_notify = virtio_should_notify(vdev, vq);
- rcu_read_unlock();
-
- if (!should_notify) {
- return;
+ WITH_RCU_READ_LOCK_GUARD() {
+ if (!virtio_should_notify(vdev, vq)) {
+ return;
+ }
}
trace_virtio_notify_irqfd(vdev, vq);
@@ -2475,13 +2462,10 @@ static void virtio_irq(VirtQueue *vq)
void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
- bool should_notify;
- rcu_read_lock();
- should_notify = virtio_should_notify(vdev, vq);
- rcu_read_unlock();
-
- if (!should_notify) {
- return;
+ WITH_RCU_READ_LOCK_GUARD() {
+ if (!virtio_should_notify(vdev, vq)) {
+ return;
+ }
}
trace_virtio_notify(vdev, vq);
@@ -3032,7 +3016,7 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
vdev->start_on_kick = true;
}
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
for (i = 0; i < num; i++) {
if (vdev->vq[i].vring.desc) {
uint16_t nheads;
@@ -3087,7 +3071,6 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
}
}
}
- rcu_read_unlock();
return 0;
}
@@ -3290,12 +3273,11 @@ static void virtio_queue_packed_restore_last_avail_idx(VirtIODevice *vdev,
static void virtio_queue_split_restore_last_avail_idx(VirtIODevice *vdev,
int n)
{
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
if (vdev->vq[n].vring.desc) {
vdev->vq[n].last_avail_idx = vring_used_idx(&vdev->vq[n]);
vdev->vq[n].shadow_avail_idx = vdev->vq[n].last_avail_idx;
}
- rcu_read_unlock();
}
void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n)
@@ -3315,11 +3297,10 @@ static void virtio_queue_packed_update_used_idx(VirtIODevice *vdev, int n)
static void virtio_split_packed_update_used_idx(VirtIODevice *vdev, int n)
{
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
if (vdev->vq[n].vring.desc) {
vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]);
}
- rcu_read_unlock();
}
void virtio_queue_update_used_idx(VirtIODevice *vdev, int n)
--
2.23.0