From: "Michael S. Tsirkin" <mst@redhat.com>
To: qemu-devel@nongnu.org
Cc: Peter Maydell <peter.maydell@linaro.org>,
	"Dr. David Alan Gilbert" <dgilbert@redhat.com>
Subject: [PULL 14/14] virtio: Use auto rcu_read macros
Date: Tue, 29 Oct 2019 19:38:51 -0400
Message-ID: <20191029225932.14585-15-mst@redhat.com>
In-Reply-To: <20191029225932.14585-1-mst@redhat.com>

From: "Dr. David Alan Gilbert" <dgilbert@redhat.com>

Use RCU_READ_LOCK_GUARD and WITH_RCU_READ_LOCK_GUARD
to replace the manual rcu_read_(un)lock calls.

I think the only behavioural change is in virtio_load, which was
missing unlocks on its error paths; since those paths end in fatal
errors anyway, the missing unlocks were not that important in practice.
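
For illustration, the conversion pattern looks like this (a minimal
sketch, not code from this patch; the example_* helpers are made up,
the guard macros are the real ones from include/qemu/rcu.h):

    /* Before: manual lock/unlock; every exit path must remember to
     * unlock (virtio_load's error paths did not). */
    static int example_before(VirtQueue *vq)
    {
        int ret;

        rcu_read_lock();
        ret = example_helper(vq);    /* hypothetical helper */
        rcu_read_unlock();
        return ret;
    }

    /* After: RCU_READ_LOCK_GUARD() releases the RCU read lock
     * automatically when it goes out of scope, including on early
     * returns and error paths. */
    static int example_after(VirtQueue *vq)
    {
        RCU_READ_LOCK_GUARD();
        return example_helper(vq);   /* hypothetical helper */
    }

    /* WITH_RCU_READ_LOCK_GUARD() scopes the critical section to an
     * explicit block; returning from inside the block still unlocks,
     * and code after the block runs outside the critical section
     * (this is the shape virtio_notify_irqfd takes below). */
    static void example_notify(VirtIODevice *vdev, VirtQueue *vq)
    {
        WITH_RCU_READ_LOCK_GUARD() {
            if (!example_should_notify(vdev, vq)) { /* hypothetical */
                return;
            }
        }
        example_do_notify(vdev, vq);                /* hypothetical */
    }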

Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Message-Id: <20191028161109.60205-1-dgilbert@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
 hw/virtio/virtio.c | 65 ++++++++++++++++------------------------------
 1 file changed, 23 insertions(+), 42 deletions(-)

diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 6348cd30e9..762df12f4c 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -387,7 +387,8 @@ static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
 
 static void virtio_queue_split_set_notification(VirtQueue *vq, int enable)
 {
-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
+
     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
         vring_set_avail_event(vq, vring_avail_idx(vq));
     } else if (enable) {
@@ -399,7 +400,6 @@ static void virtio_queue_split_set_notification(VirtQueue *vq, int enable)
         /* Expose avail event/used flags before caller checks the avail idx. */
         smp_mb();
     }
-    rcu_read_unlock();
 }
 
 static void virtio_queue_packed_set_notification(VirtQueue *vq, int enable)
@@ -408,7 +408,7 @@ static void virtio_queue_packed_set_notification(VirtQueue *vq, int enable)
     VRingPackedDescEvent e;
     VRingMemoryRegionCaches *caches;
 
-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
     caches  = vring_get_region_caches(vq);
     vring_packed_event_read(vq->vdev, &caches->used, &e);
 
@@ -429,7 +429,6 @@ static void virtio_queue_packed_set_notification(VirtQueue *vq, int enable)
         /* Expose avail event/used flags before caller checks the avail idx. */
         smp_mb();
     }
-    rcu_read_unlock();
 }
 
 void virtio_queue_set_notification(VirtQueue *vq, int enable)
@@ -577,9 +576,8 @@ static int virtio_queue_split_empty(VirtQueue *vq)
         return 0;
     }
 
-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
     empty = vring_avail_idx(vq) == vq->last_avail_idx;
-    rcu_read_unlock();
     return empty;
 }
 
@@ -601,12 +599,8 @@ static int virtio_queue_packed_empty_rcu(VirtQueue *vq)
 
 static int virtio_queue_packed_empty(VirtQueue *vq)
 {
-    bool empty;
-
-    rcu_read_lock();
-    empty = virtio_queue_packed_empty_rcu(vq);
-    rcu_read_unlock();
-    return empty;
+    RCU_READ_LOCK_GUARD();
+    return virtio_queue_packed_empty_rcu(vq);
 }
 
 int virtio_queue_empty(VirtQueue *vq)
@@ -859,10 +853,9 @@ void virtqueue_flush(VirtQueue *vq, unsigned int count)
 void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                     unsigned int len)
 {
-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
     virtqueue_fill(vq, elem, len, 0);
     virtqueue_flush(vq, 1);
-    rcu_read_unlock();
 }
 
 /* Called within rcu_read_lock().  */
@@ -943,7 +936,8 @@ static void virtqueue_split_get_avail_bytes(VirtQueue *vq,
     int64_t len = 0;
     int rc;
 
-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
+
     idx = vq->last_avail_idx;
     total_bufs = in_total = out_total = 0;
 
@@ -1033,7 +1027,6 @@ done:
     if (out_bytes) {
         *out_bytes = out_total;
     }
-    rcu_read_unlock();
     return;
 
 err:
@@ -1083,7 +1076,7 @@ static void virtqueue_packed_get_avail_bytes(VirtQueue *vq,
     VRingPackedDesc desc;
     bool wrap_counter;
 
-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
     idx = vq->last_avail_idx;
     wrap_counter = vq->last_avail_wrap_counter;
     total_bufs = in_total = out_total = 0;
@@ -1176,7 +1169,6 @@ done:
     if (out_bytes) {
         *out_bytes = out_total;
     }
-    rcu_read_unlock();
     return;
 
 err:
@@ -1360,7 +1352,7 @@ static void *virtqueue_split_pop(VirtQueue *vq, size_t sz)
     VRingDesc desc;
     int rc;
 
-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
     if (virtio_queue_empty_rcu(vq)) {
         goto done;
     }
@@ -1469,7 +1461,6 @@ static void *virtqueue_split_pop(VirtQueue *vq, size_t sz)
     trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
 done:
     address_space_cache_destroy(&indirect_desc_cache);
-    rcu_read_unlock();
 
     return elem;
 
@@ -1494,7 +1485,7 @@ static void *virtqueue_packed_pop(VirtQueue *vq, size_t sz)
     uint16_t id;
     int rc;
 
-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
     if (virtio_queue_packed_empty_rcu(vq)) {
         goto done;
     }
@@ -1600,7 +1591,6 @@ static void *virtqueue_packed_pop(VirtQueue *vq, size_t sz)
     trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
 done:
     address_space_cache_destroy(&indirect_desc_cache);
-    rcu_read_unlock();
 
     return elem;
 
@@ -2437,13 +2427,10 @@ static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
 
 void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
 {
-    bool should_notify;
-    rcu_read_lock();
-    should_notify = virtio_should_notify(vdev, vq);
-    rcu_read_unlock();
-
-    if (!should_notify) {
-        return;
+    WITH_RCU_READ_LOCK_GUARD() {
+        if (!virtio_should_notify(vdev, vq)) {
+            return;
+        }
     }
 
     trace_virtio_notify_irqfd(vdev, vq);
@@ -2475,13 +2462,10 @@ static void virtio_irq(VirtQueue *vq)
 
 void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
 {
-    bool should_notify;
-    rcu_read_lock();
-    should_notify = virtio_should_notify(vdev, vq);
-    rcu_read_unlock();
-
-    if (!should_notify) {
-        return;
+    WITH_RCU_READ_LOCK_GUARD() {
+        if (!virtio_should_notify(vdev, vq)) {
+            return;
+        }
     }
 
     trace_virtio_notify(vdev, vq);
@@ -3032,7 +3016,7 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
         vdev->start_on_kick = true;
     }
 
-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
     for (i = 0; i < num; i++) {
         if (vdev->vq[i].vring.desc) {
             uint16_t nheads;
@@ -3087,7 +3071,6 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
             }
         }
     }
-    rcu_read_unlock();
 
     if (vdc->post_load) {
         ret = vdc->post_load(vdev);
@@ -3297,12 +3280,11 @@ static void virtio_queue_packed_restore_last_avail_idx(VirtIODevice *vdev,
 static void virtio_queue_split_restore_last_avail_idx(VirtIODevice *vdev,
                                                       int n)
 {
-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
     if (vdev->vq[n].vring.desc) {
         vdev->vq[n].last_avail_idx = vring_used_idx(&vdev->vq[n]);
         vdev->vq[n].shadow_avail_idx = vdev->vq[n].last_avail_idx;
     }
-    rcu_read_unlock();
 }
 
 void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n)
@@ -3322,11 +3304,10 @@ static void virtio_queue_packed_update_used_idx(VirtIODevice *vdev, int n)
 
 static void virtio_split_packed_update_used_idx(VirtIODevice *vdev, int n)
 {
-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
     if (vdev->vq[n].vring.desc) {
         vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]);
     }
-    rcu_read_unlock();
 }
 
 void virtio_queue_update_used_idx(VirtIODevice *vdev, int n)
-- 
MST

