From: "Alex Bennée" <alex.bennee@linaro.org>
To: qemu-devel@nongnu.org
Cc: slp@redhat.com, mathieu.poirier@linaro.org, mst@redhat.com,
	viresh.kumar@linaro.org, stefanha@redhat.com,
	marcandre.lureau@redhat.com,
	"Alex Bennée" <alex.bennee@linaro.org>
Subject: [PATCH v1 13/13] virtio/vhost-user: dynamically assign VhostUserHostNotifiers
Date: Mon, 21 Mar 2022 15:30:37 +0000
Message-ID: <20220321153037.3622127-14-alex.bennee@linaro.org>
In-Reply-To: <20220321153037.3622127-1-alex.bennee@linaro.org>

At a couple of hundred bytes per notifier, allocating one for every
potential queue is very wasteful as most devices only have a few
queues. Instead of allocating the whole array statically, assign the
notifiers dynamically as they are needed and track them in a GPtrArray.
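
As an illustration only (not part of this patch), the glib pattern in
isolation looks roughly like the sketch below; the type and helper
names are made up, the real code fills slots with g_ptr_array_insert()
and defers freeing via RCU rather than calling g_free() directly:

  #include <glib.h>

  /* stand-in for VhostUserHostNotifier, trimmed to the bare minimum */
  typedef struct {
      int idx;
  } ExampleNotifier;

  /* free func run by g_ptr_array_free(); never-used slots are NULL */
  static void example_notifier_destroy(gpointer data)
  {
      g_free(data); /* g_free(NULL) is a no-op */
  }

  static ExampleNotifier *fetch_or_create(GPtrArray *notifiers, int idx)
  {
      if (idx >= notifiers->len) {
          /* grow to idx + 1 entries; the new slots are NULL filled */
          g_ptr_array_set_size(notifiers, idx + 1);
      }
      ExampleNotifier *n = g_ptr_array_index(notifiers, idx);
      if (!n) {
          n = g_new0(ExampleNotifier, 1);
          n->idx = idx;
          g_ptr_array_index(notifiers, idx) = n; /* fill the NULL slot */
      }
      return n;
  }

  int main(void)
  {
      /* start small; only queues that are actually used get an entry */
      GPtrArray *notifiers = g_ptr_array_new_full(4, example_notifier_destroy);
      g_assert(fetch_or_create(notifiers, 2)->idx == 2);
      g_ptr_array_free(notifiers, TRUE); /* frees the entries as well */
      return 0;
  }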

[AJB: it's hard to trigger the vhost notifiers code, I assume because
it requires a KVM guest with an appropriate backend]

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
---
 include/hw/virtio/vhost-user.h | 42 ++++++++++++++++-
 hw/virtio/vhost-user.c         | 83 +++++++++++++++++++++++++++-------
 hw/virtio/trace-events         |  1 +
 3 files changed, 108 insertions(+), 18 deletions(-)
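
Reviewer note, not part of the patch itself: the @addr/@unmap_addr
handover documented in the new VhostUserHostNotifier comment below
works roughly as in this sketch (made-up names, assuming QEMU's
call_rcu() helper from include/qemu/rcu.h); the real logic lives in
vhost_user_host_notifier_remove() and vhost_user_host_notifier_free():

  #include "qemu/osdep.h"
  #include "qemu/rcu.h"
  #include <sys/mman.h>

  typedef struct ExampleNotifier {
      struct rcu_head rcu;   /* linkage used by call_rcu() */
      void *addr;            /* current mapping, seen by readers */
      void *unmap_addr;      /* old mapping awaiting a grace period */
      size_t size;
  } ExampleNotifier;

  static void example_notifier_unmap(ExampleNotifier *n)
  {
      /* runs once all pre-existing RCU readers have finished */
      munmap(n->unmap_addr, n->size);
      n->unmap_addr = NULL;
  }

  static void example_notifier_replace(ExampleNotifier *n, void *new_addr)
  {
      if (n->addr) {
          assert(!n->unmap_addr);
          n->unmap_addr = n->addr;  /* hand the old mapping to RCU */
          call_rcu(n, example_notifier_unmap, rcu);
      }
      n->addr = new_addr;           /* readers switch to the new mapping */
  }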

diff --git a/include/hw/virtio/vhost-user.h b/include/hw/virtio/vhost-user.h
index 6e0e8a71a3..c6e693cd3f 100644
--- a/include/hw/virtio/vhost-user.h
+++ b/include/hw/virtio/vhost-user.h
@@ -11,21 +11,61 @@
 #include "chardev/char-fe.h"
 #include "hw/virtio/virtio.h"
 
+/**
+ * VhostUserHostNotifier - notifier information for one queue
+ * @rcu: rcu_head for cleanup
+ * @mr: memory region of notifier
+ * @addr: current mapped address
+ * @unmap_addr: address to be un-mapped
+ * @idx: the virtqueue index
+ *
+ * The VhostUserHostNotifier entries are re-used. When an old mapping
+ * is to be released it is moved to @unmap_addr and @addr is replaced.
+ * Once the RCU callback has completed the unmap, @unmap_addr is
+ * cleared.
+ */
 typedef struct VhostUserHostNotifier {
     struct rcu_head rcu;
     MemoryRegion mr;
     void *addr;
     void *unmap_addr;
+    int idx;
 } VhostUserHostNotifier;
 
+/**
+ * VhostUserState - shared state for all vhost-user devices
+ * @chr: the character backend for the socket
+ * @notifiers: GPtrArray of @VhostUserHostNotifier
+ * @memory_slots: number of memory slots the backend supports
+ */
 typedef struct VhostUserState {
     CharBackend *chr;
-    VhostUserHostNotifier notifier[VIRTIO_QUEUE_MAX];
+    GPtrArray *notifiers;
     int memory_slots;
     bool supports_config;
 } VhostUserState;
 
+/**
+ * vhost_user_init() - initialise shared vhost_user state
+ * @user: allocated area for storing shared state
+ * @chr: the chardev for the vhost socket
+ * @errp: error handle
+ *
+ * Users can either directly g_new() space for the state or embed
+ * VhostUserState in their larger device structure and just point to
+ * it.
+ *
+ * Return: true on success, false on failure (in which case @errp is set).
+ */
 bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp);
+
+/**
+ * vhost_user_cleanup() - cleanup state
+ * @user: ptr to the user state
+ *
+ * Cleans up shared state and notifiers, the caller is responsible
+ * for freeing the @VhostUserState memory itself.
+ */
 void vhost_user_cleanup(VhostUserState *user);
 
 #endif
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index 6ce082861b..4c0423de55 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -1174,14 +1174,16 @@ static void vhost_user_host_notifier_free(VhostUserHostNotifier *n)
     n->unmap_addr = NULL;
 }
 
-static void vhost_user_host_notifier_remove(VhostUserState *user,
-                                            VirtIODevice *vdev, int queue_idx)
+/*
+ * Clean up the notifier: detach it from the VirtIODevice (if any) and
+ * hand the old mapping to an RCU callback to be unmapped.
+ */
+static void vhost_user_host_notifier_remove(VhostUserHostNotifier *n,
+                                            VirtIODevice *vdev)
 {
-    VhostUserHostNotifier *n = &user->notifier[queue_idx];
-
     if (n->addr) {
         if (vdev) {
-            virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false);
+            virtio_queue_set_host_notifier_mr(vdev, n->idx, &n->mr, false);
         }
         assert(!n->unmap_addr);
         n->unmap_addr = n->addr;
@@ -1225,6 +1227,15 @@ static int vhost_user_set_vring_enable(struct vhost_dev *dev, int enable)
     return 0;
 }
 
+static VhostUserHostNotifier *fetch_notifier(VhostUserState *u,
+                                             int idx)
+{
+    if (idx >= u->notifiers->len) {
+        return NULL;
+    }
+    return g_ptr_array_index(u->notifiers, idx);
+}
+
 static int vhost_user_get_vring_base(struct vhost_dev *dev,
                                      struct vhost_vring_state *ring)
 {
@@ -1237,7 +1248,10 @@ static int vhost_user_get_vring_base(struct vhost_dev *dev,
     };
     struct vhost_user *u = dev->opaque;
 
-    vhost_user_host_notifier_remove(u->user, dev->vdev, ring->index);
+    VhostUserHostNotifier *n = fetch_notifier(u->user, ring->index);
+    if (n) {
+        vhost_user_host_notifier_remove(n, dev->vdev);
+    }
 
     ret = vhost_user_write(dev, &msg, NULL, 0);
     if (ret < 0) {
@@ -1502,6 +1516,29 @@ static int vhost_user_slave_handle_config_change(struct vhost_dev *dev)
     return dev->config_ops->vhost_dev_config_notifier(dev);
 }
 
+/*
+ * Fetch or create the notifier for a given idx. Newly created
+ * notifiers are added to the pointer array that tracks them.
+ */
+static VhostUserHostNotifier *fetch_or_create_notifier(VhostUserState *u,
+                                                       int idx)
+{
+    VhostUserHostNotifier *n = NULL;
+    if (idx >= u->notifiers->len) {
+        g_ptr_array_set_size(u->notifiers, idx + 1);
+    }
+
+    n = g_ptr_array_index(u->notifiers, idx);
+    if (!n) {
+        n = g_new0(VhostUserHostNotifier, 1);
+        n->idx = idx;
+        g_ptr_array_insert(u->notifiers, idx, n);
+        trace_vhost_user_create_notifier(idx, n);
+    }
+
+    return n;
+}
+
 static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev,
                                                        VhostUserVringArea *area,
                                                        int fd)
@@ -1521,9 +1558,12 @@ static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev,
         return -EINVAL;
     }
 
-    n = &user->notifier[queue_idx];
-
-    vhost_user_host_notifier_remove(user, vdev, queue_idx);
+    /*
+     * Fetch notifier and invalidate any old data before setting up
+     * new mapped address.
+     */
+    n = fetch_or_create_notifier(user, queue_idx);
+    vhost_user_host_notifier_remove(n, vdev);
 
     if (area->u64 & VHOST_USER_VRING_NOFD_MASK) {
         return 0;
@@ -2526,6 +2566,20 @@ static int vhost_user_set_inflight_fd(struct vhost_dev *dev,
     return vhost_user_write(dev, &msg, &inflight->fd, 1);
 }
 
+static void vhost_user_state_destroy(gpointer data)
+{
+    VhostUserHostNotifier *n = (VhostUserHostNotifier *) data;
+    if (n) {
+        vhost_user_host_notifier_remove(n, NULL);
+        object_unparent(OBJECT(&n->mr));
+        /*
+         * We can't free until vhost_user_host_notifier_remove has
+         * done its thing, so schedule the free with RCU.
+         */
+        g_free_rcu(n, rcu);
+    }
+}
+
 bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp)
 {
     if (user->chr) {
@@ -2534,23 +2588,18 @@ bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp)
     }
     user->chr = chr;
     user->memory_slots = 0;
+    user->notifiers = g_ptr_array_new_full(VIRTIO_QUEUE_MAX / 4,
+                                           &vhost_user_state_destroy);
     return true;
 }
 
 void vhost_user_cleanup(VhostUserState *user)
 {
-    int i;
-    VhostUserHostNotifier *n;
-
     if (!user->chr) {
         return;
     }
     memory_region_transaction_begin();
-    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
-        n = &user->notifier[i];
-        vhost_user_host_notifier_remove(user, NULL, i);
-        object_unparent(OBJECT(&n->mr));
-    }
+    user->notifiers = (GPtrArray *) g_ptr_array_free(user->notifiers, true);
     memory_region_transaction_commit();
     user->chr = NULL;
 }
diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
index fd213e2a27..b40392a593 100644
--- a/hw/virtio/trace-events
+++ b/hw/virtio/trace-events
@@ -23,6 +23,7 @@ vhost_user_postcopy_waker_found(uint64_t client_addr) "0x%"PRIx64
 vhost_user_postcopy_waker_nomatch(const char *rb, uint64_t rb_offset) "%s + 0x%"PRIx64
 vhost_user_read(uint32_t req, uint32_t flags) "req:%d flags:0x%"PRIx32""
 vhost_user_write(uint32_t req, uint32_t flags) "req:%d flags:0x%"PRIx32""
+vhost_user_create_notifier(int idx, void *n) "idx:%d n:%p"
 
 # vhost-vdpa.c
 vhost_vdpa_dma_map(void *vdpa, int fd, uint32_t msg_type, uint64_t iova, uint64_t size, uint64_t uaddr, uint8_t perm, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" uaddr: 0x%"PRIx64" perm: 0x%"PRIx8" type: %"PRIu8
-- 
2.30.2


