From: "Eugenio Pérez" <eperezma@redhat.com>
To: qemu-devel@nongnu.org
Cc: "Michael S. Tsirkin" <mst@redhat.com>,
Jason Wang <jasowang@redhat.com>, Peter Xu <peterx@redhat.com>,
virtualization@lists.linux-foundation.org,
Eli Cohen <eli@mellanox.com>, Eric Blake <eblake@redhat.com>,
Parav Pandit <parav@mellanox.com>, Cindy Lu <lulu@redhat.com>,
"Fangyi \(Eric\)" <eric.fangyi@huawei.com>,
Markus Armbruster <armbru@redhat.com>,
yebiaoxiang@huawei.com, Liuxiangdong <liuxiangdong5@huawei.com>,
Stefano Garzarella <sgarzare@redhat.com>,
Laurent Vivier <lvivier@redhat.com>,
Eduardo Habkost <ehabkost@redhat.com>,
Richard Henderson <richard.henderson@linaro.org>,
Gautam Dawar <gdawar@xilinx.com>,
Xiao W Wang <xiao.w.wang@intel.com>,
Stefan Hajnoczi <stefanha@redhat.com>,
Juan Quintela <quintela@redhat.com>,
Harpreet Singh Anand <hanand@xilinx.com>,
Paolo Bonzini <pbonzini@redhat.com>,
Lingshan <lingshan.zhu@intel.com>
Subject: [PATCH v2 03/14] vhost: Add Shadow VirtQueue call forwarding capabilities
Date: Sun, 27 Feb 2022 14:41:00 +0100
Message-ID: <20220227134111.3254066-4-eperezma@redhat.com>
In-Reply-To: <20220227134111.3254066-1-eperezma@redhat.com>
This will make QEMU aware of the buffers the device has used, allowing
it to write their contents to guest memory if needed.
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
hw/virtio/vhost-shadow-virtqueue.h | 4 ++++
hw/virtio/vhost-shadow-virtqueue.c | 34 ++++++++++++++++++++++++++++++
hw/virtio/vhost-vdpa.c | 31 +++++++++++++++++++++++++--
3 files changed, 67 insertions(+), 2 deletions(-)
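Not part of the patch itself, just an illustration of the forwarding described
in the commit message: the SVQ consumes the device's call notification and
re-raises it on the guest's call notifier. The sketch below is a stand-alone
approximation that uses plain Linux eventfd(2) descriptors instead of QEMU's
EventNotifier, and the forward_call() helper is a made-up name; the real
handler added by this patch is vhost_svq_handle_call().

/*
 * Illustrative sketch only, assuming plain eventfds in place of QEMU's
 * EventNotifier.  It mimics the device-to-guest call forwarding path.
 */
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/eventfd.h>

/* Clear the device-side notification and re-raise it on the guest side. */
static void forward_call(int hdev_call_fd, int guest_call_fd)
{
    uint64_t cnt;

    /* Rough equivalent of event_notifier_test_and_clear(): consume the event. */
    if (read(hdev_call_fd, &cnt, sizeof(cnt)) != sizeof(cnt)) {
        return;
    }

    /* Rough equivalent of event_notifier_set(): notify the guest side. */
    cnt = 1;
    if (write(guest_call_fd, &cnt, sizeof(cnt)) != sizeof(cnt)) {
        perror("guest call notifier");
    }
}

int main(void)
{
    int hdev_call = eventfd(0, EFD_NONBLOCK);
    int guest_call = eventfd(0, 0);
    uint64_t cnt = 1;

    /* Pretend the device signalled used buffers... */
    if (write(hdev_call, &cnt, sizeof(cnt)) != sizeof(cnt)) {
        return 1;
    }

    /* ...and forward that signal to the guest notifier. */
    forward_call(hdev_call, guest_call);

    if (read(guest_call, &cnt, sizeof(cnt)) == sizeof(cnt)) {
        printf("guest call notifier raised %llu time(s)\n",
               (unsigned long long)cnt);
    }

    close(hdev_call);
    close(guest_call);
    return 0;
}

The read before the write mirrors what event_notifier_test_and_clear() does in
the real handler: the device-side counter has to be drained, otherwise the
event would stay pending and the handler would keep firing.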
diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h
index 1cbc87d5d8..1d4c160d0a 100644
--- a/hw/virtio/vhost-shadow-virtqueue.h
+++ b/hw/virtio/vhost-shadow-virtqueue.h
@@ -28,9 +28,13 @@ typedef struct VhostShadowVirtqueue {
* So shadow virtqueue must not clean it, or we would lose VirtQueue one.
*/
EventNotifier svq_kick;
+
+ /* Guest's call notifier, where the SVQ calls the guest. */
+ EventNotifier svq_call;
} VhostShadowVirtqueue;
void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd);
+void vhost_svq_set_guest_call_notifier(VhostShadowVirtqueue *svq, int call_fd);
void vhost_svq_stop(VhostShadowVirtqueue *svq);
diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
index a5d0659f86..54c701a196 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -23,6 +23,38 @@ static void vhost_handle_guest_kick(EventNotifier *n)
event_notifier_set(&svq->hdev_kick);
}
+/* Forward vhost notifications */
+static void vhost_svq_handle_call(EventNotifier *n)
+{
+ VhostShadowVirtqueue *svq = container_of(n, VhostShadowVirtqueue,
+ hdev_call);
+ event_notifier_test_and_clear(n);
+ event_notifier_set(&svq->svq_call);
+}
+
+/**
+ * Set the call notifier for the SVQ to call the guest
+ *
+ * @svq Shadow virtqueue
+ * @call_fd call notifier
+ *
+ * Called within the BQL context.
+ */
+void vhost_svq_set_guest_call_notifier(VhostShadowVirtqueue *svq, int call_fd)
+{
+ if (call_fd == VHOST_FILE_UNBIND) {
+ /*
+ * Make event_notifier_set() fail if called while handling a device call.
+ *
+ * SVQ still needs device notifications, since it needs to keep
+ * forwarding used buffers even with the unbind.
+ */
+ memset(&svq->svq_call, 0, sizeof(svq->svq_call));
+ } else {
+ event_notifier_init_fd(&svq->svq_call, call_fd);
+ }
+}
+
/**
* Set a new file descriptor for the guest to kick the SVQ and notify for avail
*
@@ -90,6 +122,7 @@ VhostShadowVirtqueue *vhost_svq_new(void)
}
event_notifier_init_fd(&svq->svq_kick, VHOST_FILE_UNBIND);
+ event_notifier_set_handler(&svq->hdev_call, vhost_svq_handle_call);
return g_steal_pointer(&svq);
err_init_hdev_call:
@@ -109,6 +142,7 @@ void vhost_svq_free(gpointer pvq)
VhostShadowVirtqueue *vq = pvq;
vhost_svq_stop(vq);
event_notifier_cleanup(&vq->hdev_kick);
+ event_notifier_set_handler(&vq->hdev_call, NULL);
event_notifier_cleanup(&vq->hdev_call);
g_free(vq);
}
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 454bf50735..c73215751d 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -724,6 +724,13 @@ static int vhost_vdpa_set_vring_dev_kick(struct vhost_dev *dev,
return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
}
+static int vhost_vdpa_set_vring_dev_call(struct vhost_dev *dev,
+ struct vhost_vring_file *file)
+{
+ trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd);
+ return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
+}
+
/**
* Set the shadow virtqueue descriptors to the device
*
@@ -731,6 +738,9 @@ static int vhost_vdpa_set_vring_dev_kick(struct vhost_dev *dev,
* @svq The shadow virtqueue
* @idx The index of the virtqueue in the vhost device
* @errp Error
+ *
+ * Note that this function does not rewind the kick file descriptor if it
+ * cannot set the call one.
*/
static bool vhost_vdpa_svq_setup(struct vhost_dev *dev,
VhostShadowVirtqueue *svq,
@@ -747,6 +757,14 @@ static bool vhost_vdpa_svq_setup(struct vhost_dev *dev,
r = vhost_vdpa_set_vring_dev_kick(dev, &file);
if (unlikely(r != 0)) {
error_setg_errno(errp, -r, "Can't set device kick fd");
+ return false;
+ }
+
+ event_notifier = &svq->hdev_call;
+ file.fd = event_notifier_get_fd(event_notifier);
+ r = vhost_vdpa_set_vring_dev_call(dev, &file);
+ if (unlikely(r != 0)) {
+ error_setg_errno(errp, -r, "Can't set device call fd");
}
return r == 0;
@@ -872,8 +890,17 @@ static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev,
static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
struct vhost_vring_file *file)
{
- trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd);
- return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
+ struct vhost_vdpa *v = dev->opaque;
+
+ if (v->shadow_vqs_enabled) {
+ int vdpa_idx = file->index - dev->vq_index;
+ VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx);
+
+ vhost_svq_set_guest_call_notifier(svq, file->fd);
+ return 0;
+ } else {
+ return vhost_vdpa_set_vring_dev_call(dev, file);
+ }
}
static int vhost_vdpa_get_features(struct vhost_dev *dev,
--
2.27.0
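A side note on the VHOST_FILE_UNBIND branch in vhost_svq_set_guest_call_notifier()
above: zeroing the notifier is meant to make event_notifier_set() fail harmlessly
while the SVQ keeps consuming device notifications. The stand-alone sketch below
(same eventfd assumptions as the earlier example, with -1 standing in for the
unbound guest notifier) illustrates why that is safe: the device-side event is
still drained even though the guest-side write fails.

/*
 * Illustrative sketch only, not part of the patch: what the unbind case
 * boils down to.  -1 is a stand-in for the zeroed guest notifier; the
 * real code memsets the EventNotifier instead.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/eventfd.h>

int main(void)
{
    int hdev_call = eventfd(0, 0);
    int guest_call = -1;                 /* "unbound" guest call notifier */
    uint64_t cnt = 1;

    /* The device signals used buffers... */
    if (write(hdev_call, &cnt, sizeof(cnt)) != sizeof(cnt)) {
        return 1;
    }
    /* ...the SVQ still consumes the event, so it can keep forwarding... */
    if (read(hdev_call, &cnt, sizeof(cnt)) != sizeof(cnt)) {
        return 1;
    }
    /* ...but the guest-side notification simply fails, which is fine here. */
    if (write(guest_call, &cnt, sizeof(cnt)) < 0) {
        printf("guest notification skipped: %s\n", strerror(errno));
    }

    close(hdev_call);
    return 0;
}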