From: "Eugenio Pérez" <eperezma@redhat.com>
To: qemu-devel@nongnu.org
Cc: "Michael S. Tsirkin" <mst@redhat.com>,
Jason Wang <jasowang@redhat.com>, Peter Xu <peterx@redhat.com>,
virtualization@lists.linux-foundation.org,
Eli Cohen <eli@mellanox.com>, Eric Blake <eblake@redhat.com>,
Parav Pandit <parav@mellanox.com>, Cindy Lu <lulu@redhat.com>,
"Fangyi \(Eric\)" <eric.fangyi@huawei.com>,
Markus Armbruster <armbru@redhat.com>,
yebiaoxiang@huawei.com, Liuxiangdong <liuxiangdong5@huawei.com>,
Stefano Garzarella <sgarzare@redhat.com>,
Laurent Vivier <lvivier@redhat.com>,
Eduardo Habkost <ehabkost@redhat.com>,
Richard Henderson <richard.henderson@linaro.org>,
Gautam Dawar <gdawar@xilinx.com>,
Xiao W Wang <xiao.w.wang@intel.com>,
Stefan Hajnoczi <stefanha@redhat.com>,
Juan Quintela <quintela@redhat.com>,
Harpreet Singh Anand <hanand@xilinx.com>,
Zhu Lingshan <lingshan.zhu@intel.com>
Subject: [PATCH v3 03/14] vhost: Add Shadow VirtQueue call forwarding capabilities
Date: Wed, 2 Mar 2022 21:30:01 +0100
Message-ID: <20220302203012.3476835-4-eperezma@redhat.com>
In-Reply-To: <20220302203012.3476835-1-eperezma@redhat.com>
This makes QEMU aware of the buffers the device has used, so it can
write their contents back to guest memory if needed.
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
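As a quick illustration of the idea (not part of the patch to apply), the
standalone sketch below forwards a call notification from a device-side
eventfd to a guest-side one, which is what vhost_svq_handle_call() does in
the hunks that follow using QEMU's EventNotifier wrappers. The plain
eventfd(2) calls and the forward_call()/main() names exist only for this
example; draining the device fd before signalling the guest fd mirrors
event_notifier_test_and_clear() followed by event_notifier_set().

    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/eventfd.h>

    /* Drain the "device call" eventfd and relay the notification to the
     * guest's call eventfd, mirroring vhost_svq_handle_call(). */
    static void forward_call(int hdev_call_fd, int guest_call_fd)
    {
        uint64_t cnt;

        if (read(hdev_call_fd, &cnt, sizeof(cnt)) == (ssize_t)sizeof(cnt)) {
            uint64_t one = 1;
            write(guest_call_fd, &one, sizeof(one));
        }
    }

    int main(void)
    {
        int hdev_call = eventfd(0, 0);   /* what the device kicks */
        int guest_call = eventfd(0, 0);  /* what the guest side waits on */
        uint64_t one = 1, out;

        write(hdev_call, &one, sizeof(one));   /* pretend the device signalled */
        forward_call(hdev_call, guest_call);
        read(guest_call, &out, sizeof(out));   /* guest side sees the notification */
        printf("guest call notified, counter = %llu\n", (unsigned long long)out);

        close(hdev_call);
        close(guest_call);
        return 0;
    }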
hw/virtio/vhost-shadow-virtqueue.h | 4 ++++
hw/virtio/vhost-shadow-virtqueue.c | 38 ++++++++++++++++++++++++++++++
hw/virtio/vhost-vdpa.c | 31 ++++++++++++++++++++++--
3 files changed, 71 insertions(+), 2 deletions(-)
diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h
index 1cbc87d5d8..cbc5213579 100644
--- a/hw/virtio/vhost-shadow-virtqueue.h
+++ b/hw/virtio/vhost-shadow-virtqueue.h
@@ -28,9 +28,13 @@ typedef struct VhostShadowVirtqueue {
* So shadow virtqueue must not clean it, or we would lose VirtQueue one.
*/
EventNotifier svq_kick;
+
+ /* Guest's call notifier, where the SVQ calls the guest. */
+ EventNotifier svq_call;
} VhostShadowVirtqueue;
void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd);
+void vhost_svq_set_svq_call_fd(VhostShadowVirtqueue *svq, int call_fd);
void vhost_svq_stop(VhostShadowVirtqueue *svq);
diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
index c96dbdf152..5c1e09be5d 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -27,6 +27,42 @@ static void vhost_handle_guest_kick(EventNotifier *n)
event_notifier_set(&svq->hdev_kick);
}
+/**
+ * Forward vhost notifications
+ *
+ * @n: hdev call event notifier, the one the device uses to notify the SVQ.
+ */
+static void vhost_svq_handle_call(EventNotifier *n)
+{
+ VhostShadowVirtqueue *svq = container_of(n, VhostShadowVirtqueue,
+ hdev_call);
+ event_notifier_test_and_clear(n);
+ event_notifier_set(&svq->svq_call);
+}
+
+/**
+ * Set the call notifier for the SVQ to call the guest
+ *
+ * @svq: Shadow virtqueue
+ * @call_fd: call notifier
+ *
+ * Called within the BQL context.
+ */
+void vhost_svq_set_svq_call_fd(VhostShadowVirtqueue *svq, int call_fd)
+{
+ if (call_fd == VHOST_FILE_UNBIND) {
+ /*
+ * Make event_notifier_set fail if called while handling a device call.
+ *
+ * SVQ still needs device notifications, since it needs to keep
+ * forwarding used buffers even with the unbind.
+ */
+ memset(&svq->svq_call, 0, sizeof(svq->svq_call));
+ } else {
+ event_notifier_init_fd(&svq->svq_call, call_fd);
+ }
+}
+
/**
* Set a new file descriptor for the guest to kick the SVQ and notify for avail
*
@@ -94,6 +130,7 @@ VhostShadowVirtqueue *vhost_svq_new(void)
}
event_notifier_init_fd(&svq->svq_kick, VHOST_FILE_UNBIND);
+ event_notifier_set_handler(&svq->hdev_call, vhost_svq_handle_call);
return g_steal_pointer(&svq);
err_init_hdev_call:
@@ -113,6 +150,7 @@ void vhost_svq_free(gpointer pvq)
VhostShadowVirtqueue *vq = pvq;
vhost_svq_stop(vq);
event_notifier_cleanup(&vq->hdev_kick);
+ event_notifier_set_handler(&vq->hdev_call, NULL);
event_notifier_cleanup(&vq->hdev_call);
g_free(vq);
}
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 1dd799b3ef..d5865a5d77 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -724,6 +724,13 @@ static int vhost_vdpa_set_vring_dev_kick(struct vhost_dev *dev,
return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
}
+static int vhost_vdpa_set_vring_dev_call(struct vhost_dev *dev,
+ struct vhost_vring_file *file)
+{
+ trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd);
+ return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
+}
+
/**
* Set the shadow virtqueue descriptors to the device
*
@@ -731,6 +738,9 @@ static int vhost_vdpa_set_vring_dev_kick(struct vhost_dev *dev,
* @svq: The shadow virtqueue
* @idx: The index of the virtqueue in the vhost device
* @errp: Error
+ *
+ * Note that this function does not rewind the kick file descriptor if it
+ * fails to set the call one.
*/
static bool vhost_vdpa_svq_setup(struct vhost_dev *dev,
VhostShadowVirtqueue *svq,
@@ -747,6 +757,14 @@ static bool vhost_vdpa_svq_setup(struct vhost_dev *dev,
r = vhost_vdpa_set_vring_dev_kick(dev, &file);
if (unlikely(r != 0)) {
error_setg_errno(errp, -r, "Can't set device kick fd");
+ return false;
+ }
+
+ event_notifier = &svq->hdev_call;
+ file.fd = event_notifier_get_fd(event_notifier);
+ r = vhost_vdpa_set_vring_dev_call(dev, &file);
+ if (unlikely(r != 0)) {
+ error_setg_errno(errp, -r, "Can't set device call fd");
}
return r == 0;
@@ -872,8 +890,17 @@ static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev,
static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
struct vhost_vring_file *file)
{
- trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd);
- return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
+ struct vhost_vdpa *v = dev->opaque;
+
+ if (v->shadow_vqs_enabled) {
+ int vdpa_idx = file->index - dev->vq_index;
+ VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx);
+
+ vhost_svq_set_svq_call_fd(svq, file->fd);
+ return 0;
+ } else {
+ return vhost_vdpa_set_vring_dev_call(dev, file);
+ }
}
static int vhost_vdpa_get_features(struct vhost_dev *dev,
--
2.27.0
Thread overview: 21+ messages
2022-03-02 20:29 [PATCH v3 00/14] vDPA shadow virtqueue Eugenio Pérez
2022-03-02 20:29 ` [PATCH v3 01/14] vhost: Add VhostShadowVirtqueue Eugenio Pérez
2022-03-02 20:30 ` [PATCH v3 02/14] vhost: Add Shadow VirtQueue kick forwarding capabilities Eugenio Pérez
2022-03-02 20:30 ` Eugenio Pérez [this message]
2022-03-02 20:30 ` [PATCH v3 04/14] vhost: Add vhost_svq_valid_features to shadow vq Eugenio Pérez
2022-03-02 20:30 ` [PATCH v3 05/14] virtio: Add vhost_svq_get_vring_addr Eugenio Pérez
2022-03-02 20:30 ` [PATCH v3 06/14] vdpa: adapt vhost_ops callbacks to svq Eugenio Pérez
2022-03-02 20:30 ` [PATCH v3 07/14] vhost: Shadow virtqueue buffers forwarding Eugenio Pérez
2022-03-02 20:30 ` [PATCH v3 08/14] util: Add iova_tree_alloc_map Eugenio Pérez
2022-03-02 20:30 ` [PATCH v3 09/14] vhost: Add VhostIOVATree Eugenio Pérez
2022-03-02 20:30 ` [PATCH v3 10/14] vdpa: Add custom IOTLB translations to SVQ Eugenio Pérez
2022-03-02 20:30 ` [PATCH v3 11/14] vdpa: Adapt vhost_vdpa_get_vring_base " Eugenio Pérez
2022-03-02 20:30 ` [PATCH v3 12/14] vdpa: Never set log_base addr if SVQ is enabled Eugenio Pérez
2022-03-02 20:30 ` [PATCH v3 13/14] vdpa: Expose VHOST_F_LOG_ALL on SVQ Eugenio Pérez
2022-03-02 20:30 ` [PATCH v3 14/14] vdpa: Add x-svq to NetdevVhostVDPAOptions Eugenio Pérez
2022-03-03 6:08 ` Markus Armbruster
2022-03-03 9:53 ` Eugenio Perez Martin
2022-03-03 11:59 ` Markus Armbruster
2022-03-03 17:23 ` Eugenio Perez Martin
2022-03-04 6:29 ` Markus Armbruster
2022-03-04 8:54 ` Eugenio Perez Martin