From: "Eugenio Pérez" <eperezma@redhat.com>
To: qemu-devel@nongnu.org
Cc: Shannon <shannon.nelson@amd.com>,
Parav Pandit <parav@mellanox.com>,
Stefano Garzarella <sgarzare@redhat.com>,
"Michael S. Tsirkin" <mst@redhat.com>,
yin31149@gmail.com, Jason Wang <jasowang@redhat.com>,
Yajun Wu <yajunw@nvidia.com>,
Zhu Lingshan <lingshan.zhu@intel.com>,
Lei Yang <leiyang@redhat.com>,
Dragos Tatulea <dtatulea@nvidia.com>,
Juan Quintela <quintela@redhat.com>,
Laurent Vivier <lvivier@redhat.com>,
si-wei.liu@oracle.com, Gautam Dawar <gdawar@xilinx.com>
Subject: [RFC PATCH 06/18] vdpa: move file descriptor to vhost_vdpa_shared
Date: Thu, 19 Oct 2023 16:34:43 +0200
Message-ID: <20231019143455.2377694-7-eperezma@redhat.com>
In-Reply-To: <20231019143455.2377694-1-eperezma@redhat.com>

The next patches will register the vhost_vdpa memory listener while the
VM is migrating at the destination, so the memory can be mapped to the
device before the VM is stopped at the source. The main goal is to
reduce the downtime.

However, the destination QEMU is unaware of which vhost_vdpa device
will register its memory_listener. If the source guest has CVQ enabled,
it will be the CVQ device; otherwise, it will be the first one.

Move the file descriptor to VhostVDPAShared so all vhost_vdpa instances
can use it, rather than relying on it always being in the first or last
vhost_vdpa.
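
For illustration only (not part of the patch), a minimal sketch of the
resulting layout and of how callers now reach the descriptor; the helper
name example_device_fd is hypothetical:

    typedef struct vhost_vdpa_shared {
        int device_fd;   /* one backend fd, shared by every vhost_vdpa */
        /* ... other shared state (iova_range, iova tree, ...) */
    } VhostVDPAShared;

    /* Hypothetical helper: each vhost_vdpa reaches the same fd through
     * the shared struct instead of keeping a private copy. */
    static int example_device_fd(const struct vhost_vdpa *v)
    {
        return v->shared->device_fd;
    }
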
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
 include/hw/virtio/vhost-vdpa.h |  2 +-
 hw/virtio/vdpa-dev.c           |  2 +-
 hw/virtio/vhost-vdpa.c         | 14 +++++++-------
 net/vhost-vdpa.c               | 11 ++++-------
 4 files changed, 13 insertions(+), 16 deletions(-)

diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
index 01e0f25e27..796a180afa 100644
--- a/include/hw/virtio/vhost-vdpa.h
+++ b/include/hw/virtio/vhost-vdpa.h
@@ -32,6 +32,7 @@ typedef struct VhostVDPAHostNotifier {
/* Info shared by all vhost_vdpa device models */
typedef struct vhost_vdpa_shared {
+ int device_fd;
struct vhost_vdpa_iova_range iova_range;
/* IOVA mapping used by the Shadow Virtqueue */
@@ -42,7 +43,6 @@ typedef struct vhost_vdpa_shared {
} VhostVDPAShared;
typedef struct vhost_vdpa {
- int device_fd;
int index;
uint32_t msg_type;
bool iotlb_batch_begin_sent;
diff --git a/hw/virtio/vdpa-dev.c b/hw/virtio/vdpa-dev.c
index 457960d28a..8774986571 100644
--- a/hw/virtio/vdpa-dev.c
+++ b/hw/virtio/vdpa-dev.c
@@ -66,7 +66,6 @@ static void vhost_vdpa_device_realize(DeviceState *dev, Error **errp)
if (*errp) {
return;
}
- v->vdpa.device_fd = v->vhostfd;
v->vdev_id = vhost_vdpa_device_get_u32(v->vhostfd,
VHOST_VDPA_GET_DEVICE_ID, errp);
@@ -115,6 +114,7 @@ static void vhost_vdpa_device_realize(DeviceState *dev, Error **errp)
goto free_vqs;
}
v->vdpa.shared = g_new0(VhostVDPAShared, 1);
+ v->vdpa.shared->device_fd = v->vhostfd;
v->vdpa.shared->iova_range = iova_range;
ret = vhost_dev_init(&v->dev, &v->vdpa, VHOST_BACKEND_TYPE_VDPA, 0, NULL);
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 4f52bfa9ee..298aefd065 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -90,7 +90,7 @@ int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
hwaddr size, void *vaddr, bool readonly)
{
struct vhost_msg_v2 msg = {};
- int fd = v->device_fd;
+ int fd = v->shared->device_fd;
int ret = 0;
msg.type = v->msg_type;
@@ -122,7 +122,7 @@ int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
hwaddr size)
{
struct vhost_msg_v2 msg = {};
- int fd = v->device_fd;
+ int fd = v->shared->device_fd;
int ret = 0;
msg.type = v->msg_type;
@@ -145,7 +145,7 @@ int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
static void vhost_vdpa_listener_begin_batch(struct vhost_vdpa *v)
{
- int fd = v->device_fd;
+ int fd = v->shared->device_fd;
struct vhost_msg_v2 msg = {
.type = v->msg_type,
.iotlb.type = VHOST_IOTLB_BATCH_BEGIN,
@@ -173,7 +173,7 @@ static void vhost_vdpa_listener_commit(MemoryListener *listener)
struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
struct vhost_dev *dev = v->dev;
struct vhost_msg_v2 msg = {};
- int fd = v->device_fd;
+ int fd = v->shared->device_fd;
if (!(dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
return;
@@ -498,7 +498,7 @@ static int vhost_vdpa_call(struct vhost_dev *dev, unsigned long int request,
void *arg)
{
struct vhost_vdpa *v = dev->opaque;
- int fd = v->device_fd;
+ int fd = v->shared->device_fd;
int ret;
assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
@@ -656,7 +656,7 @@ static int vhost_vdpa_host_notifier_init(struct vhost_dev *dev, int queue_index)
struct vhost_vdpa *v = dev->opaque;
VirtIODevice *vdev = dev->vdev;
VhostVDPAHostNotifier *n;
- int fd = v->device_fd;
+ int fd = v->shared->device_fd;
void *addr;
char *name;
@@ -1285,7 +1285,7 @@ static void vhost_vdpa_suspend(struct vhost_dev *dev)
if (dev->backend_cap & BIT_ULL(VHOST_BACKEND_F_SUSPEND)) {
trace_vhost_vdpa_suspend(dev);
- r = ioctl(v->device_fd, VHOST_VDPA_SUSPEND);
+ r = ioctl(v->shared->device_fd, VHOST_VDPA_SUSPEND);
if (unlikely(r)) {
error_report("Cannot suspend: %s(%d)", g_strerror(errno), errno);
} else {
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 01202350ea..4d42d7a742 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -233,14 +233,11 @@ static void vhost_vdpa_cleanup(NetClientState *nc)
vhost_net_cleanup(s->vhost_net);
g_free(s->vhost_net);
s->vhost_net = NULL;
- }
- if (s->vhost_vdpa.device_fd >= 0) {
- qemu_close(s->vhost_vdpa.device_fd);
- s->vhost_vdpa.device_fd = -1;
}
if (s->vhost_vdpa.index != 0) {
return;
}
+ qemu_close(s->vhost_vdpa.shared->device_fd);
g_free(s->vhost_vdpa.shared);
}
@@ -439,7 +436,7 @@ static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v,
};
int r;
- r = ioctl(v->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
+ r = ioctl(v->shared->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
if (unlikely(r < 0)) {
error_report("Can't set vq group %u asid %u, errno=%d (%s)",
asid.index, asid.num, errno, g_strerror(errno));
@@ -535,7 +532,7 @@ static int vhost_vdpa_net_cvq_start(NetClientState *nc)
return 0;
}
- cvq_group = vhost_vdpa_get_vring_group(v->device_fd,
+ cvq_group = vhost_vdpa_get_vring_group(v->shared->device_fd,
v->dev->vq_index_end - 1,
&err);
if (unlikely(cvq_group < 0)) {
@@ -1439,7 +1436,6 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
qemu_set_info_str(nc, TYPE_VHOST_VDPA);
s = DO_UPCAST(VhostVDPAState, nc, nc);
- s->vhost_vdpa.device_fd = vdpa_device_fd;
s->vhost_vdpa.index = queue_pair_index;
s->always_svq = svq;
s->migration_state.notify = vdpa_net_migration_state_notifier;
@@ -1448,6 +1444,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
vhost_vdpa_net_valid_svq_features(features,
&s->vhost_vdpa.migration_blocker);
s->vhost_vdpa.shared = g_new0(VhostVDPAShared, 1);
+ s->vhost_vdpa.shared->device_fd = vdpa_device_fd;
s->vhost_vdpa.shared->iova_range = iova_range;
s->vhost_vdpa.shared->shadow_data = svq;
} else if (!is_datapath) {
--
2.39.3