From: Steve Sistare <steven.sistare@oracle.com>
To: qemu-devel@nongnu.org
Cc: "Michael S. Tsirkin" <mst@redhat.com>,
Jason Wang <jasowang@redhat.com>,
Philippe Mathieu-Daudé <philmd@linaro.org>,
Eugenio Perez Martin <eperezma@redhat.com>,
Peter Xu <peterx@redhat.com>, Fabiano Rosas <farosas@suse.de>,
Si-Wei Liu <si-wei.liu@oracle.com>,
Steve Sistare <steven.sistare@oracle.com>
Subject: [RFC V1 3/7] vdpa/cpr: preserve device fd
Date: Fri, 12 Jul 2024 07:02:07 -0700 [thread overview]
Message-ID: <1720792931-456433-4-git-send-email-steven.sistare@oracle.com> (raw)
In-Reply-To: <1720792931-456433-1-git-send-email-steven.sistare@oracle.com>
Save the vdpa device fd in CPR state when it is created, and fetch the fd
from that state after CPR. Record that the fd was reused, for use by
subsequent patches in this series.
Signed-off-by: Steve Sistare <steven.sistare@oracle.com>
---
include/hw/virtio/vhost-vdpa.h | 3 +++
net/vhost-vdpa.c | 24 ++++++++++++++++++------
2 files changed, 21 insertions(+), 6 deletions(-)
diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
index 0a9575b469..427458cfed 100644
--- a/include/hw/virtio/vhost-vdpa.h
+++ b/include/hw/virtio/vhost-vdpa.h
@@ -54,6 +54,9 @@ typedef struct vhost_vdpa_shared {
/* Vdpa must send shadow addresses as IOTLB key for data queues, not GPA */
bool shadow_data;
+ /* Device descriptor is being reused after CPR restart */
+ bool reused;
+
/* SVQ switching is in progress, or already completed? */
SVQTransitionState svq_switching;
} VhostVDPAShared;
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index daa38428c5..e6010e8900 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -12,6 +12,7 @@
#include "qemu/osdep.h"
#include "clients.h"
#include "hw/virtio/virtio-net.h"
+#include "migration/cpr.h"
#include "net/vhost_net.h"
#include "net/vhost-vdpa.h"
#include "hw/virtio/vhost-vdpa.h"
@@ -240,8 +241,10 @@ static void vhost_vdpa_cleanup(NetClientState *nc)
if (s->vhost_vdpa.index != 0) {
return;
}
+ cpr_delete_fd(nc->name, 0);
qemu_close(s->vhost_vdpa.shared->device_fd);
g_free(s->vhost_vdpa.shared);
+ s->vhost_vdpa.shared = NULL;
}
/** Dummy SetSteeringEBPF to support RSS for vhost-vdpa backend */
@@ -1675,6 +1678,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
int nvqs,
bool is_datapath,
bool svq,
+ bool reused,
struct vhost_vdpa_iova_range iova_range,
uint64_t features,
VhostVDPAShared *shared,
@@ -1712,6 +1716,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
&s->vhost_vdpa.migration_blocker);
s->vhost_vdpa.shared = g_new0(VhostVDPAShared, 1);
s->vhost_vdpa.shared->device_fd = vdpa_device_fd;
+ s->vhost_vdpa.shared->reused = reused;
s->vhost_vdpa.shared->iova_range = iova_range;
s->vhost_vdpa.shared->shadow_data = svq;
} else if (!is_datapath) {
@@ -1793,6 +1798,7 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
struct vhost_vdpa_iova_range iova_range;
NetClientState *nc;
int queue_pairs, r, i = 0, has_cvq = 0;
+ bool reused;
assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
opts = &netdev->u.vhost_vdpa;
@@ -1808,13 +1814,17 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
return -1;
}
- if (opts->vhostdev) {
+ vdpa_device_fd = cpr_find_fd(name, 0);
+ reused = (vdpa_device_fd != -1);
+
+ if (opts->vhostdev && vdpa_device_fd == -1) {
vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp);
if (vdpa_device_fd == -1) {
return -errno;
}
- } else {
- /* has_vhostfd */
+ cpr_save_fd(name, 0, vdpa_device_fd);
+
+ } else if (opts->vhostfd) {
vdpa_device_fd = monitor_fd_param(monitor_cur(), opts->vhostfd, errp);
if (vdpa_device_fd == -1) {
error_prepend(errp, "vhost-vdpa: unable to parse vhostfd: ");
@@ -1855,7 +1865,8 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
}
ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
vdpa_device_fd, i, 2, true, opts->x_svq,
- iova_range, features, shared, errp);
+ reused, iova_range, features, shared,
+ errp);
if (!ncs[i])
goto err;
}
@@ -1866,8 +1877,8 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
vdpa_device_fd, i, 1, false,
- opts->x_svq, iova_range, features, shared,
- errp);
+ opts->x_svq, reused, iova_range, features,
+ shared, errp);
if (!nc)
goto err;
}
@@ -1882,6 +1893,7 @@ err:
}
qemu_close(vdpa_device_fd);
+ cpr_delete_fd(name, 0);
return -1;
}
--
2.39.3
next prev parent reply other threads:[~2024-07-12 14:04 UTC|newest]
Thread overview: 10+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-07-12 14:02 [RFC V1 0/7] Live update: vdpa Steve Sistare
2024-07-12 14:02 ` [RFC V1 1/7] migration: cpr_needed_for_reuse Steve Sistare
2024-07-12 14:02 ` [RFC V1 2/7] migration: skip dirty memory tracking for cpr Steve Sistare
2024-08-12 18:57 ` Fabiano Rosas
2024-08-14 19:54 ` Steven Sistare
2024-07-12 14:02 ` Steve Sistare [this message]
2024-07-12 14:02 ` [RFC V1 4/7] vdpa/cpr: kernel interfaces Steve Sistare
2024-07-12 14:02 ` [RFC V1 5/7] vdpa/cpr: use VHOST_NEW_OWNER Steve Sistare
2024-07-12 14:02 ` [RFC V1 6/7] vdpa/cpr: pass shadow parameter to dma functions Steve Sistare
2024-07-12 14:02 ` [RFC V1 7/7] vdpa/cpr: preserve dma mappings Steve Sistare
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1720792931-456433-4-git-send-email-steven.sistare@oracle.com \
--to=steven.sistare@oracle.com \
--cc=eperezma@redhat.com \
--cc=farosas@suse.de \
--cc=jasowang@redhat.com \
--cc=mst@redhat.com \
--cc=peterx@redhat.com \
--cc=philmd@linaro.org \
--cc=qemu-devel@nongnu.org \
--cc=si-wei.liu@oracle.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).