* [PATCH v3 1/6] vhost: stop transfer elem ownership in vhost_handle_guest_kick
2022-07-22 8:56 [PATCH v3 0/6] NIC vhost-vdpa state restore via Shadow CVQ Eugenio Pérez
@ 2022-07-22 8:56 ` Eugenio Pérez
2022-07-22 8:56 ` [PATCH v3 2/6] vdpa: Extract vhost_vdpa_net_cvq_add from vhost_vdpa_net_handle_ctrl_avail Eugenio Pérez
` (4 subsequent siblings)
5 siblings, 0 replies; 7+ messages in thread
From: Eugenio Pérez @ 2022-07-22 8:56 UTC (permalink / raw)
To: qemu-devel
Cc: Eric Blake, Gautam Dawar, Gonglei (Arei), Cornelia Huck,
Zhu Lingshan, Cindy Lu, Liuxiangdong, Laurent Vivier,
Markus Armbruster, Paolo Bonzini, Parav Pandit,
Harpreet Singh Anand, Jason Wang, Michael S. Tsirkin, Eli Cohen,
Stefan Hajnoczi, Stefano Garzarella
It was easier to allow vhost_svq_add to handle the memory. Now that we
will allow qemu to add elements to a SVQ without the guest's knowledge,
it's better to handle it in the caller.
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
hw/virtio/vhost-shadow-virtqueue.c | 10 ++++------
1 file changed, 4 insertions(+), 6 deletions(-)
diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
index e4956728dd..ffd2b2c972 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -233,9 +233,6 @@ static void vhost_svq_kick(VhostShadowVirtqueue *svq)
/**
* Add an element to a SVQ.
*
- * The caller must check that there is enough slots for the new element. It
- * takes ownership of the element: In case of failure not ENOSPC, it is free.
- *
* Return -EINVAL if element is invalid, -ENOSPC if dev queue is full
*/
int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,
@@ -252,7 +249,6 @@ int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,
ok = vhost_svq_add_split(svq, out_sg, out_num, in_sg, in_num, &qemu_head);
if (unlikely(!ok)) {
- g_free(elem);
return -EINVAL;
}
@@ -293,7 +289,7 @@ static void vhost_handle_guest_kick(VhostShadowVirtqueue *svq)
virtio_queue_set_notification(svq->vq, false);
while (true) {
- VirtQueueElement *elem;
+ g_autofree VirtQueueElement *elem;
int r;
if (svq->next_guest_avail_elem) {
@@ -324,12 +320,14 @@ static void vhost_handle_guest_kick(VhostShadowVirtqueue *svq)
* queue the current guest descriptor and ignore kicks
* until some elements are used.
*/
- svq->next_guest_avail_elem = elem;
+ svq->next_guest_avail_elem = g_steal_pointer(&elem);
}
/* VQ is full or broken, just return and ignore kicks */
return;
}
+ /* elem belongs to SVQ or external caller now */
+ elem = NULL;
}
virtio_queue_set_notification(svq->vq, true);
--
2.31.1
^ permalink raw reply related [flat|nested] 7+ messages in thread
* [PATCH v3 2/6] vdpa: Extract vhost_vdpa_net_cvq_add from vhost_vdpa_net_handle_ctrl_avail
2022-07-22 8:56 [PATCH v3 0/6] NIC vhost-vdpa state restore via Shadow CVQ Eugenio Pérez
2022-07-22 8:56 ` [PATCH v3 1/6] vhost: stop transfer elem ownership in vhost_handle_guest_kick Eugenio Pérez
@ 2022-07-22 8:56 ` Eugenio Pérez
2022-07-22 8:56 ` [PATCH v3 3/6] vdpa: Make vhost_vdpa_net_cvq_map_elem accept any out sg Eugenio Pérez
` (3 subsequent siblings)
5 siblings, 0 replies; 7+ messages in thread
From: Eugenio Pérez @ 2022-07-22 8:56 UTC (permalink / raw)
To: qemu-devel
Cc: Eric Blake, Gautam Dawar, Gonglei (Arei), Cornelia Huck,
Zhu Lingshan, Cindy Lu, Liuxiangdong, Laurent Vivier,
Markus Armbruster, Paolo Bonzini, Parav Pandit,
Harpreet Singh Anand, Jason Wang, Michael S. Tsirkin, Eli Cohen,
Stefan Hajnoczi, Stefano Garzarella
So we can reuse it to inject state messages.
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
net/vhost-vdpa.c | 74 ++++++++++++++++++++++++++++++------------------
1 file changed, 47 insertions(+), 27 deletions(-)
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 6abad276a6..1b82ac2e07 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -334,6 +334,46 @@ static bool vhost_vdpa_net_cvq_map_elem(VhostVDPAState *s,
return true;
}
+static virtio_net_ctrl_ack vhost_vdpa_net_cvq_add(VhostShadowVirtqueue *svq,
+ const struct iovec *dev_buffers)
+{
+ /* in buffer used for device model */
+ virtio_net_ctrl_ack status;
+ size_t dev_written;
+ int r;
+
+ /*
+ * Add a fake non-NULL VirtQueueElement since we'll remove before SVQ
+ * event loop can get it.
+ */
+ r = vhost_svq_add(svq, &dev_buffers[0], 1, &dev_buffers[1], 1, (void *)1);
+ if (unlikely(r != 0)) {
+ if (unlikely(r == -ENOSPC)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
+ __func__);
+ }
+ return VIRTIO_NET_ERR;
+ }
+
+ /*
+ * We can poll here since we've had BQL from the time we sent the
+ * descriptor. Also, we need to take the answer before SVQ pulls by itself,
+ * when BQL is released
+ */
+ dev_written = vhost_svq_poll(svq);
+ if (unlikely(dev_written < sizeof(status))) {
+ error_report("Insufficient written data (%zu)", dev_written);
+ return VIRTIO_NET_ERR;
+ }
+
+ memcpy(&status, dev_buffers[1].iov_base, sizeof(status));
+ if (status != VIRTIO_NET_OK) {
+ return VIRTIO_NET_ERR;
+ }
+
+ return VIRTIO_NET_OK;
+}
+
/**
* Do not forward commands not supported by SVQ. Otherwise, the device could
* accept it and qemu would not know how to update the device model.
@@ -380,19 +420,18 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
void *opaque)
{
VhostVDPAState *s = opaque;
- size_t in_len, dev_written;
+ size_t in_len;
virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
/* out and in buffers sent to the device */
struct iovec dev_buffers[2] = {
{ .iov_base = s->cvq_cmd_out_buffer },
{ .iov_base = s->cvq_cmd_in_buffer },
};
- /* in buffer used for device model */
+ /* in buffer seen by virtio-net device model */
const struct iovec in = {
.iov_base = &status,
.iov_len = sizeof(status),
};
- int r = -EINVAL;
bool ok;
ok = vhost_vdpa_net_cvq_map_elem(s, elem, dev_buffers);
@@ -405,35 +444,16 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
goto out;
}
- r = vhost_svq_add(svq, &dev_buffers[0], 1, &dev_buffers[1], 1, elem);
- if (unlikely(r != 0)) {
- if (unlikely(r == -ENOSPC)) {
- qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
- __func__);
- }
- goto out;
- }
-
- /*
- * We can poll here since we've had BQL from the time we sent the
- * descriptor. Also, we need to take the answer before SVQ pulls by itself,
- * when BQL is released
- */
- dev_written = vhost_svq_poll(svq);
- if (unlikely(dev_written < sizeof(status))) {
- error_report("Insufficient written data (%zu)", dev_written);
- goto out;
- }
-
- memcpy(&status, dev_buffers[1].iov_base, sizeof(status));
+ status = vhost_vdpa_net_cvq_add(svq, dev_buffers);
if (status != VIRTIO_NET_OK) {
goto out;
}
status = VIRTIO_NET_ERR;
- virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, dev_buffers, 1);
- if (status != VIRTIO_NET_OK) {
+ in_len = virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, dev_buffers, 1);
+ if (in_len != sizeof(status) || status != VIRTIO_NET_OK) {
error_report("Bad CVQ processing in model");
+ return VIRTIO_NET_ERR;
}
out:
@@ -450,7 +470,7 @@ out:
if (dev_buffers[1].iov_base) {
vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, dev_buffers[1].iov_base);
}
- return r;
+ return status == VIRTIO_NET_OK ? 0 : 1;
}
static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
--
2.31.1
^ permalink raw reply related [flat|nested] 7+ messages in thread
* [PATCH v3 3/6] vdpa: Make vhost_vdpa_net_cvq_map_elem accept any out sg
2022-07-22 8:56 [PATCH v3 0/6] NIC vhost-vdpa state restore via Shadow CVQ Eugenio Pérez
2022-07-22 8:56 ` [PATCH v3 1/6] vhost: stop transfer elem ownership in vhost_handle_guest_kick Eugenio Pérez
2022-07-22 8:56 ` [PATCH v3 2/6] vdpa: Extract vhost_vdpa_net_cvq_add from vhost_vdpa_net_handle_ctrl_avail Eugenio Pérez
@ 2022-07-22 8:56 ` Eugenio Pérez
2022-07-22 8:56 ` [PATCH v3 4/6] vdpa: add NetClientState->start() callback Eugenio Pérez
` (2 subsequent siblings)
5 siblings, 0 replies; 7+ messages in thread
From: Eugenio Pérez @ 2022-07-22 8:56 UTC (permalink / raw)
To: qemu-devel
Cc: Eric Blake, Gautam Dawar, Gonglei (Arei), Cornelia Huck,
Zhu Lingshan, Cindy Lu, Liuxiangdong, Laurent Vivier,
Markus Armbruster, Paolo Bonzini, Parav Pandit,
Harpreet Singh Anand, Jason Wang, Michael S. Tsirkin, Eli Cohen,
Stefan Hajnoczi, Stefano Garzarella
So it's generic enough to accept any out sg buffer and we can inject
NIC state messages.
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
net/vhost-vdpa.c | 29 +++++++++++++++--------------
1 file changed, 15 insertions(+), 14 deletions(-)
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 1b82ac2e07..bbe1830824 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -302,35 +302,36 @@ dma_map_err:
}
/**
- * Copy the guest element into a dedicated buffer suitable to be sent to NIC
+ * Maps out sg and in buffer into dedicated buffers suitable to be sent to NIC
*
- * @iov: [0] is the out buffer, [1] is the in one
+ * @dev_iov: [0] is the out buffer, [1] is the in one
*/
-static bool vhost_vdpa_net_cvq_map_elem(VhostVDPAState *s,
- VirtQueueElement *elem,
- struct iovec *iov)
+static bool vhost_vdpa_net_cvq_map_sg(VhostVDPAState *s,
+ const struct iovec *out, size_t out_num,
+ struct iovec *dev_iov)
{
size_t in_copied;
bool ok;
- iov[0].iov_base = s->cvq_cmd_out_buffer;
- ok = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, elem->out_sg, elem->out_num,
- vhost_vdpa_net_cvq_cmd_len(), iov[0].iov_base,
- &iov[0].iov_len, false);
+ dev_iov[0].iov_base = s->cvq_cmd_out_buffer;
+ ok = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, out, out_num,
+ vhost_vdpa_net_cvq_cmd_len(),
+ dev_iov[0].iov_base, &dev_iov[0].iov_len,
+ false);
if (unlikely(!ok)) {
return false;
}
- iov[1].iov_base = s->cvq_cmd_in_buffer;
+ dev_iov[1].iov_base = s->cvq_cmd_in_buffer;
ok = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, NULL, 0,
- sizeof(virtio_net_ctrl_ack), iov[1].iov_base,
- &in_copied, true);
+ sizeof(virtio_net_ctrl_ack),
+ dev_iov[1].iov_base, &in_copied, true);
if (unlikely(!ok)) {
vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
return false;
}
- iov[1].iov_len = sizeof(virtio_net_ctrl_ack);
+ dev_iov[1].iov_len = sizeof(virtio_net_ctrl_ack);
return true;
}
@@ -434,7 +435,7 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
};
bool ok;
- ok = vhost_vdpa_net_cvq_map_elem(s, elem, dev_buffers);
+ ok = vhost_vdpa_net_cvq_map_sg(s, elem->out_sg, elem->out_num, dev_buffers);
if (unlikely(!ok)) {
goto out;
}
--
2.31.1
^ permalink raw reply related [flat|nested] 7+ messages in thread
* [PATCH v3 4/6] vdpa: add NetClientState->start() callback
2022-07-22 8:56 [PATCH v3 0/6] NIC vhost-vdpa state restore via Shadow CVQ Eugenio Pérez
` (2 preceding siblings ...)
2022-07-22 8:56 ` [PATCH v3 3/6] vdpa: Make vhost_vdpa_net_cvq_map_elem accept any out sg Eugenio Pérez
@ 2022-07-22 8:56 ` Eugenio Pérez
2022-07-22 8:56 ` [PATCH v3 5/6] vdpa: Add virtio-net mac address via CVQ at start Eugenio Pérez
2022-07-22 8:56 ` [PATCH v3 6/6] vdpa: Delete CVQ migration blocker Eugenio Pérez
5 siblings, 0 replies; 7+ messages in thread
From: Eugenio Pérez @ 2022-07-22 8:56 UTC (permalink / raw)
To: qemu-devel
Cc: Eric Blake, Gautam Dawar, Gonglei (Arei), Cornelia Huck,
Zhu Lingshan, Cindy Lu, Liuxiangdong, Laurent Vivier,
Markus Armbruster, Paolo Bonzini, Parav Pandit,
Harpreet Singh Anand, Jason Wang, Michael S. Tsirkin, Eli Cohen,
Stefan Hajnoczi, Stefano Garzarella
It allows per-net client operations right after the device's successful
start.
Vhost-vdpa net will use it to add the CVQ buffers to restore the device
status.
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
include/net/net.h | 2 ++
hw/net/vhost_net.c | 7 +++++++
2 files changed, 9 insertions(+)
diff --git a/include/net/net.h b/include/net/net.h
index 523136c7ac..ad9e80083a 100644
--- a/include/net/net.h
+++ b/include/net/net.h
@@ -44,6 +44,7 @@ typedef struct NICConf {
typedef void (NetPoll)(NetClientState *, bool enable);
typedef bool (NetCanReceive)(NetClientState *);
+typedef int (NetStart)(NetClientState *);
typedef ssize_t (NetReceive)(NetClientState *, const uint8_t *, size_t);
typedef ssize_t (NetReceiveIOV)(NetClientState *, const struct iovec *, int);
typedef void (NetCleanup) (NetClientState *);
@@ -71,6 +72,7 @@ typedef struct NetClientInfo {
NetReceive *receive_raw;
NetReceiveIOV *receive_iov;
NetCanReceive *can_receive;
+ NetStart *start;
NetCleanup *cleanup;
LinkStatusChanged *link_status_changed;
QueryRxFilter *query_rx_filter;
diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
index ccac5b7a64..ddd9ee0441 100644
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -274,6 +274,13 @@ static int vhost_net_start_one(struct vhost_net *net,
}
}
}
+
+ if (net->nc->info->start) {
+ r = net->nc->info->start(net->nc);
+ if (r < 0) {
+ goto fail;
+ }
+ }
return 0;
fail:
file.fd = -1;
--
2.31.1
^ permalink raw reply related [flat|nested] 7+ messages in thread
* [PATCH v3 5/6] vdpa: Add virtio-net mac address via CVQ at start
2022-07-22 8:56 [PATCH v3 0/6] NIC vhost-vdpa state restore via Shadow CVQ Eugenio Pérez
` (3 preceding siblings ...)
2022-07-22 8:56 ` [PATCH v3 4/6] vdpa: add NetClientState->start() callback Eugenio Pérez
@ 2022-07-22 8:56 ` Eugenio Pérez
2022-07-22 8:56 ` [PATCH v3 6/6] vdpa: Delete CVQ migration blocker Eugenio Pérez
5 siblings, 0 replies; 7+ messages in thread
From: Eugenio Pérez @ 2022-07-22 8:56 UTC (permalink / raw)
To: qemu-devel
Cc: Eric Blake, Gautam Dawar, Gonglei (Arei), Cornelia Huck,
Zhu Lingshan, Cindy Lu, Liuxiangdong, Laurent Vivier,
Markus Armbruster, Paolo Bonzini, Parav Pandit,
Harpreet Singh Anand, Jason Wang, Michael S. Tsirkin, Eli Cohen,
Stefan Hajnoczi, Stefano Garzarella
This is needed so the destination vdpa device sees the same state as the
guest set in the source.
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
include/net/vhost-vdpa.h | 1 +
net/vhost-vdpa.c | 57 ++++++++++++++++++++++++++++++++++++++++
2 files changed, 58 insertions(+)
diff --git a/include/net/vhost-vdpa.h b/include/net/vhost-vdpa.h
index b81f9a6f2a..38d65e845d 100644
--- a/include/net/vhost-vdpa.h
+++ b/include/net/vhost-vdpa.h
@@ -15,6 +15,7 @@
#define TYPE_VHOST_VDPA "vhost-vdpa"
struct vhost_net *vhost_vdpa_get_vhost_net(NetClientState *nc);
+int vhost_vdpa_start(NetClientState *nc);
extern const int vdpa_feature_bits[];
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index bbe1830824..84e90f067a 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -375,6 +375,63 @@ static virtio_net_ctrl_ack vhost_vdpa_net_cvq_add(VhostShadowVirtqueue *svq,
return VIRTIO_NET_OK;
}
+int vhost_vdpa_start(NetClientState *nc)
+{
+ VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
+ struct vhost_vdpa *v = &s->vhost_vdpa;
+ VirtIONet *n = VIRTIO_NET(v->dev->vdev);
+ uint64_t features = v->dev->vdev->host_features;
+ VhostShadowVirtqueue *svq;
+
+ if (!v->shadow_vqs_enabled) {
+ return 0;
+ }
+
+ if (v->dev->vq_index + v->dev->nvqs != v->dev->vq_index_end) {
+ /* Only interested in CVQ */
+ return 0;
+ }
+
+ assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
+
+ svq = g_ptr_array_index(v->shadow_vqs, 0);
+ if (features & BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR)) {
+ const struct virtio_net_ctrl_hdr ctrl = {
+ .class = VIRTIO_NET_CTRL_MAC,
+ .cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET,
+ };
+ uint8_t mac[6];
+ const struct iovec out[] = {
+ {
+ .iov_base = (void *)&ctrl,
+ .iov_len = sizeof(ctrl),
+ },{
+ .iov_base = mac,
+ .iov_len = sizeof(mac),
+ },
+ };
+ struct iovec dev_buffers[2] = {
+ { .iov_base = s->cvq_cmd_out_buffer },
+ { .iov_base = s->cvq_cmd_in_buffer },
+ };
+ bool ok;
+ virtio_net_ctrl_ack state;
+
+ ok = vhost_vdpa_net_cvq_map_sg(s, out, ARRAY_SIZE(out), dev_buffers);
+ if (unlikely(!ok)) {
+ return -1;
+ }
+
+ memcpy(mac, n->mac, sizeof(mac));
+ state = vhost_vdpa_net_cvq_add(svq, dev_buffers);
+ vhost_vdpa_cvq_unmap_buf(v, dev_buffers[0].iov_base);
+ vhost_vdpa_cvq_unmap_buf(v, dev_buffers[1].iov_base);
+ return state == VIRTIO_NET_OK ? 0 : 1;
+ }
+
+ return 0;
+}
+
/**
* Do not forward commands not supported by SVQ. Otherwise, the device could
* accept it and qemu would not know how to update the device model.
--
2.31.1
^ permalink raw reply related [flat|nested] 7+ messages in thread
* [PATCH v3 6/6] vdpa: Delete CVQ migration blocker
2022-07-22 8:56 [PATCH v3 0/6] NIC vhost-vdpa state restore via Shadow CVQ Eugenio Pérez
` (4 preceding siblings ...)
2022-07-22 8:56 ` [PATCH v3 5/6] vdpa: Add virtio-net mac address via CVQ at start Eugenio Pérez
@ 2022-07-22 8:56 ` Eugenio Pérez
5 siblings, 0 replies; 7+ messages in thread
From: Eugenio Pérez @ 2022-07-22 8:56 UTC (permalink / raw)
To: qemu-devel
Cc: Eric Blake, Gautam Dawar, Gonglei (Arei), Cornelia Huck,
Zhu Lingshan, Cindy Lu, Liuxiangdong, Laurent Vivier,
Markus Armbruster, Paolo Bonzini, Parav Pandit,
Harpreet Singh Anand, Jason Wang, Michael S. Tsirkin, Eli Cohen,
Stefan Hajnoczi, Stefano Garzarella
We can restore the device state in the destination via CVQ now. Remove
the migration blocker.
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
include/hw/virtio/vhost-vdpa.h | 1 -
hw/virtio/vhost-vdpa.c | 14 --------------
net/vhost-vdpa.c | 2 --
3 files changed, 17 deletions(-)
diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
index d10a89303e..1111d85643 100644
--- a/include/hw/virtio/vhost-vdpa.h
+++ b/include/hw/virtio/vhost-vdpa.h
@@ -35,7 +35,6 @@ typedef struct vhost_vdpa {
bool shadow_vqs_enabled;
/* IOVA mapping used by the Shadow Virtqueue */
VhostIOVATree *iova_tree;
- Error *migration_blocker;
GPtrArray *shadow_vqs;
const VhostShadowVirtqueueOps *shadow_vq_ops;
void *shadow_vq_ops_opaque;
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 3ff9ce3501..9a2daef7e3 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -1023,13 +1023,6 @@ static bool vhost_vdpa_svqs_start(struct vhost_dev *dev)
return true;
}
- if (v->migration_blocker) {
- int r = migrate_add_blocker(v->migration_blocker, &err);
- if (unlikely(r < 0)) {
- return false;
- }
- }
-
for (i = 0; i < v->shadow_vqs->len; ++i) {
VirtQueue *vq = virtio_get_queue(dev->vdev, dev->vq_index + i);
VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
@@ -1072,10 +1065,6 @@ err:
vhost_svq_stop(svq);
}
- if (v->migration_blocker) {
- migrate_del_blocker(v->migration_blocker);
- }
-
return false;
}
@@ -1095,9 +1084,6 @@ static bool vhost_vdpa_svqs_stop(struct vhost_dev *dev)
}
}
- if (v->migration_blocker) {
- migrate_del_blocker(v->migration_blocker);
- }
return true;
}
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 84e90f067a..82c115bee4 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -573,8 +573,6 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
s->vhost_vdpa.shadow_vq_ops_opaque = s;
- error_setg(&s->vhost_vdpa.migration_blocker,
- "Migration disabled: vhost-vdpa uses CVQ.");
}
ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
if (ret) {
--
2.31.1
^ permalink raw reply related [flat|nested] 7+ messages in thread