From: Li Feng <fengli@smartx.com>
To: "Michael S. Tsirkin" <mst@redhat.com>,
Raphael Norwitz <raphael.norwitz@nutanix.com>,
Kevin Wolf <kwolf@redhat.com>, Hanna Reitz <hreitz@redhat.com>,
Paolo Bonzini <pbonzini@redhat.com>, Fam Zheng <fam@euphon.net>,
qemu-block@nongnu.org (open list:Block layer core),
qemu-devel@nongnu.org (open list:All patches CC here)
Cc: lifeng1519@gmail.com, Li Feng <fengli@smartx.com>
Subject: [PATCH] vhost-user-scsi: support reconnect to backend
Date: Fri, 21 Jul 2023 18:51:59 +0800 [thread overview]
Message-ID: <20230721105205.1714449-1-fengli@smartx.com> (raw)
If the vhost-user backend crashes and then restarts, the device is left broken
and never recovers. This patch adds reconnect support to vhost-user-scsi so the
device can re-establish the connection and resume operation.
Tested with an SPDK backend.
Signed-off-by: Li Feng <fengli@smartx.com>
---
hw/block/vhost-user-blk.c | 2 -
hw/scsi/vhost-scsi-common.c | 27 ++---
hw/scsi/vhost-user-scsi.c | 163 +++++++++++++++++++++++++---
include/hw/virtio/vhost-user-scsi.h | 3 +
include/hw/virtio/vhost.h | 2 +
5 files changed, 165 insertions(+), 32 deletions(-)
diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c
index eecf3f7a81..f250c740b5 100644
--- a/hw/block/vhost-user-blk.c
+++ b/hw/block/vhost-user-blk.c
@@ -32,8 +32,6 @@
#include "sysemu/sysemu.h"
#include "sysemu/runstate.h"
-#define REALIZE_CONNECTION_RETRIES 3
-
static const int user_feature_bits[] = {
VIRTIO_BLK_F_SIZE_MAX,
VIRTIO_BLK_F_SEG_MAX,
diff --git a/hw/scsi/vhost-scsi-common.c b/hw/scsi/vhost-scsi-common.c
index a06f01af26..08801886b8 100644
--- a/hw/scsi/vhost-scsi-common.c
+++ b/hw/scsi/vhost-scsi-common.c
@@ -52,16 +52,22 @@ int vhost_scsi_common_start(VHostSCSICommon *vsc)
vsc->dev.acked_features = vdev->guest_features;
- assert(vsc->inflight == NULL);
- vsc->inflight = g_new0(struct vhost_inflight, 1);
- ret = vhost_dev_get_inflight(&vsc->dev,
- vs->conf.virtqueue_size,
- vsc->inflight);
+ ret = vhost_dev_prepare_inflight(&vsc->dev, vdev);
if (ret < 0) {
- error_report("Error get inflight: %d", -ret);
+ error_report("Error setting inflight format: %d", -ret);
goto err_guest_notifiers;
}
+ if (!vsc->inflight->addr) {
+ ret = vhost_dev_get_inflight(&vsc->dev,
+ vs->conf.virtqueue_size,
+ vsc->inflight);
+ if (ret < 0) {
+ error_report("Error get inflight: %d", -ret);
+ goto err_guest_notifiers;
+ }
+ }
+
ret = vhost_dev_set_inflight(&vsc->dev, vsc->inflight);
if (ret < 0) {
error_report("Error set inflight: %d", -ret);
@@ -85,9 +91,6 @@ int vhost_scsi_common_start(VHostSCSICommon *vsc)
return ret;
err_guest_notifiers:
- g_free(vsc->inflight);
- vsc->inflight = NULL;
-
k->set_guest_notifiers(qbus->parent, vsc->dev.nvqs, false);
err_host_notifiers:
vhost_dev_disable_notifiers(&vsc->dev, vdev);
@@ -111,12 +114,6 @@ void vhost_scsi_common_stop(VHostSCSICommon *vsc)
}
assert(ret >= 0);
- if (vsc->inflight) {
- vhost_dev_free_inflight(vsc->inflight);
- g_free(vsc->inflight);
- vsc->inflight = NULL;
- }
-
vhost_dev_disable_notifiers(&vsc->dev, vdev);
}
diff --git a/hw/scsi/vhost-user-scsi.c b/hw/scsi/vhost-user-scsi.c
index ee99b19e7a..e0e88b0c42 100644
--- a/hw/scsi/vhost-user-scsi.c
+++ b/hw/scsi/vhost-user-scsi.c
@@ -89,14 +89,126 @@ static void vhost_dummy_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
}
+static int vhost_user_scsi_connect(DeviceState *dev, Error **errp)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+ VHostUserSCSI *s = VHOST_USER_SCSI(vdev);
+ VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s);
+ VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
+ int ret = 0;
+
+ if (s->connected) {
+ return 0;
+ }
+ s->connected = true;
+
+ vsc->dev.num_queues = vs->conf.num_queues;
+ vsc->dev.nvqs = VIRTIO_SCSI_VQ_NUM_FIXED + vs->conf.num_queues;
+ vsc->dev.vqs = s->vhost_vqs;
+ vsc->dev.vq_index = 0;
+ vsc->dev.backend_features = 0;
+
+ ret = vhost_dev_init(&vsc->dev, &s->vhost_user, VHOST_BACKEND_TYPE_USER, 0,
+ errp);
+ if (ret < 0) {
+ return ret;
+ }
+
+ /* restore vhost state */
+ if (virtio_device_started(vdev, vdev->status)) {
+ ret = vhost_scsi_common_start(vsc);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void vhost_user_scsi_event(void *opaque, QEMUChrEvent event);
+
+static void vhost_user_scsi_disconnect(DeviceState *dev)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+ VHostUserSCSI *s = VHOST_USER_SCSI(vdev);
+ VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s);
+ VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
+
+ if (!s->connected) {
+ return;
+ }
+ s->connected = false;
+
+ vhost_scsi_common_stop(vsc);
+
+ vhost_dev_cleanup(&vsc->dev);
+
+ /* Re-instate the event handler for new connections */
+ qemu_chr_fe_set_handlers(&vs->conf.chardev, NULL, NULL,
+ vhost_user_scsi_event, NULL, dev, NULL, true);
+}
+
+static void vhost_user_scsi_event(void *opaque, QEMUChrEvent event)
+{
+ DeviceState *dev = opaque;
+ VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+ VHostUserSCSI *s = VHOST_USER_SCSI(vdev);
+ VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s);
+ VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
+ Error *local_err = NULL;
+
+ switch (event) {
+ case CHR_EVENT_OPENED:
+ if (vhost_user_scsi_connect(dev, &local_err) < 0) {
+ error_report_err(local_err);
+ qemu_chr_fe_disconnect(&vs->conf.chardev);
+ return;
+ }
+ break;
+ case CHR_EVENT_CLOSED:
+ /* defer close until later to avoid circular close */
+ vhost_user_async_close(dev, &vs->conf.chardev, &vsc->dev,
+ vhost_user_scsi_disconnect);
+ break;
+ case CHR_EVENT_BREAK:
+ case CHR_EVENT_MUX_IN:
+ case CHR_EVENT_MUX_OUT:
+ /* Ignore */
+ break;
+ }
+}
+
+static int vhost_user_scsi_realize_connect(VHostUserSCSI *s, Error **errp)
+{
+ DeviceState *dev = &s->parent_obj.parent_obj.parent_obj.parent_obj;
+ VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
+ int ret;
+
+ s->connected = false;
+
+ ret = qemu_chr_fe_wait_connected(&vs->conf.chardev, errp);
+ if (ret < 0) {
+ return ret;
+ }
+
+ ret = vhost_user_scsi_connect(dev, errp);
+ if (ret < 0) {
+ qemu_chr_fe_disconnect(&vs->conf.chardev);
+ return ret;
+ }
+ assert(s->connected);
+
+ return 0;
+}
+
static void vhost_user_scsi_realize(DeviceState *dev, Error **errp)
{
VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
VHostUserSCSI *s = VHOST_USER_SCSI(dev);
VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s);
- struct vhost_virtqueue *vqs = NULL;
Error *err = NULL;
int ret;
+ int retries = REALIZE_CONNECTION_RETRIES;
if (!vs->conf.chardev.chr) {
error_setg(errp, "vhost-user-scsi: missing chardev");
@@ -112,21 +224,31 @@ static void vhost_user_scsi_realize(DeviceState *dev, Error **errp)
}
if (!vhost_user_init(&s->vhost_user, &vs->conf.chardev, errp)) {
- goto free_virtio;
+ goto free_vhost;
}
- vsc->dev.nvqs = VIRTIO_SCSI_VQ_NUM_FIXED + vs->conf.num_queues;
- vsc->dev.vqs = g_new0(struct vhost_virtqueue, vsc->dev.nvqs);
- vsc->dev.vq_index = 0;
- vsc->dev.backend_features = 0;
- vqs = vsc->dev.vqs;
+ vsc->inflight = g_new0(struct vhost_inflight, 1);
+ s->vhost_vqs = g_new0(struct vhost_virtqueue,
+ VIRTIO_SCSI_VQ_NUM_FIXED + vs->conf.num_queues);
+
+ assert(!*errp);
+ do {
+ if (*errp) {
+ error_prepend(errp, "Reconnecting after error: ");
+ error_report_err(*errp);
+ *errp = NULL;
+ }
+ ret = vhost_user_scsi_realize_connect(s, errp);
+ } while (ret < 0 && retries--);
- ret = vhost_dev_init(&vsc->dev, &s->vhost_user,
- VHOST_BACKEND_TYPE_USER, 0, errp);
if (ret < 0) {
- goto free_vhost;
+ goto free_vqs;
}
+ /* we're fully initialized, now we can operate, so add the handler */
+ qemu_chr_fe_set_handlers(&vs->conf.chardev, NULL, NULL,
+ vhost_user_scsi_event, NULL, (void *)dev,
+ NULL, true);
/* Channel and lun both are 0 for bootable vhost-user-scsi disk */
vsc->channel = 0;
vsc->lun = 0;
@@ -134,10 +256,15 @@ static void vhost_user_scsi_realize(DeviceState *dev, Error **errp)
return;
+free_vqs:
+ g_free(s->vhost_vqs);
+ s->vhost_vqs = NULL;
+ g_free(vsc->inflight);
+ vsc->inflight = NULL;
+
free_vhost:
vhost_user_cleanup(&s->vhost_user);
- g_free(vqs);
-free_virtio:
+
virtio_scsi_common_unrealize(dev);
}
@@ -146,16 +273,22 @@ static void vhost_user_scsi_unrealize(DeviceState *dev)
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
VHostUserSCSI *s = VHOST_USER_SCSI(dev);
VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s);
- struct vhost_virtqueue *vqs = vsc->dev.vqs;
+ VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
/* This will stop the vhost backend. */
vhost_user_scsi_set_status(vdev, 0);
+ qemu_chr_fe_set_handlers(&vs->conf.chardev, NULL, NULL, NULL, NULL, NULL,
+ NULL, false);
vhost_dev_cleanup(&vsc->dev);
- g_free(vqs);
+ vhost_dev_free_inflight(vsc->inflight);
+ g_free(s->vhost_vqs);
+ s->vhost_vqs = NULL;
+ g_free(vsc->inflight);
+ vsc->inflight = NULL;
- virtio_scsi_common_unrealize(dev);
vhost_user_cleanup(&s->vhost_user);
+ virtio_scsi_common_unrealize(dev);
}
static Property vhost_user_scsi_properties[] = {
diff --git a/include/hw/virtio/vhost-user-scsi.h b/include/hw/virtio/vhost-user-scsi.h
index 521b08e559..c66acc68b7 100644
--- a/include/hw/virtio/vhost-user-scsi.h
+++ b/include/hw/virtio/vhost-user-scsi.h
@@ -29,6 +29,9 @@ OBJECT_DECLARE_SIMPLE_TYPE(VHostUserSCSI, VHOST_USER_SCSI)
struct VHostUserSCSI {
VHostSCSICommon parent_obj;
VhostUserState vhost_user;
+ bool connected;
+
+ struct vhost_virtqueue *vhost_vqs;
};
#endif /* VHOST_USER_SCSI_H */
diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
index 6a173cb9fa..b904346fe1 100644
--- a/include/hw/virtio/vhost.h
+++ b/include/hw/virtio/vhost.h
@@ -8,6 +8,8 @@
#define VHOST_F_DEVICE_IOTLB 63
#define VHOST_USER_F_PROTOCOL_FEATURES 30
+#define REALIZE_CONNECTION_RETRIES 3
+
/* Generic structures common for any vhost based device. */
struct vhost_inflight {
--
2.41.0
next reply other threads:[~2023-07-21 10:54 UTC|newest]
Thread overview: 66+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-07-21 10:51 Li Feng [this message]
2023-07-24 17:21 ` [PATCH] vhost-user-scsi: support reconnect to backend Raphael Norwitz
2023-07-24 20:30 ` Michael S. Tsirkin
2023-07-25 10:19 ` Li Feng
2023-07-27 21:21 ` Raphael Norwitz
2023-07-28 7:48 ` Li Feng
2023-07-30 22:09 ` Raphael Norwitz
2023-07-31 11:32 ` Li Feng
2023-07-25 10:42 ` [PATCH v2 0/4] Implement reconnect for vhost-user-scsi Li Feng
2023-07-25 10:42 ` [PATCH v2 1/4] vhost: fix the fd leak Li Feng
2023-07-30 22:09 ` Raphael Norwitz
2023-07-25 10:42 ` [PATCH v2 2/4] vhost-user-common: send get_inflight_fd once Li Feng
2023-07-28 6:04 ` Michael S. Tsirkin
2023-07-28 7:49 ` Li Feng
2023-07-30 22:13 ` Raphael Norwitz
2023-07-31 11:38 ` Li Feng
2023-07-31 23:11 ` Raphael Norwitz
2023-07-25 10:42 ` [PATCH v2 3/4] vhost: move and rename the conn retry times Li Feng
2023-07-30 22:13 ` Raphael Norwitz
2023-07-25 10:42 ` [PATCH v2 4/4] vhost-user-scsi: support reconnect to backend Li Feng
2023-07-25 10:58 ` Li Feng
2023-07-30 22:14 ` Raphael Norwitz
2023-07-31 10:24 ` Li Feng
2023-07-31 12:10 ` [PATCH v3 0/5] Implement reconnect for vhost-user-scsi Li Feng
2023-07-31 12:10 ` [PATCH v3 1/5] vhost: fix the fd leak Li Feng
2023-08-03 13:41 ` Fiona Ebner
2023-07-31 12:10 ` [PATCH v3 2/5] vhost-user-common: send get_inflight_fd once Li Feng
2023-07-31 23:34 ` Raphael Norwitz
2023-07-31 12:10 ` [PATCH v3 3/5] vhost: move and rename the conn retry times Li Feng
2023-07-31 12:10 ` [PATCH v3 4/5] vhost-user-scsi: support reconnect to backend Li Feng
2023-07-31 23:35 ` Raphael Norwitz
2023-09-01 12:00 ` Markus Armbruster
2023-09-12 8:29 ` Li Feng
2023-07-31 12:10 ` [PATCH v3 5/5] vhost-user-scsi: start vhost when guest kicks Li Feng
2023-07-31 23:35 ` Raphael Norwitz
2023-09-01 11:44 ` Markus Armbruster
2023-09-12 7:53 ` Li Feng
2023-09-12 8:52 ` [PATCH v4 0/5] Implement reconnect for vhost-user-scsi Li Feng
2023-09-12 8:52 ` [PATCH v4 1/5] vhost-user-common: send get_inflight_fd once Li Feng
2023-09-12 8:52 ` [PATCH v4 2/5] vhost: move and rename the conn retry times Li Feng
2023-09-12 8:52 ` [PATCH v4 3/5] vhost-user-scsi: support reconnect to backend Li Feng
2023-09-12 8:52 ` [PATCH v4 4/5] vhost-user-scsi: start vhost when guest kicks Li Feng
2023-09-12 8:52 ` [PATCH v4 5/5] vhost-user: fix lost reconnect Li Feng
2023-09-19 11:40 ` [PATCH v5 0/5] Implement reconnect for vhost-user-scsi Li Feng
2023-09-19 11:40 ` [PATCH v5 1/5] vhost-user-common: send get_inflight_fd once Li Feng
2023-09-19 11:40 ` [PATCH v5 2/5] vhost: move and rename the conn retry times Li Feng
2023-09-19 11:40 ` [PATCH v5 3/5] vhost-user-scsi: support reconnect to backend Li Feng
2023-09-19 11:40 ` [PATCH v5 4/5] vhost-user-scsi: start vhost when guest kicks Li Feng
2023-09-19 11:40 ` [PATCH v5 5/5] vhost-user: fix lost reconnect Li Feng
2023-09-22 11:46 ` [PATCH v6 0/5] Implement reconnect for vhost-user-scsi Li Feng
2023-09-22 11:46 ` [PATCH v6 1/5] vhost-user-common: send get_inflight_fd once Li Feng
2023-09-29 0:54 ` Raphael Norwitz
2023-10-08 8:49 ` Li Feng
2023-10-08 8:50 ` Michael S. Tsirkin
2023-10-08 8:53 ` Li Feng
2023-09-22 11:46 ` [PATCH v6 2/5] vhost: move and rename the conn retry times Li Feng
2023-09-22 11:46 ` [PATCH v6 3/5] vhost-user-scsi: support reconnect to backend Li Feng
2023-09-29 0:55 ` Raphael Norwitz
2023-10-08 8:45 ` Li Feng
2023-09-22 11:46 ` [PATCH v6 4/5] vhost-user-scsi: start vhost when guest kicks Li Feng
2023-09-29 0:55 ` Raphael Norwitz
2023-09-22 11:46 ` [PATCH v6 5/5] vhost-user: fix lost reconnect Li Feng
2023-09-29 0:55 ` Raphael Norwitz
2023-09-29 0:55 ` Raphael Norwitz
2023-10-08 8:49 ` [PATCH v6 0/5] Implement reconnect for vhost-user-scsi Michael S. Tsirkin
2023-10-08 8:53 ` Li Feng
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230721105205.1714449-1-fengli@smartx.com \
--to=fengli@smartx.com \
--cc=fam@euphon.net \
--cc=hreitz@redhat.com \
--cc=kwolf@redhat.com \
--cc=lifeng1519@gmail.com \
--cc=mst@redhat.com \
--cc=pbonzini@redhat.com \
--cc=qemu-block@nongnu.org \
--cc=qemu-devel@nongnu.org \
--cc=raphael.norwitz@nutanix.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).