From: "Michael S. Tsirkin" <mst@redhat.com>
To: Li Feng <fengli@smartx.com>
Cc: "Raphael Norwitz" <raphael@enfabrica.net>,
"Kevin Wolf" <kwolf@redhat.com>,
"Hanna Reitz" <hreitz@redhat.com>,
"Paolo Bonzini" <pbonzini@redhat.com>,
"Fam Zheng" <fam@euphon.net>,
"Alex Bennée" <alex.bennee@linaro.org>,
"open list:Block layer core" <qemu-block@nongnu.org>,
"open list:All patches CC here" <qemu-devel@nongnu.org>,
"Yajun Wu" <yajunw@nvidia.com>,
"Stefano Garzarella" <sgarzare@redhat.com>
Subject: Re: [PATCH v2 1/2] Revert "vhost-user: fix lost reconnect"
Date: Mon, 13 May 2024 03:15:27 -0400 [thread overview]
Message-ID: <20240513031501-mutt-send-email-mst@kernel.org> (raw)
In-Reply-To: <20240513071052.129581-2-fengli@smartx.com>
On Mon, May 13, 2024 at 03:10:47PM +0800, Li Feng wrote:
> This reverts commit f02a4b8e6431598612466f76aac64ab492849abf.
>
Please include the subject line of the reverted commit and the motivation for the revert.
> Signed-off-by: Li Feng <fengli@smartx.com>
> ---
> hw/block/vhost-user-blk.c | 2 +-
> hw/scsi/vhost-user-scsi.c | 3 +--
> hw/virtio/vhost-user-base.c | 2 +-
> hw/virtio/vhost-user.c | 10 ++--------
> include/hw/virtio/vhost-user.h | 3 +--
> 5 files changed, 6 insertions(+), 14 deletions(-)
>
> diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c
> index 9e6bbc6950..41d1ac3a5a 100644
> --- a/hw/block/vhost-user-blk.c
> +++ b/hw/block/vhost-user-blk.c
> @@ -384,7 +384,7 @@ static void vhost_user_blk_event(void *opaque, QEMUChrEvent event)
> case CHR_EVENT_CLOSED:
> /* defer close until later to avoid circular close */
> vhost_user_async_close(dev, &s->chardev, &s->dev,
> - vhost_user_blk_disconnect, vhost_user_blk_event);
> + vhost_user_blk_disconnect);
> break;
> case CHR_EVENT_BREAK:
> case CHR_EVENT_MUX_IN:
> diff --git a/hw/scsi/vhost-user-scsi.c b/hw/scsi/vhost-user-scsi.c
> index a63b1f4948..48a59e020e 100644
> --- a/hw/scsi/vhost-user-scsi.c
> +++ b/hw/scsi/vhost-user-scsi.c
> @@ -214,8 +214,7 @@ static void vhost_user_scsi_event(void *opaque, QEMUChrEvent event)
> case CHR_EVENT_CLOSED:
> /* defer close until later to avoid circular close */
> vhost_user_async_close(dev, &vs->conf.chardev, &vsc->dev,
> - vhost_user_scsi_disconnect,
> - vhost_user_scsi_event);
> + vhost_user_scsi_disconnect);
> break;
> case CHR_EVENT_BREAK:
> case CHR_EVENT_MUX_IN:
> diff --git a/hw/virtio/vhost-user-base.c b/hw/virtio/vhost-user-base.c
> index a83167191e..4b54255682 100644
> --- a/hw/virtio/vhost-user-base.c
> +++ b/hw/virtio/vhost-user-base.c
> @@ -254,7 +254,7 @@ static void vub_event(void *opaque, QEMUChrEvent event)
> case CHR_EVENT_CLOSED:
> /* defer close until later to avoid circular close */
> vhost_user_async_close(dev, &vub->chardev, &vub->vhost_dev,
> - vub_disconnect, vub_event);
> + vub_disconnect);
> break;
> case CHR_EVENT_BREAK:
> case CHR_EVENT_MUX_IN:
> diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
> index cdf9af4a4b..c929097e87 100644
> --- a/hw/virtio/vhost-user.c
> +++ b/hw/virtio/vhost-user.c
> @@ -2776,7 +2776,6 @@ typedef struct {
> DeviceState *dev;
> CharBackend *cd;
> struct vhost_dev *vhost;
> - IOEventHandler *event_cb;
> } VhostAsyncCallback;
>
> static void vhost_user_async_close_bh(void *opaque)
> @@ -2791,10 +2790,7 @@ static void vhost_user_async_close_bh(void *opaque)
> */
> if (vhost->vdev) {
> data->cb(data->dev);
> - } else if (data->event_cb) {
> - qemu_chr_fe_set_handlers(data->cd, NULL, NULL, data->event_cb,
> - NULL, data->dev, NULL, true);
> - }
> + }
>
> g_free(data);
> }
> @@ -2806,8 +2802,7 @@ static void vhost_user_async_close_bh(void *opaque)
> */
> void vhost_user_async_close(DeviceState *d,
> CharBackend *chardev, struct vhost_dev *vhost,
> - vu_async_close_fn cb,
> - IOEventHandler *event_cb)
> + vu_async_close_fn cb)
> {
> if (!runstate_check(RUN_STATE_SHUTDOWN)) {
> /*
> @@ -2823,7 +2818,6 @@ void vhost_user_async_close(DeviceState *d,
> data->dev = d;
> data->cd = chardev;
> data->vhost = vhost;
> - data->event_cb = event_cb;
>
> /* Disable any further notifications on the chardev */
> qemu_chr_fe_set_handlers(chardev,
> diff --git a/include/hw/virtio/vhost-user.h b/include/hw/virtio/vhost-user.h
> index d7c09ffd34..324cd8663a 100644
> --- a/include/hw/virtio/vhost-user.h
> +++ b/include/hw/virtio/vhost-user.h
> @@ -108,7 +108,6 @@ typedef void (*vu_async_close_fn)(DeviceState *cb);
>
> void vhost_user_async_close(DeviceState *d,
> CharBackend *chardev, struct vhost_dev *vhost,
> - vu_async_close_fn cb,
> - IOEventHandler *event_cb);
> + vu_async_close_fn cb);
>
> #endif
> --
> 2.45.0
next prev parent reply other threads:[~2024-05-13 7:16 UTC|newest]
Thread overview: 3+ messages / expand[flat|nested] mbox.gz Atom feed top
[not found] <20240513071052.129581-1-fengli@smartx.com>
2024-05-13 7:10 ` [PATCH v2 1/2] Revert "vhost-user: fix lost reconnect" Li Feng
2024-05-13 7:15 ` Michael S. Tsirkin [this message]
2024-05-13 7:10 ` [PATCH v2 2/2] vhost-user: fix lost reconnect again Li Feng
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240513031501-mutt-send-email-mst@kernel.org \
--to=mst@redhat.com \
--cc=alex.bennee@linaro.org \
--cc=fam@euphon.net \
--cc=fengli@smartx.com \
--cc=hreitz@redhat.com \
--cc=kwolf@redhat.com \
--cc=pbonzini@redhat.com \
--cc=qemu-block@nongnu.org \
--cc=qemu-devel@nongnu.org \
--cc=raphael@enfabrica.net \
--cc=sgarzare@redhat.com \
--cc=yajunw@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).