* [Qemu-devel] [PATCH v2] vhost-user: delay vhost_user_stop
@ 2017-02-27 10:18 Marc-André Lureau
2017-02-27 10:25 ` Paolo Bonzini
0 siblings, 1 reply; 2+ messages in thread
From: Marc-André Lureau @ 2017-02-27 10:18 UTC (permalink / raw)
To: qemu-devel; +Cc: mst, pbonzini, den, dgilbert, Marc-André Lureau
Since commit b0a335e351103bf92f3f9d0bd5759311be8156ac, a socket write
may trigger disconnect events, calling vhost_user_stop() and clearing
all the vhost_dev structures holding data that vhost.c functions expect
to remain valid. Delay the cleanup to keep the vhost_dev structure
valid during the vhost.c functions.
Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
---
net/vhost-user.c | 56 ++++++++++++++++++++++++++++++++++++++++++++++++++------
1 file changed, 50 insertions(+), 6 deletions(-)
diff --git a/net/vhost-user.c b/net/vhost-user.c
index 77b8110f8c..028bf0cf5d 100644
--- a/net/vhost-user.c
+++ b/net/vhost-user.c
@@ -25,6 +25,7 @@ typedef struct VhostUserState {
guint watch;
uint64_t acked_features;
bool started;
+ QEMUBH *chr_closed_bh;
} VhostUserState;
VHostNetState *vhost_user_get_vhost_net(NetClientState *nc)
@@ -190,9 +191,45 @@ static gboolean net_vhost_user_watch(GIOChannel *chan, GIOCondition cond,
qemu_chr_fe_disconnect(&s->chr);
+ s->watch = 0;
return FALSE;
}
+static void net_vhost_user_event(void *opaque, int event);
+
+static void chr_closed_bh(void *opaque)
+{
+ const char *name = opaque;
+ NetClientState *ncs[MAX_QUEUE_NUM];
+ VhostUserState *s;
+ Error *err = NULL;
+ int queues;
+
+ queues = qemu_find_net_clients_except(name, ncs,
+ NET_CLIENT_DRIVER_NIC,
+ MAX_QUEUE_NUM);
+ assert(queues < MAX_QUEUE_NUM);
+
+ s = DO_UPCAST(VhostUserState, nc, ncs[0]);
+
+ qmp_set_link(name, false, &err);
+ vhost_user_stop(queues, ncs);
+ if (s->watch) {
+ g_source_remove(s->watch);
+ }
+ s->watch = 0;
+
+ qemu_bh_delete(s->chr_closed_bh);
+ s->chr_closed_bh = NULL;
+
+ qemu_chr_fe_set_handlers(&s->chr, NULL, NULL, net_vhost_user_event,
+ opaque, NULL, true);
+
+ if (err) {
+ error_report_err(err);
+ }
+}
+
static void net_vhost_user_event(void *opaque, int event)
{
const char *name = opaque;
@@ -212,20 +249,27 @@ static void net_vhost_user_event(void *opaque, int event)
trace_vhost_user_event(chr->label, event);
switch (event) {
case CHR_EVENT_OPENED:
- s->watch = qemu_chr_fe_add_watch(&s->chr, G_IO_HUP,
- net_vhost_user_watch, s);
if (vhost_user_start(queues, ncs, &s->chr) < 0) {
qemu_chr_fe_disconnect(&s->chr);
return;
}
+ s->watch = qemu_chr_fe_add_watch(&s->chr, G_IO_HUP,
+ net_vhost_user_watch, s);
qmp_set_link(name, true, &err);
+ s->chr_closed_bh = qemu_bh_new(chr_closed_bh, opaque);
s->started = true;
break;
case CHR_EVENT_CLOSED:
- qmp_set_link(name, false, &err);
- vhost_user_stop(queues, ncs);
- g_source_remove(s->watch);
- s->watch = 0;
+ /* a close event may happen during a read/write, but vhost
+ * code assumes the vhost_dev remains setup, so delay the
+ * stop & clear to idle.
+ * FIXME: better handle failure in vhost code, remove bh
+ */
+ if (s->chr_closed_bh) {
+ qemu_chr_fe_set_handlers(&s->chr, NULL, NULL, NULL,
+ NULL, NULL, false);
+ qemu_bh_schedule(s->chr_closed_bh);
+ }
break;
}
--
2.12.0.rc2.3.gc93709801
^ permalink raw reply related [flat|nested] 2+ messages in thread
* Re: [Qemu-devel] [PATCH v2] vhost-user: delay vhost_user_stop
2017-02-27 10:18 [Qemu-devel] [PATCH v2] vhost-user: delay vhost_user_stop Marc-André Lureau
@ 2017-02-27 10:25 ` Paolo Bonzini
0 siblings, 0 replies; 2+ messages in thread
From: Paolo Bonzini @ 2017-02-27 10:25 UTC (permalink / raw)
To: Marc-André Lureau, qemu-devel; +Cc: mst, den, dgilbert
On 27/02/2017 11:18, Marc-André Lureau wrote:
> Since commit b0a335e351103bf92f3f9d0bd5759311be8156ac, a socket write
> may trigger disconnect events, calling vhost_user_stop() and clearing
> all the vhost_dev structures holding data that vhost.c functions expect
> to remain valid. Delay the cleanup to keep the vhost_dev structure
> valid during the vhost.c functions.
>
> Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
> ---
> net/vhost-user.c | 56 ++++++++++++++++++++++++++++++++++++++++++++++++++------
> 1 file changed, 50 insertions(+), 6 deletions(-)
>
> diff --git a/net/vhost-user.c b/net/vhost-user.c
> index 77b8110f8c..028bf0cf5d 100644
> --- a/net/vhost-user.c
> +++ b/net/vhost-user.c
> @@ -25,6 +25,7 @@ typedef struct VhostUserState {
> guint watch;
> uint64_t acked_features;
> bool started;
> + QEMUBH *chr_closed_bh;
> } VhostUserState;
>
> VHostNetState *vhost_user_get_vhost_net(NetClientState *nc)
> @@ -190,9 +191,45 @@ static gboolean net_vhost_user_watch(GIOChannel *chan, GIOCondition cond,
>
> qemu_chr_fe_disconnect(&s->chr);
>
> + s->watch = 0;
> return FALSE;
> }
>
> +static void net_vhost_user_event(void *opaque, int event);
> +
> +static void chr_closed_bh(void *opaque)
> +{
> + const char *name = opaque;
> + NetClientState *ncs[MAX_QUEUE_NUM];
> + VhostUserState *s;
> + Error *err = NULL;
> + int queues;
> +
> + queues = qemu_find_net_clients_except(name, ncs,
> + NET_CLIENT_DRIVER_NIC,
> + MAX_QUEUE_NUM);
> + assert(queues < MAX_QUEUE_NUM);
> +
> + s = DO_UPCAST(VhostUserState, nc, ncs[0]);
> +
> + qmp_set_link(name, false, &err);
> + vhost_user_stop(queues, ncs);
> + if (s->watch) {
> + g_source_remove(s->watch);
> + }
> + s->watch = 0;
> +
> + qemu_bh_delete(s->chr_closed_bh);
> + s->chr_closed_bh = NULL;
> +
> + qemu_chr_fe_set_handlers(&s->chr, NULL, NULL, net_vhost_user_event,
> + opaque, NULL, true);
> +
> + if (err) {
> + error_report_err(err);
> + }
> +}
> +
> static void net_vhost_user_event(void *opaque, int event)
> {
> const char *name = opaque;
> @@ -212,20 +249,27 @@ static void net_vhost_user_event(void *opaque, int event)
> trace_vhost_user_event(chr->label, event);
> switch (event) {
> case CHR_EVENT_OPENED:
> - s->watch = qemu_chr_fe_add_watch(&s->chr, G_IO_HUP,
> - net_vhost_user_watch, s);
> if (vhost_user_start(queues, ncs, &s->chr) < 0) {
> qemu_chr_fe_disconnect(&s->chr);
> return;
> }
> + s->watch = qemu_chr_fe_add_watch(&s->chr, G_IO_HUP,
> + net_vhost_user_watch, s);
> qmp_set_link(name, true, &err);
> + s->chr_closed_bh = qemu_bh_new(chr_closed_bh, opaque);
> s->started = true;
> break;
> case CHR_EVENT_CLOSED:
> - qmp_set_link(name, false, &err);
> - vhost_user_stop(queues, ncs);
> - g_source_remove(s->watch);
> - s->watch = 0;
> + /* a close event may happen during a read/write, but vhost
> + * code assumes the vhost_dev remains setup, so delay the
> + * stop & clear to idle.
> + * FIXME: better handle failure in vhost code, remove bh
> + */
> + if (s->chr_closed_bh) {
> + qemu_chr_fe_set_handlers(&s->chr, NULL, NULL, NULL,
> + NULL, NULL, false);
> + qemu_bh_schedule(s->chr_closed_bh);
> + }
The bottom half adds a small overhead to the event loop all the time,
even if it is not scheduled. Would it be possible to create it here
instead? You can have a s->state enum (OPENED, CLOSING, CLOSED for
example). You can even use aio_bh_schedule_oneshot to avoid having to
store the QEMUBH pointer.
Paolo
> break;
> }
>
>
^ permalink raw reply [flat|nested] 2+ messages in thread
end of thread, other threads:[~2017-02-27 10:25 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2017-02-27 10:18 [Qemu-devel] [PATCH v2] vhost-user: delay vhost_user_stop Marc-André Lureau
2017-02-27 10:25 ` Paolo Bonzini
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).