From: Peter Lieven <pl@dlh.net>
To: Corentin Chary <corentin.chary@gmail.com>
Cc: Jan Kiszka <jan.kiszka@siemens.com>,
Paolo Bonzini <pbonzini@redhat.com>,
qemu-devel <qemu-devel@nongnu.org>,
kvm@vger.kernel.org,
Anthony Liguori <aliguori@linux.vnet.ibm.com>
Subject: [Qemu-devel] Re: [PATCH 2/2] vnc: don't mess up with iohandlers in the vnc thread
Date: Thu, 10 Mar 2011 14:47:16 +0100
Message-ID: <4D78D664.8010108@dlh.net>
In-Reply-To: <1299761979-15197-2-git-send-email-corentin.chary@gmail.com>
On 10.03.2011 13:59, Corentin Chary wrote:
> The threaded VNC server messed with QEMU fd handlers without
> any kind of locking, and that can cause some nasty race conditions.
>
> Using qemu_mutex_lock_iothread() won't work because vnc_dpy_cpy(),
> which will wait for the current job queue to finish, can be called with
> the iothread lock held.
>
> Instead, we now store the data in a temporary buffer, and use a socket
> pair to notify the main thread that new data is available. The data
> is not sent directly through this socket because it would make the code
> more complex without any performance benefit.
>
> vnc_[un]lock_output() is still needed to access VncState members like
> abort, csock or jobs_buffer.
>
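To make the scheme concrete, here is a minimal, self-contained POSIX C
sketch of the pattern the commit message describes (illustrative only;
the buffer, names, and sizes are made up for the example and are not
taken from QEMU):

/*
 * Worker thread stages data in a shared buffer under a mutex and writes
 * one byte to a socket pair; the main loop's fd handler reads that byte
 * and consumes the buffer in main-loop context, so no fd handlers are
 * ever touched from the worker thread.
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

static int socks[2];               /* [0]: main loop end, [1]: worker end */
static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;
static char buf[65536];
static size_t buf_len;

/* Worker side: append data, notify only on the empty -> non-empty
 * transition so multiple pushes coalesce into a single wakeup byte. */
static void worker_push(const char *data, size_t len)
{
    int notify;

    pthread_mutex_lock(&buf_lock);
    notify = (buf_len == 0);
    memcpy(buf + buf_len, data, len);     /* bounds checks omitted */
    buf_len += len;
    pthread_mutex_unlock(&buf_lock);

    if (notify) {
        send(socks[1], (char []){1}, 1, 0);
    }
}

/* Main-loop side: what the fd handler runs when socks[0] is readable. */
static void main_loop_consume(void)
{
    char dummy;
    size_t len;

    recv(socks[0], &dummy, 1, 0);         /* drain the wakeup byte */
    pthread_mutex_lock(&buf_lock);
    len = buf_len;
    /* ... hand buf[0..len) to the real output path here ... */
    buf_len = 0;
    pthread_mutex_unlock(&buf_lock);
    printf("consumed %zu bytes\n", len);
}

int main(void)
{
    if (socketpair(AF_UNIX, SOCK_STREAM, 0, socks) < 0) {
        return 1;
    }
    worker_push("framebuffer update", 18);
    main_loop_consume();    /* in QEMU this runs from the fd handler */
    return 0;
}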
Is this compatible with qemu-kvm?
thanks,
peter
> Thanks to Jan Kiszka for helping me solve this issue.
>
> Signed-off-by: Corentin Chary <corentin.chary@gmail.com>
> ---
> ui/vnc-jobs-async.c | 50 ++++++++++++++++++++++++++++++++------------------
> ui/vnc-jobs-sync.c | 4 ++++
> ui/vnc-jobs.h | 1 +
> ui/vnc.c | 16 ++++++++++++++++
> ui/vnc.h | 2 ++
> 5 files changed, 55 insertions(+), 18 deletions(-)
>
> diff --git a/ui/vnc-jobs-async.c b/ui/vnc-jobs-async.c
> index f596247..543b5a9 100644
> --- a/ui/vnc-jobs-async.c
> +++ b/ui/vnc-jobs-async.c
> @@ -28,6 +28,7 @@
>
> #include "vnc.h"
> #include "vnc-jobs.h"
> +#include "qemu_socket.h"
>
> /*
> * Locking:
> @@ -155,6 +156,24 @@ void vnc_jobs_join(VncState *vs)
> qemu_cond_wait(&queue->cond, &queue->mutex);
> }
> vnc_unlock_queue(queue);
> + vnc_jobs_consume_buffer(vs);
> +}
> +
> +void vnc_jobs_consume_buffer(VncState *vs)
> +{
> + bool flush;
> +
> + vnc_lock_output(vs);
> + if (vs->jobs_buffer.offset) {
> + vnc_write(vs, vs->jobs_buffer.buffer, vs->jobs_buffer.offset);
> + buffer_reset(&vs->jobs_buffer);
> + }
> + flush = vs->csock != -1 && vs->abort != true;
> + vnc_unlock_output(vs);
> +
> + if (flush) {
> + vnc_flush(vs);
> + }
> }
>
> /*
> @@ -197,7 +216,6 @@ static int vnc_worker_thread_loop(VncJobQueue *queue)
> VncState vs;
> int n_rectangles;
> int saved_offset;
> - bool flush;
>
> vnc_lock_queue(queue);
> while (QTAILQ_EMPTY(&queue->jobs) && !queue->exit) {
> @@ -213,6 +231,7 @@ static int vnc_worker_thread_loop(VncJobQueue *queue)
>
> vnc_lock_output(job->vs);
> if (job->vs->csock == -1 || job->vs->abort == true) {
> + vnc_unlock_output(job->vs);
> goto disconnected;
> }
> vnc_unlock_output(job->vs);
> @@ -233,10 +252,6 @@ static int vnc_worker_thread_loop(VncJobQueue *queue)
>
> if (job->vs->csock == -1) {
> vnc_unlock_display(job->vs->vd);
> - /* output mutex must be locked before going to
> - * disconnected:
> - */
> - vnc_lock_output(job->vs);
> goto disconnected;
> }
>
> @@ -254,24 +269,23 @@ static int vnc_worker_thread_loop(VncJobQueue *queue)
> vs.output.buffer[saved_offset] = (n_rectangles >> 8) & 0xFF;
> vs.output.buffer[saved_offset + 1] = n_rectangles & 0xFF;
>
> - /* Switch back buffers */
> vnc_lock_output(job->vs);
> - if (job->vs->csock == -1) {
> - goto disconnected;
> - }
> -
> - vnc_write(job->vs, vs.output.buffer, vs.output.offset);
> + if (job->vs->csock != -1) {
> + int notify = !job->vs->jobs_buffer.offset;
>
> -disconnected:
> - /* Copy persistent encoding data */
> - vnc_async_encoding_end(job->vs, &vs);
> - flush = (job->vs->csock != -1 && job->vs->abort != true);
> - vnc_unlock_output(job->vs);
> + buffer_reserve(&job->vs->jobs_buffer, vs.output.offset);
> + buffer_append(&job->vs->jobs_buffer, vs.output.buffer,
> + vs.output.offset);
> + /* Copy persistent encoding data */
> + vnc_async_encoding_end(job->vs, &vs);
>
> - if (flush) {
> - vnc_flush(job->vs);
> + if (notify) {
> + send(job->vs->jobs_socks[1], (char []){1}, 1, 0);
> + }
> }
> + vnc_unlock_output(job->vs);
>
> +disconnected:
> vnc_lock_queue(queue);
> QTAILQ_REMOVE(&queue->jobs, job, next);
> vnc_unlock_queue(queue);
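One detail worth noting in the hunk above: the worker sends the wakeup
byte only when jobs_buffer was empty before the append (notify =
!job->vs->jobs_buffer.offset). However many jobs complete before the
main loop gets to run, at most one byte is ever pending on the socket
pair, and a single recv()/consume cycle on the other end drains the
whole buffer -- the same empty-to-non-empty guard as worker_push() in
the sketch earlier.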
> diff --git a/ui/vnc-jobs-sync.c b/ui/vnc-jobs-sync.c
> index 49b77af..3a4d7df 100644
> --- a/ui/vnc-jobs-sync.c
> +++ b/ui/vnc-jobs-sync.c
> @@ -36,6 +36,10 @@ void vnc_jobs_join(VncState *vs)
> {
> }
>
> +void vnc_jobs_consume_buffer(VncState *vs)
> +{
> +}
> +
> VncJob *vnc_job_new(VncState *vs)
> {
> vs->job.vs = vs;
> diff --git a/ui/vnc-jobs.h b/ui/vnc-jobs.h
> index b8dab81..7c529b7 100644
> --- a/ui/vnc-jobs.h
> +++ b/ui/vnc-jobs.h
> @@ -37,6 +37,7 @@ void vnc_job_push(VncJob *job);
> bool vnc_has_job(VncState *vs);
> void vnc_jobs_clear(VncState *vs);
> void vnc_jobs_join(VncState *vs);
> +void vnc_jobs_consume_buffer(VncState *vs);
>
> #ifdef CONFIG_VNC_THREAD
>
> diff --git a/ui/vnc.c b/ui/vnc.c
> index 610f884..48a81a2 100644
> --- a/ui/vnc.c
> +++ b/ui/vnc.c
> @@ -1011,6 +1011,10 @@ static void vnc_disconnect_finish(VncState *vs)
>
> #ifdef CONFIG_VNC_THREAD
> qemu_mutex_destroy(&vs->output_mutex);
> + qemu_set_fd_handler2(vs->jobs_socks[0], NULL, NULL, NULL, vs);
> + close(vs->jobs_socks[0]);
> + close(vs->jobs_socks[1]);
> + buffer_free(&vs->jobs_buffer);
> #endif
> for (i = 0; i < VNC_STAT_ROWS; ++i) {
> qemu_free(vs->lossy_rect[i]);
> @@ -1226,6 +1230,16 @@ static long vnc_client_read_plain(VncState *vs)
> return ret;
> }
>
> +#ifdef CONFIG_VNC_THREAD
> +static void vnc_jobs_read(void *opaque)
> +{
> + VncState *vs = opaque;
> + char dummy;
> +
> + recv(vs->jobs_socks[0], (void *)&dummy, 1, 0);
> + vnc_jobs_consume_buffer(vs);
> +}
> +#endif
>
> /*
> * First function called whenever there is more data to be read from
> @@ -2525,6 +2539,8 @@ static void vnc_connect(VncDisplay *vd, int csock)
>
> #ifdef CONFIG_VNC_THREAD
> qemu_mutex_init(&vs->output_mutex);
> + qemu_socketpair(AF_UNIX, SOCK_STREAM, 0, vs->jobs_socks);
> + qemu_set_fd_handler2(vs->jobs_socks[0], NULL, vnc_jobs_read, NULL, vs);
> #endif
>
> QTAILQ_INSERT_HEAD(&vd->clients, vs, next);
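Setup and teardown are symmetric here: vnc_connect() creates the socket
pair and installs vnc_jobs_read() as the read handler for jobs_socks[0],
while vnc_disconnect_finish() (in the earlier vnc.c hunk) unregisters
that handler, closes both ends, and frees jobs_buffer, so the handler
can never fire against a torn-down VncState.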
> diff --git a/ui/vnc.h b/ui/vnc.h
> index 8a1e7b9..0f98f2e 100644
> --- a/ui/vnc.h
> +++ b/ui/vnc.h
> @@ -283,6 +283,8 @@ struct VncState
> VncJob job;
> #else
> QemuMutex output_mutex;
> + Buffer jobs_buffer;
> + int jobs_socks[2];
> #endif
>
> /* Encoding specific, if you add something here, don't forget to
Thread overview: 50+ messages
2011-03-08 22:53 [Qemu-devel] segmentation fault in qemu-kvm-0.14.0 Peter Lieven
2011-03-09 7:13 ` Corentin Chary
2011-03-09 7:26 ` Stefan Weil
2011-03-09 7:39 ` Michael Tokarev
2011-03-09 9:22 ` Stefan Weil
2011-03-09 10:00 ` Peter Lieven
2011-03-15 12:53 ` Peter Lieven
2011-03-15 18:52 ` Stefan Weil
2011-03-09 7:37 ` [Qemu-devel] " Jan Kiszka
2011-03-09 8:50 ` Corentin Chary
2011-03-09 9:04 ` Jan Kiszka
2011-03-09 9:54 ` Corentin Chary
2011-03-09 9:58 ` Jan Kiszka
2011-03-09 10:02 ` Jan Kiszka
2011-03-09 10:06 ` Corentin Chary
2011-03-09 10:12 ` Jan Kiszka
2011-03-09 10:14 ` Corentin Chary
2011-03-09 10:17 ` Jan Kiszka
2011-03-09 10:41 ` [Qemu-devel] [PATCH] vnc: threaded server depends on io-thread Corentin Chary
2011-03-09 10:50 ` [Qemu-devel] " Peter Lieven
2011-03-09 10:57 ` Corentin Chary
2011-03-09 11:05 ` Stefan Hajnoczi
2011-03-09 11:25 ` Jan Kiszka
2011-03-09 11:32 ` Peter Lieven
2011-03-09 11:33 ` Jan Kiszka
2011-03-09 11:42 ` Jan Kiszka
2011-03-09 12:50 ` Peter Lieven
2011-03-09 13:21 ` [Qemu-devel] [PATCH v2] " Corentin Chary
2011-03-09 13:42 ` [Qemu-devel] " Corentin Chary
2011-03-09 13:51 ` Paolo Bonzini
2011-03-09 13:59 ` Corentin Chary
2011-03-10 12:59 ` [Qemu-devel] [PATCH 1/2] sockets: add qemu_socketpair() Corentin Chary
2011-03-10 12:59 ` [Qemu-devel] [PATCH 2/2] vnc: don't mess up with iohandlers in the vnc thread Corentin Chary
2011-03-10 13:06 ` [Qemu-devel] " Paolo Bonzini
2011-03-10 13:45 ` Anthony Liguori
2011-03-10 13:54 ` Corentin Chary
2011-03-10 13:58 ` Paolo Bonzini
2011-03-10 13:56 ` Paolo Bonzini
2011-03-10 13:47 ` Peter Lieven [this message]
2011-03-10 15:13 ` [Qemu-devel] [PATCH v5] " Corentin Chary
2011-03-14 9:19 ` [Qemu-devel] " Corentin Chary
2011-03-14 9:55 ` Peter Lieven
2011-03-15 16:55 ` Peter Lieven
2011-03-15 18:07 ` Peter Lieven
2011-03-09 10:02 ` [Qemu-devel] Re: segmentation fault in qemu-kvm-0.14.0 Peter Lieven
2011-03-09 10:16 ` Peter Lieven
2011-03-09 10:20 ` Jan Kiszka
2011-03-09 10:31 ` Peter Lieven
2011-03-09 11:20 ` Paolo Bonzini
2011-03-09 11:44 ` Jan Kiszka