From mboxrd@z Thu Jan 1 00:00:00 1970 Received: from [140.186.70.92] (port=39015 helo=eggs.gnu.org) by lists.gnu.org with esmtp (Exim 4.43) id 1PxgD1-0004GU-LA for qemu-devel@nongnu.org; Thu, 10 Mar 2011 08:47:21 -0500 Received: from Debian-exim by eggs.gnu.org with spam-scanned (Exim 4.71) (envelope-from ) id 1PxgD0-0001IJ-Cj for qemu-devel@nongnu.org; Thu, 10 Mar 2011 08:47:19 -0500 Received: from ssl.dlh.net ([91.198.192.8]:43440) by eggs.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1PxgD0-0001I6-4q for qemu-devel@nongnu.org; Thu, 10 Mar 2011 08:47:18 -0500 Message-ID: <4D78D664.8010108@dlh.net> Date: Thu, 10 Mar 2011 14:47:16 +0100 From: Peter Lieven MIME-Version: 1.0 References: <4D7767C0.6060609@siemens.com> <1299761979-15197-2-git-send-email-corentin.chary@gmail.com> In-Reply-To: <1299761979-15197-2-git-send-email-corentin.chary@gmail.com> Content-Type: text/plain; charset=ISO-8859-1; format=flowed Content-Transfer-Encoding: 7bit Subject: [Qemu-devel] Re: [PATCH 2/2] vnc: don't mess up with iohandlers in the vnc thread List-Id: qemu-devel.nongnu.org List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , To: Corentin Chary Cc: Jan Kiszka , Paolo Bonzini , qemu-devel , kvm@vger.kernel.org, Anthony Liguori On 10.03.2011 13:59, Corentin Chary wrote: > The threaded VNC servers messed up with QEMU fd handlers without > any kind of locking, and that can cause some nasty race conditions. > > Using qemu_mutex_lock_iothread() won't work because vnc_dpy_cpy(), > which will wait for the current job queue to finish, can be called with > the iothread lock held. > > Instead, we now store the data in a temporary buffer, and use a socket > pair to notify the main thread that new data is available. The data > is not directly sent through this socket because it would make the code > more complex without any performance benefit. > > vnc_[un]lock_output() is still needed to access VncState members like > abort, csock or jobs_buffer. 
> is this compatible with qemu-kvm? thanks, peter > Thanks to Jan Kiszka for helping me solve this issue. > > Signed-off-by: Corentin Chary > --- > ui/vnc-jobs-async.c | 50 ++++++++++++++++++++++++++++++++------------------ > ui/vnc-jobs-sync.c | 4 ++++ > ui/vnc-jobs.h | 1 + > ui/vnc.c | 16 ++++++++++++++++ > ui/vnc.h | 2 ++ > 5 files changed, 55 insertions(+), 18 deletions(-) > > diff --git a/ui/vnc-jobs-async.c b/ui/vnc-jobs-async.c > index f596247..543b5a9 100644 > --- a/ui/vnc-jobs-async.c > +++ b/ui/vnc-jobs-async.c > @@ -28,6 +28,7 @@ > > #include "vnc.h" > #include "vnc-jobs.h" > +#include "qemu_socket.h" > > /* > * Locking: > @@ -155,6 +156,24 @@ void vnc_jobs_join(VncState *vs) > qemu_cond_wait(&queue->cond,&queue->mutex); > } > vnc_unlock_queue(queue); > + vnc_jobs_consume_buffer(vs); > +} > + > +void vnc_jobs_consume_buffer(VncState *vs) > +{ > + bool flush; > + > + vnc_lock_output(vs); > + if (vs->jobs_buffer.offset) { > + vnc_write(vs, vs->jobs_buffer.buffer, vs->jobs_buffer.offset); > + buffer_reset(&vs->jobs_buffer); > + } > + flush = vs->csock != -1&& vs->abort != true; > + vnc_unlock_output(vs); > + > + if (flush) { > + vnc_flush(vs); > + } > } > > /* > @@ -197,7 +216,6 @@ static int vnc_worker_thread_loop(VncJobQueue *queue) > VncState vs; > int n_rectangles; > int saved_offset; > - bool flush; > > vnc_lock_queue(queue); > while (QTAILQ_EMPTY(&queue->jobs)&& !queue->exit) { > @@ -213,6 +231,7 @@ static int vnc_worker_thread_loop(VncJobQueue *queue) > > vnc_lock_output(job->vs); > if (job->vs->csock == -1 || job->vs->abort == true) { > + vnc_unlock_output(job->vs); > goto disconnected; > } > vnc_unlock_output(job->vs); > @@ -233,10 +252,6 @@ static int vnc_worker_thread_loop(VncJobQueue *queue) > > if (job->vs->csock == -1) { > vnc_unlock_display(job->vs->vd); > - /* output mutex must be locked before going to > - * disconnected: > - */ > - vnc_lock_output(job->vs); > goto disconnected; > } > > @@ -254,24 +269,23 @@ static int 
vnc_worker_thread_loop(VncJobQueue *queue) > vs.output.buffer[saved_offset] = (n_rectangles>> 8)& 0xFF; > vs.output.buffer[saved_offset + 1] = n_rectangles& 0xFF; > > - /* Switch back buffers */ > vnc_lock_output(job->vs); > - if (job->vs->csock == -1) { > - goto disconnected; > - } > - > - vnc_write(job->vs, vs.output.buffer, vs.output.offset); > + if (job->vs->csock != -1) { > + int notify = !job->vs->jobs_buffer.offset; > > -disconnected: > - /* Copy persistent encoding data */ > - vnc_async_encoding_end(job->vs,&vs); > - flush = (job->vs->csock != -1&& job->vs->abort != true); > - vnc_unlock_output(job->vs); > + buffer_reserve(&job->vs->jobs_buffer, vs.output.offset); > + buffer_append(&job->vs->jobs_buffer, vs.output.buffer, > + vs.output.offset); > + /* Copy persistent encoding data */ > + vnc_async_encoding_end(job->vs,&vs); > > - if (flush) { > - vnc_flush(job->vs); > + if (notify) { > + send(job->vs->jobs_socks[1], (char []){1}, 1, 0); > + } > } > + vnc_unlock_output(job->vs); > > +disconnected: > vnc_lock_queue(queue); > QTAILQ_REMOVE(&queue->jobs, job, next); > vnc_unlock_queue(queue); > diff --git a/ui/vnc-jobs-sync.c b/ui/vnc-jobs-sync.c > index 49b77af..3a4d7df 100644 > --- a/ui/vnc-jobs-sync.c > +++ b/ui/vnc-jobs-sync.c > @@ -36,6 +36,10 @@ void vnc_jobs_join(VncState *vs) > { > } > > +void vnc_jobs_consume_buffer(VncState *vs) > +{ > +} > + > VncJob *vnc_job_new(VncState *vs) > { > vs->job.vs = vs; > diff --git a/ui/vnc-jobs.h b/ui/vnc-jobs.h > index b8dab81..7c529b7 100644 > --- a/ui/vnc-jobs.h > +++ b/ui/vnc-jobs.h > @@ -37,6 +37,7 @@ void vnc_job_push(VncJob *job); > bool vnc_has_job(VncState *vs); > void vnc_jobs_clear(VncState *vs); > void vnc_jobs_join(VncState *vs); > +void vnc_jobs_consume_buffer(VncState *vs); > > #ifdef CONFIG_VNC_THREAD > > diff --git a/ui/vnc.c b/ui/vnc.c > index 610f884..48a81a2 100644 > --- a/ui/vnc.c > +++ b/ui/vnc.c > @@ -1011,6 +1011,10 @@ static void vnc_disconnect_finish(VncState *vs) > > #ifdef CONFIG_VNC_THREAD 
> qemu_mutex_destroy(&vs->output_mutex); > + qemu_set_fd_handler2(vs->jobs_socks[0], NULL, NULL, NULL, vs); > + close(vs->jobs_socks[0]); > + close(vs->jobs_socks[1]); > + buffer_free(&vs->jobs_buffer); > #endif > for (i = 0; i< VNC_STAT_ROWS; ++i) { > qemu_free(vs->lossy_rect[i]); > @@ -1226,6 +1230,16 @@ static long vnc_client_read_plain(VncState *vs) > return ret; > } > > +#ifdef CONFIG_VNC_THREAD > +static void vnc_jobs_read(void *opaque) > +{ > + VncState *vs = opaque; > + char dummy; > + > + recv(vs->jobs_socks[0], (void *)&dummy, 1, 0); > + vnc_jobs_consume_buffer(vs); > +} > +#endif > > /* > * First function called whenever there is more data to be read from > @@ -2525,6 +2539,8 @@ static void vnc_connect(VncDisplay *vd, int csock) > > #ifdef CONFIG_VNC_THREAD > qemu_mutex_init(&vs->output_mutex); > + qemu_socketpair(AF_UNIX, SOCK_STREAM, 0, vs->jobs_socks); > + qemu_set_fd_handler2(vs->jobs_socks[0], NULL, vnc_jobs_read, NULL, vs); > #endif > > QTAILQ_INSERT_HEAD(&vd->clients, vs, next); > diff --git a/ui/vnc.h b/ui/vnc.h > index 8a1e7b9..0f98f2e 100644 > --- a/ui/vnc.h > +++ b/ui/vnc.h > @@ -283,6 +283,8 @@ struct VncState > VncJob job; > #else > QemuMutex output_mutex; > + Buffer jobs_buffer; > + int jobs_socks[2]; > #endif > > /* Encoding specific, if you add something here, don't forget to