Message-ID: <4D7FAACE.6060403@dlh.net>
Date: Tue, 15 Mar 2011 19:07:10 +0100
From: Peter Lieven
In-Reply-To: <4D7F99EF.6010505@dlh.net>
Subject: [Qemu-devel] Re: [PATCH v5] vnc: don't mess up with iohandlers in the vnc thread
To: Corentin Chary
Cc: Anthony Liguori, Paolo Bonzini, qemu-devel, kvm@vger.kernel.org, Jan Kiszka

On 15.03.2011 17:55, Peter Lieven wrote:
> On 14.03.2011 10:19, Corentin Chary wrote:
>> On Thu, Mar 10, 2011 at 3:13 PM, Corentin Chary wrote:
>>> The threaded VNC server messes with QEMU's fd handlers without any
>>> kind of locking, and that can cause some nasty race conditions.
>>>
>>> Using qemu_mutex_lock_iothread() won't work because vnc_dpy_cpy(),
>>> which will wait for the current job queue to finish, can be called
>>> with the iothread lock held.
>>>
>>> Instead, we now store the data in a temporary buffer and use a bottom
>>> half to notify the main thread that new data is available.
>>>
>>> vnc_[un]lock_output() is still needed to access VncState members like
>>> abort, csock or jobs_buffer.
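
(Aside for anyone following along: the handoff described above -- the worker
fills a mutex-guarded buffer and then schedules a bottom half so that only
the main thread touches the socket -- is sketched below in a minimal,
self-contained form. It uses plain pthreads and a pipe in place of QEMU's
worker thread and BH machinery; every name in it is made up for
illustration, and it is not code from the patch.)

#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static char shared_buf[4096];   /* stands in for VncState.jobs_buffer   */
static size_t shared_len;
static int notify_fd[2];        /* pipe standing in for the bottom half */

static void *worker(void *arg)
{
    const char *chunk = "encoded rectangle\n";
    int i;

    (void)arg;
    for (i = 0; i < 3; i++) {
        pthread_mutex_lock(&lock);            /* cf. vnc_lock_output()  */
        memcpy(shared_buf + shared_len, chunk, strlen(chunk));
        shared_len += strlen(chunk);
        pthread_mutex_unlock(&lock);
        (void)write(notify_fd[1], "x", 1);    /* cf. qemu_bh_schedule() */
    }
    return NULL;
}

/* cf. vnc_jobs_consume_buffer(): runs only in the main thread */
static void consume(void)
{
    pthread_mutex_lock(&lock);
    if (shared_len) {
        fwrite(shared_buf, 1, shared_len, stdout);  /* cf. vnc_write()    */
        shared_len = 0;                             /* cf. buffer_reset() */
    }
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    pthread_t tid;
    char token;
    int i;

    if (pipe(notify_fd) != 0) {
        return 1;
    }
    pthread_create(&tid, NULL, worker, NULL);
    for (i = 0; i < 3; i++) {                 /* stand-in for the main loop */
        (void)read(notify_fd[0], &token, 1);  /* woken by the "BH"          */
        consume();
    }
    pthread_join(tid, NULL);
    return 0;
}

The point mirrored from the patch: the worker never writes to the client
itself, it only appends and pokes; draining and flushing happen in the
main thread.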
>>>
>>> Signed-off-by: Corentin Chary
>>> ---
>>>  ui/vnc-jobs-async.c |   48 +++++++++++++++++++++++++++++-------------------
>>>  ui/vnc-jobs.h       |    1 +
>>>  ui/vnc.c            |   12 ++++++++++++
>>>  ui/vnc.h            |    2 ++
>>>  4 files changed, 44 insertions(+), 19 deletions(-)
>>>
>>> diff --git a/ui/vnc-jobs-async.c b/ui/vnc-jobs-async.c
>>> index f596247..4438776 100644
>>> --- a/ui/vnc-jobs-async.c
>>> +++ b/ui/vnc-jobs-async.c
>>> @@ -28,6 +28,7 @@
>>>
>>>  #include "vnc.h"
>>>  #include "vnc-jobs.h"
>>> +#include "qemu_socket.h"
>>>
>>>  /*
>>>   * Locking:
>>> @@ -155,6 +156,24 @@ void vnc_jobs_join(VncState *vs)
>>>          qemu_cond_wait(&queue->cond, &queue->mutex);
>>>      }
>>>      vnc_unlock_queue(queue);
>>> +    vnc_jobs_consume_buffer(vs);
>>> +}
>>> +
>>> +void vnc_jobs_consume_buffer(VncState *vs)
>>> +{
>>> +    bool flush;
>>> +
>>> +    vnc_lock_output(vs);
>>> +    if (vs->jobs_buffer.offset) {
>>> +        vnc_write(vs, vs->jobs_buffer.buffer, vs->jobs_buffer.offset);
>>> +        buffer_reset(&vs->jobs_buffer);
>>> +    }
>>> +    flush = vs->csock != -1 && vs->abort != true;
>>> +    vnc_unlock_output(vs);
>>> +
>>> +    if (flush) {
>>> +        vnc_flush(vs);
>>> +    }
>>>  }
>>>
>>>  /*
>>> @@ -197,7 +216,6 @@ static int vnc_worker_thread_loop(VncJobQueue *queue)
>>>      VncState vs;
>>>      int n_rectangles;
>>>      int saved_offset;
>>> -    bool flush;
>>>
>>>      vnc_lock_queue(queue);
>>>      while (QTAILQ_EMPTY(&queue->jobs) && !queue->exit) {
>>> @@ -213,6 +231,7 @@ static int vnc_worker_thread_loop(VncJobQueue *queue)
>>>
>>>      vnc_lock_output(job->vs);
>>>      if (job->vs->csock == -1 || job->vs->abort == true) {
>>> +        vnc_unlock_output(job->vs);
>>>          goto disconnected;
>>>      }
>>>      vnc_unlock_output(job->vs);
>>> @@ -233,10 +252,6 @@ static int vnc_worker_thread_loop(VncJobQueue *queue)
>>>
>>>          if (job->vs->csock == -1) {
>>>              vnc_unlock_display(job->vs->vd);
>>> -            /* output mutex must be locked before going to
>>> -             * disconnected:
>>> -             */
>>> -            vnc_lock_output(job->vs);
>>>              goto disconnected;
>>>          }
>>>
>>> @@ -254,24 +269,19 @@ static int vnc_worker_thread_loop(VncJobQueue *queue)
>>>      vs.output.buffer[saved_offset] = (n_rectangles >> 8) & 0xFF;
>>>      vs.output.buffer[saved_offset + 1] = n_rectangles & 0xFF;
>>>
>>> -    /* Switch back buffers */
>>>      vnc_lock_output(job->vs);
>>> -    if (job->vs->csock == -1) {
>>> -        goto disconnected;
>>> +    if (job->vs->csock != -1) {
>>> +        buffer_reserve(&job->vs->jobs_buffer, vs.output.offset);
>>> +        buffer_append(&job->vs->jobs_buffer, vs.output.buffer,
>>> +                      vs.output.offset);
>>> +        /* Copy persistent encoding data */
>>> +        vnc_async_encoding_end(job->vs, &vs);
>>> +
>>> +        qemu_bh_schedule(job->vs->bh);
>>>      }
>>> -
>>> -    vnc_write(job->vs, vs.output.buffer, vs.output.offset);
>>> -
>>> -disconnected:
>>> -    /* Copy persistent encoding data */
>>> -    vnc_async_encoding_end(job->vs, &vs);
>>> -    flush = (job->vs->csock != -1 && job->vs->abort != true);
>>>      vnc_unlock_output(job->vs);
>>>
>>> -    if (flush) {
>>> -        vnc_flush(job->vs);
>>> -    }
>>> -
>>> +disconnected:
>>>      vnc_lock_queue(queue);
>>>      QTAILQ_REMOVE(&queue->jobs, job, next);
>>>      vnc_unlock_queue(queue);
>>> diff --git a/ui/vnc-jobs.h b/ui/vnc-jobs.h
>>> index b8dab81..4c661f9 100644
>>> --- a/ui/vnc-jobs.h
>>> +++ b/ui/vnc-jobs.h
>>> @@ -40,6 +40,7 @@ void vnc_jobs_join(VncState *vs);
>>>
>>>  #ifdef CONFIG_VNC_THREAD
>>>
>>> +void vnc_jobs_consume_buffer(VncState *vs);
>>>  void vnc_start_worker_thread(void);
>>>  bool vnc_worker_thread_running(void);
>>>  void vnc_stop_worker_thread(void);
>>> diff --git a/ui/vnc.c b/ui/vnc.c
>>> index 610f884..f28873b 100644
>>> --- a/ui/vnc.c
>>> +++ b/ui/vnc.c
>>> @@ -1011,7 +1011,10 @@ static void vnc_disconnect_finish(VncState *vs)
>>>
>>>  #ifdef CONFIG_VNC_THREAD
>>>      qemu_mutex_destroy(&vs->output_mutex);
>>> +    qemu_bh_delete(vs->bh);
>>> +    buffer_free(&vs->jobs_buffer);
>>>  #endif
>>> +
>>>      for (i = 0; i < VNC_STAT_ROWS; ++i) {
>>>          qemu_free(vs->lossy_rect[i]);
>>>      }
>>> @@ -1226,6 +1229,14 @@ static long vnc_client_read_plain(VncState *vs)
>>>      return ret;
>>>  }
>>>
>>> +#ifdef CONFIG_VNC_THREAD
>>> +static void vnc_jobs_bh(void *opaque)
>>> +{
>>> +    VncState *vs = opaque;
>>> +
>>> +    vnc_jobs_consume_buffer(vs);
>>> +}
>>> +#endif
>>>
>>>  /*
>>>   * First function called whenever there is more data to be read from
>>> @@ -2525,6 +2536,7 @@ static void vnc_connect(VncDisplay *vd, int csock)
>>>
>>>  #ifdef CONFIG_VNC_THREAD
>>>      qemu_mutex_init(&vs->output_mutex);
>>> +    vs->bh = qemu_bh_new(vnc_jobs_bh, vs);
>>>  #endif
>>>
>>>      QTAILQ_INSERT_HEAD(&vd->clients, vs, next);
>>> diff --git a/ui/vnc.h b/ui/vnc.h
>>> index 8a1e7b9..bca5f87 100644
>>> --- a/ui/vnc.h
>>> +++ b/ui/vnc.h
>>> @@ -283,6 +283,8 @@ struct VncState
>>>      VncJob job;
>>>  #else
>>>      QemuMutex output_mutex;
>>> +    QEMUBH *bh;
>>> +    Buffer jobs_buffer;
>>> #endif
>>>
>>>      /* Encoding specific, if you add something here, don't forget to
>>> --
>>> 1.7.3.4
>>>
>>>
>> Paolo, Anthony, Jan, are you OK with that one?
>> Peter Lieven, could you test that patch?
> I have seen one segfault; unfortunately, I had no debugger attached.
>
> But what's equally bad: during a stress test I managed to trigger the
> oom-killer. It seems we have a memory leak somewhere...

Commit c53af37f375ce9c4999ff451c51173bdc1167e67 seems to fix this.
Remember that this commit is not in 0.14.0, which could cause a lot of
trouble for 0.14.0 users when they do a lot of VNC work...

Peter

>
> Peter
>
>
>> Thanks
>>