From: Peter Lieven <pl@dlh.net>
To: Corentin Chary <corentin.chary@gmail.com>
Cc: Anthony Liguori <aliguori@linux.vnet.ibm.com>,
Paolo Bonzini <pbonzini@redhat.com>,
qemu-devel <qemu-devel@nongnu.org>,
kvm@vger.kernel.org, Jan Kiszka <jan.kiszka@siemens.com>
Subject: [Qemu-devel] Re: [PATCH v5] vnc: don't mess up with iohandlers in the vnc thread
Date: Tue, 15 Mar 2011 19:07:10 +0100
Message-ID: <4D7FAACE.6060403@dlh.net>
In-Reply-To: <4D7F99EF.6010505@dlh.net>
On 15.03.2011 17:55, Peter Lieven wrote:
> On 14.03.2011 10:19, Corentin Chary wrote:
>> On Thu, Mar 10, 2011 at 3:13 PM, Corentin Chary
>> <corentin.chary@gmail.com> wrote:
>>> The threaded VNC server messed with QEMU's fd handlers without
>>> any kind of locking, and that can cause some nasty race conditions.
>>>
>>> Using qemu_mutex_lock_iothread() won't work because vnc_dpy_copy(),
>>> which waits for the current job queue to finish, can be called with
>>> the iothread lock held.
>>>
>>> Instead, we now store the data in a temporary buffer, and use a bottom
>>> half to notify the main thread that new data is available.
>>>
>>> vnc_[un]lock_output() is still needed to access VncState members like
>>> abort, csock or jobs_buffer.
>>>
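To make the handoff easier to follow: the worker thread only ever
appends its encoded output to a per-client buffer under the output
lock and schedules a bottom half; the bottom half handler always runs
in the main thread's context, so all socket writes and fd-handler
manipulation stay single-threaded. Below is a minimal sketch of that
pattern, condensed from the patch that follows; worker_publish() is a
hypothetical name for illustration only (the real logic lives in
vnc_worker_thread_loop()), while the helpers it calls are the ones the
patch itself uses:

    #include "vnc.h"
    #include "vnc-jobs.h"

    /* Worker-thread side: never touches fd handlers, only hands the
     * encoded output over while holding the output lock.
     * (worker_publish is an illustrative name, not in the patch.) */
    static void worker_publish(VncState *vs, Buffer *output)
    {
        vnc_lock_output(vs);
        if (vs->csock != -1) {
            buffer_reserve(&vs->jobs_buffer, output->offset);
            buffer_append(&vs->jobs_buffer, output->buffer,
                          output->offset);
            qemu_bh_schedule(vs->bh);   /* wake the main loop */
        }
        vnc_unlock_output(vs);
    }

    /* Main-loop side: the bottom half drains jobs_buffer via
     * vnc_jobs_consume_buffer(), which does vnc_write()/vnc_flush()
     * safely from the main thread. */
    static void vnc_jobs_bh(void *opaque)
    {
        VncState *vs = opaque;

        vnc_jobs_consume_buffer(vs);
    }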
>>> Signed-off-by: Corentin Chary <corentin.chary@gmail.com>
>>> ---
>>> ui/vnc-jobs-async.c | 48 +++++++++++++++++++++++++++++-------------------
>>> ui/vnc-jobs.h       |  1 +
>>> ui/vnc.c            | 12 ++++++++++++
>>> ui/vnc.h            |  2 ++
>>> 4 files changed, 44 insertions(+), 19 deletions(-)
>>>
>>> diff --git a/ui/vnc-jobs-async.c b/ui/vnc-jobs-async.c
>>> index f596247..4438776 100644
>>> --- a/ui/vnc-jobs-async.c
>>> +++ b/ui/vnc-jobs-async.c
>>> @@ -28,6 +28,7 @@
>>>
>>> #include "vnc.h"
>>> #include "vnc-jobs.h"
>>> +#include "qemu_socket.h"
>>>
>>> /*
>>> * Locking:
>>> @@ -155,6 +156,24 @@ void vnc_jobs_join(VncState *vs)
>>> qemu_cond_wait(&queue->cond, &queue->mutex);
>>> }
>>> vnc_unlock_queue(queue);
>>> + vnc_jobs_consume_buffer(vs);
>>> +}
>>> +
>>> +void vnc_jobs_consume_buffer(VncState *vs)
>>> +{
>>> + bool flush;
>>> +
>>> + vnc_lock_output(vs);
>>> + if (vs->jobs_buffer.offset) {
>>> + vnc_write(vs, vs->jobs_buffer.buffer, vs->jobs_buffer.offset);
>>> + buffer_reset(&vs->jobs_buffer);
>>> + }
>>> + flush = vs->csock != -1 && vs->abort != true;
>>> + vnc_unlock_output(vs);
>>> +
>>> + if (flush) {
>>> + vnc_flush(vs);
>>> + }
>>> }
>>>
>>> /*
>>> @@ -197,7 +216,6 @@ static int vnc_worker_thread_loop(VncJobQueue *queue)
>>> VncState vs;
>>> int n_rectangles;
>>> int saved_offset;
>>> - bool flush;
>>>
>>> vnc_lock_queue(queue);
>>> while (QTAILQ_EMPTY(&queue->jobs) && !queue->exit) {
>>> @@ -213,6 +231,7 @@ static int vnc_worker_thread_loop(VncJobQueue *queue)
>>>
>>> vnc_lock_output(job->vs);
>>> if (job->vs->csock == -1 || job->vs->abort == true) {
>>> + vnc_unlock_output(job->vs);
>>> goto disconnected;
>>> }
>>> vnc_unlock_output(job->vs);
>>> @@ -233,10 +252,6 @@ static int vnc_worker_thread_loop(VncJobQueue *queue)
>>>
>>> if (job->vs->csock == -1) {
>>> vnc_unlock_display(job->vs->vd);
>>> - /* output mutex must be locked before going to
>>> - * disconnected:
>>> - */
>>> - vnc_lock_output(job->vs);
>>> goto disconnected;
>>> }
>>>
>>> @@ -254,24 +269,19 @@ static int vnc_worker_thread_loop(VncJobQueue *queue)
>>> vs.output.buffer[saved_offset] = (n_rectangles >> 8) & 0xFF;
>>> vs.output.buffer[saved_offset + 1] = n_rectangles & 0xFF;
>>>
>>> - /* Switch back buffers */
>>> vnc_lock_output(job->vs);
>>> - if (job->vs->csock == -1) {
>>> - goto disconnected;
>>> + if (job->vs->csock != -1) {
>>> + buffer_reserve(&job->vs->jobs_buffer, vs.output.offset);
>>> + buffer_append(&job->vs->jobs_buffer, vs.output.buffer,
>>> + vs.output.offset);
>>> + /* Copy persistent encoding data */
>>> + vnc_async_encoding_end(job->vs, &vs);
>>> +
>>> + qemu_bh_schedule(job->vs->bh);
>>> }
>>> -
>>> - vnc_write(job->vs, vs.output.buffer, vs.output.offset);
>>> -
>>> -disconnected:
>>> - /* Copy persistent encoding data */
>>> - vnc_async_encoding_end(job->vs, &vs);
>>> - flush = (job->vs->csock != -1 && job->vs->abort != true);
>>> vnc_unlock_output(job->vs);
>>>
>>> - if (flush) {
>>> - vnc_flush(job->vs);
>>> - }
>>> -
>>> +disconnected:
>>> vnc_lock_queue(queue);
>>> QTAILQ_REMOVE(&queue->jobs, job, next);
>>> vnc_unlock_queue(queue);
>>> diff --git a/ui/vnc-jobs.h b/ui/vnc-jobs.h
>>> index b8dab81..4c661f9 100644
>>> --- a/ui/vnc-jobs.h
>>> +++ b/ui/vnc-jobs.h
>>> @@ -40,6 +40,7 @@ void vnc_jobs_join(VncState *vs);
>>>
>>> #ifdef CONFIG_VNC_THREAD
>>>
>>> +void vnc_jobs_consume_buffer(VncState *vs);
>>> void vnc_start_worker_thread(void);
>>> bool vnc_worker_thread_running(void);
>>> void vnc_stop_worker_thread(void);
>>> diff --git a/ui/vnc.c b/ui/vnc.c
>>> index 610f884..f28873b 100644
>>> --- a/ui/vnc.c
>>> +++ b/ui/vnc.c
>>> @@ -1011,7 +1011,10 @@ static void vnc_disconnect_finish(VncState *vs)
>>>
>>> #ifdef CONFIG_VNC_THREAD
>>> qemu_mutex_destroy(&vs->output_mutex);
>>> + qemu_bh_delete(vs->bh);
>>> + buffer_free(&vs->jobs_buffer);
>>> #endif
>>> +
>>> for (i = 0; i < VNC_STAT_ROWS; ++i) {
>>> qemu_free(vs->lossy_rect[i]);
>>> }
>>> @@ -1226,6 +1229,14 @@ static long vnc_client_read_plain(VncState *vs)
>>> return ret;
>>> }
>>>
>>> +#ifdef CONFIG_VNC_THREAD
>>> +static void vnc_jobs_bh(void *opaque)
>>> +{
>>> + VncState *vs = opaque;
>>> +
>>> + vnc_jobs_consume_buffer(vs);
>>> +}
>>> +#endif
>>>
>>> /*
>>> * First function called whenever there is more data to be read from
>>> @@ -2525,6 +2536,7 @@ static void vnc_connect(VncDisplay *vd, int csock)
>>>
>>> #ifdef CONFIG_VNC_THREAD
>>> qemu_mutex_init(&vs->output_mutex);
>>> + vs->bh = qemu_bh_new(vnc_jobs_bh, vs);
>>> #endif
>>>
>>> QTAILQ_INSERT_HEAD(&vd->clients, vs, next);
>>> diff --git a/ui/vnc.h b/ui/vnc.h
>>> index 8a1e7b9..bca5f87 100644
>>> --- a/ui/vnc.h
>>> +++ b/ui/vnc.h
>>> @@ -283,6 +283,8 @@ struct VncState
>>> VncJob job;
>>> #else
>>> QemuMutex output_mutex;
>>> + QEMUBH *bh;
>>> + Buffer jobs_buffer;
>>> #endif
>>>
>>> /* Encoding specific, if you add something here, don't forget to
>>> --
>>> 1.7.3.4
>>>
>>>
>> Paolo, Anthony, Jan, are you OK with that one?
>> Peter Lieven, could you test that patch?
> I have seen one segfault. Unfortunately, I had no debugger attached.
>
> But what's equally bad: during the stress test, I managed to trigger
> the OOM killer.
> It seems we have a memory leak somewhere...
Commit c53af37f375ce9c4999ff451c51173bdc1167e67
seems to fix this. Note that this fix is not in 0.14.0, which could cause
a lot of trouble for 0.14.0 users when they do a lot of VNC work...

Peter
>
> Peter
>
>
>> Thanks
>>
>