[Qemu-devel] [PATCH for-2.5] virtio-9p: use QEMU thread pool
From: Paolo Bonzini @ 2015-11-27 11:43 UTC
To: qemu-devel; +Cc: aneesh.kumar
The QEMU thread pool already has a mechanism to invoke callbacks in the main
thread. It does not need an EventNotifier and it is more efficient too.
Use it instead of GAsyncQueue + GThreadPool + glue.
As a side effect, it silences Coverity's complaint about an unchecked
return value for event_notifier_init.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
hw/9pfs/virtio-9p-coth.c | 79 +++++++++++-----------------------------------
hw/9pfs/virtio-9p-coth.h | 9 +-----
hw/9pfs/virtio-9p-device.c | 4 ---
3 files changed, 20 insertions(+), 72 deletions(-)
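For reviewers less familiar with the block-layer pool, the handoff in
v9fs_co_run_in_worker() works in three hops: the coroutine schedules a
bottom half and yields; the bottom half (co_run_in_worker_bh below)
submits the coroutine to the thread pool, whose worker re-enters it so
the blocking code runs off the main loop; when the worker function
returns, the pool invokes the completion callback back in the main
loop, which re-enters the coroutine one last time. Below is a minimal
sketch of that round trip, assuming the 2.5-era thread-pool,
bottom-half and coroutine APIs that the diff uses; example_op(),
submit_bh() and do_blocking_io() are illustrative names, not code from
this patch:
#include "qemu-common.h"
#include "block/thread-pool.h"
#include "qemu/coroutine.h"
#include "qemu/main-loop.h"
/* Worker thread: re-enter the coroutine so its blocking code runs here. */
static int worker_func(void *arg)
{
    qemu_coroutine_enter(arg, NULL);
    return 0;
}
/* Main loop: the pool calls this after worker_func() returns. */
static void completion_cb(void *opaque, int ret)
{
    qemu_coroutine_enter(opaque, NULL);
}
/* Main loop: runs only after the coroutine has yielded, so the worker
 * cannot re-enter the coroutine while it is still running. */
static void submit_bh(void *opaque)
{
    Coroutine *co = opaque;
    thread_pool_submit_aio(qemu_get_aio_context()->thread_pool,
                           worker_func, co, completion_cb, co);
}
extern void do_blocking_io(void);   /* illustrative stand-in */
static void coroutine_fn example_op(void *opaque)
{
    QEMUBH *bh = qemu_bh_new(submit_bh, qemu_coroutine_self());
    qemu_bh_schedule(bh);
    qemu_coroutine_yield();   /* resumed by worker_func, in a worker */
    qemu_bh_delete(bh);
    do_blocking_io();         /* safely off the main loop here */
    qemu_coroutine_yield();   /* resumed by completion_cb, main loop */
    /* back in the main loop; the result can be handed to the caller */
}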
diff --git a/hw/9pfs/virtio-9p-coth.c b/hw/9pfs/virtio-9p-coth.c
index 5057f8d..fb6e8f8 100644
--- a/hw/9pfs/virtio-9p-coth.c
+++ b/hw/9pfs/virtio-9p-coth.c
@@ -12,71 +12,30 @@
*
*/
-#include "fsdev/qemu-fsdev.h"
-#include "qemu/thread.h"
-#include "qemu/event_notifier.h"
+#include "qemu-common.h"
+#include "block/thread-pool.h"
#include "qemu/coroutine.h"
+#include "qemu/main-loop.h"
#include "virtio-9p-coth.h"
-/* v9fs glib thread pool */
-static V9fsThPool v9fs_pool;
+/* Called from QEMU I/O thread. */
+static void coroutine_enter_cb(void *opaque, int ret)
+{
+ Coroutine *co = opaque;
+ qemu_coroutine_enter(co, NULL);
+}
+
+/* Called from worker thread. */
+static int coroutine_enter_func(void *arg)
+{
+ Coroutine *co = arg;
+ qemu_coroutine_enter(co, NULL);
+ return 0;
+}
void co_run_in_worker_bh(void *opaque)
{
Coroutine *co = opaque;
- g_thread_pool_push(v9fs_pool.pool, co, NULL);
-}
-
-static void v9fs_qemu_process_req_done(EventNotifier *e)
-{
- Coroutine *co;
-
- event_notifier_test_and_clear(e);
-
- while ((co = g_async_queue_try_pop(v9fs_pool.completed)) != NULL) {
- qemu_coroutine_enter(co, NULL);
- }
-}
-
-static void v9fs_thread_routine(gpointer data, gpointer user_data)
-{
- Coroutine *co = data;
-
- qemu_coroutine_enter(co, NULL);
-
- g_async_queue_push(v9fs_pool.completed, co);
-
- event_notifier_set(&v9fs_pool.e);
-}
-
-int v9fs_init_worker_threads(void)
-{
- int ret = 0;
- V9fsThPool *p = &v9fs_pool;
- sigset_t set, oldset;
-
- sigfillset(&set);
- /* Leave signal handling to the iothread. */
- pthread_sigmask(SIG_SETMASK, &set, &oldset);
-
- p->pool = g_thread_pool_new(v9fs_thread_routine, p, -1, FALSE, NULL);
- if (!p->pool) {
- ret = -1;
- goto err_out;
- }
- p->completed = g_async_queue_new();
- if (!p->completed) {
- /*
- * We are going to terminate.
- * So don't worry about cleanup
- */
- ret = -1;
- goto err_out;
- }
- event_notifier_init(&p->e, 0);
-
- event_notifier_set_handler(&p->e, v9fs_qemu_process_req_done);
-err_out:
- pthread_sigmask(SIG_SETMASK, &oldset, NULL);
- return ret;
+ thread_pool_submit_aio(qemu_get_aio_context()->thread_pool,
+ coroutine_enter_func, co, coroutine_enter_cb, co);
}
diff --git a/hw/9pfs/virtio-9p-coth.h b/hw/9pfs/virtio-9p-coth.h
index 0fbe49a..535743f 100644
--- a/hw/9pfs/virtio-9p-coth.h
+++ b/hw/9pfs/virtio-9p-coth.h
@@ -20,13 +20,6 @@
#include "virtio-9p.h"
#include <glib.h>
-typedef struct V9fsThPool {
- EventNotifier e;
-
- GThreadPool *pool;
- GAsyncQueue *completed;
-} V9fsThPool;
-
/*
* we want to use bottom half because we want to make sure the below
* sequence of events.
@@ -45,7 +38,7 @@ typedef struct V9fsThPool {
qemu_bh_schedule(co_bh); \
/* \
* yield in qemu thread and re-enter back \
- * in glib worker thread \
+ * in worker thread \
*/ \
qemu_coroutine_yield(); \
qemu_bh_delete(co_bh); \
diff --git a/hw/9pfs/virtio-9p-device.c b/hw/9pfs/virtio-9p-device.c
index e3abcfa..944b5f5 100644
--- a/hw/9pfs/virtio-9p-device.c
+++ b/hw/9pfs/virtio-9p-device.c
@@ -116,10 +116,6 @@ static void virtio_9p_device_realize(DeviceState *dev, Error **errp)
" and export path:%s", s->fsconf.fsdev_id, s->ctx.fs_root);
goto out;
}
- if (v9fs_init_worker_threads() < 0) {
- error_setg(errp, "worker thread initialization failed");
- goto out;
- }
/*
* Check details of export path, We need to use fs driver
--
2.5.0
Re: [Qemu-devel] [PATCH for-2.5] virtio-9p: use QEMU thread pool
From: Greg Kurz @ 2015-11-27 12:42 UTC
To: Paolo Bonzini; +Cc: qemu-devel, aneesh.kumar
On Fri, 27 Nov 2015 12:43:06 +0100
Paolo Bonzini <pbonzini@redhat.com> wrote:
> The QEMU thread pool already has a mechanism to invoke callbacks in the main
> thread. It does not need an EventNotifier and it is more efficient too.
> Use it instead of GAsyncQueue + GThreadPool + glue.
>
> As a side effect, it silences Coverity's complaint about an unchecked
> return value for event_notifier_init.
>
And it makes the code a lot nicer too!
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
I have just one minor remark in hw/9pfs/virtio-9p-coth.h but anyway:
Reviewed-by: Greg Kurz <gkurz@linux.vnet.ibm.com>
Tested-by: Greg Kurz <gkurz@linux.vnet.ibm.com>
> [...]
> diff --git a/hw/9pfs/virtio-9p-coth.h b/hw/9pfs/virtio-9p-coth.h
> index 0fbe49a..535743f 100644
> --- a/hw/9pfs/virtio-9p-coth.h
> +++ b/hw/9pfs/virtio-9p-coth.h
> @@ -20,13 +20,6 @@
> #include "virtio-9p.h"
> #include <glib.h>
>
glib.h is no longer needed.
> [...]
Re: [Qemu-devel] [PATCH for-2.5] virtio-9p: use QEMU thread pool
From: Paolo Bonzini @ 2015-11-27 13:08 UTC
To: Greg Kurz; +Cc: qemu-devel, aneesh.kumar
On 27/11/2015 13:42, Greg Kurz wrote:
> On Fri, 27 Nov 2015 12:43:06 +0100
> Paolo Bonzini <pbonzini@redhat.com> wrote:
>
>> The QEMU thread pool already has a mechanism to invoke callbacks in the main
>> thread. It does not need an EventNotifier and it is more efficient too.
>> Use it instead of GAsyncQueue + GThreadPool + glue.
>>
>> As a side effect, it silences Coverity's complaint about an unchecked
>> return value for event_notifier_init.
>
> > And it makes the code a lot nicer too!
Thanks. :)
>> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
>> ---
>
> I have just one minor remark in hw/9pfs/virtio-9p-coth.h but anyway:
>
> Reviewed-by: Greg Kurz <gkurz@linux.vnet.ibm.com>
> Tested-by: Greg Kurz <gkurz@linux.vnet.ibm.com>
Would you like to send a pull request for these patches yourself, and
possibly add yourself to MAINTAINERS? I can help you set up git
(next week).
Paolo
Re: [Qemu-devel] [PATCH for-2.5] virtio-9p: use QEMU thread pool
From: Greg Kurz @ 2015-11-27 14:06 UTC
To: Paolo Bonzini; +Cc: qemu-devel, aneesh.kumar
On Fri, 27 Nov 2015 14:08:47 +0100
Paolo Bonzini <pbonzini@redhat.com> wrote:
> [...]
> Would you like to send a pull request for these patches yourself, and
> possibly add yourself to MAINTAINERS? I can help you setting up git
> (next week).
I'd be glad to. I'll ping you next week then :)
Cheers.
--
Greg