* Re: [Qemu-devel] [PATCH v4] QEMUBH: make AioContext's bh re-entrant
2013-06-24 9:54 [Qemu-devel] [PATCH v4] QEMUBH: make AioContext's bh re-entrant Liu Ping Fan
@ 2013-06-24 8:01 ` Stefan Hajnoczi
From: Stefan Hajnoczi @ 2013-06-24 8:01 UTC
To: Liu Ping Fan; +Cc: Kevin Wolf, Paolo Bonzini, qemu-devel, Anthony Liguori
On Mon, Jun 24, 2013 at 05:54:26PM +0800, Liu Ping Fan wrote:
> BHs will be used outside the big lock, so introduce a lock to protect
> the writers, i.e. the BH adders and the deleter, from one another. The
> lock only affects the writers; a BH's callback does not take this
> extra lock. Note that for the same AioContext, aio_bh_poll() cannot
> yet run in parallel.
>
> Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
>
> --------
> v3->v4
> resolve memory order of bh->idle and ->scheduled
> add comments for qemu_bh_delete/cancel
>
>
> ---
> async.c | 32 ++++++++++++++++++++++++++++++--
> include/block/aio.h | 7 +++++++
> 2 files changed, 37 insertions(+), 2 deletions(-)
>
> diff --git a/async.c b/async.c
> index 90fe906..108d7c3 100644
> --- a/async.c
> +++ b/async.c
> @@ -47,11 +47,16 @@ QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
> bh->ctx = ctx;
> bh->cb = cb;
> bh->opaque = opaque;
> + qemu_mutex_lock(&ctx->bh_lock);
> bh->next = ctx->first_bh;
> + /* Make sure the members ready before putting bh into list */
> + smp_wmb();
s/members/member is/
* [Qemu-devel] [PATCH v4] QEMUBH: make AioContext's bh re-entrant
@ 2013-06-24 9:54 Liu Ping Fan
2013-06-24 8:01 ` Stefan Hajnoczi
From: Liu Ping Fan @ 2013-06-24 9:54 UTC
To: qemu-devel; +Cc: Kevin Wolf, Paolo Bonzini, Stefan Hajnoczi, Anthony Liguori
BHs will be used outside the big lock, so introduce a lock to protect
the writers, i.e. the BH adders and the deleter, from one another. The
lock only affects the writers; a BH's callback does not take this
extra lock. Note that for the same AioContext, aio_bh_poll() cannot
yet run in parallel.
Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
--------
v3->v4
resolve memory order of bh->idle and ->scheduled
add comments for qemu_bh_delete/cancel
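The barriers in the diff below follow a simple publish/consume contract:
the scheduler makes bh->idle (and any data the callback needs) visible
before bh->scheduled, while aio_bh_poll reads bh->scheduled before it
reads bh->idle. Here is a minimal standalone sketch of that contract,
using C11 fences as stand-ins for QEMU's smp_wmb()/smp_rmb() and
simplified names that are not QEMU's API:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct sketch_bh {
        _Atomic int scheduled;
        int idle;               /* plain data, published before 'scheduled' */
    };

    static void sketch_schedule(struct sketch_bh *bh, int idle)
    {
        bh->idle = idle;
        /* publish idle before scheduled, like smp_wmb() in qemu_bh_schedule */
        atomic_thread_fence(memory_order_release);
        atomic_store_explicit(&bh->scheduled, 1, memory_order_relaxed);
    }

    static bool sketch_poll(struct sketch_bh *bh)
    {
        if (atomic_load_explicit(&bh->scheduled, memory_order_relaxed)) {
            atomic_store_explicit(&bh->scheduled, 0, memory_order_relaxed);
            /* read scheduled before idle, like smp_rmb() in aio_bh_poll */
            atomic_thread_fence(memory_order_acquire);
            return !bh->idle;   /* non-idle work means progress was made */
        }
        return false;
    }

With this contract, aio_bh_poll itself stays lock-free; bh_lock only
serializes list mutation in aio_bh_new and the deleted-BH sweep.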
---
async.c | 32 ++++++++++++++++++++++++++++++--
include/block/aio.h | 7 +++++++
2 files changed, 37 insertions(+), 2 deletions(-)
diff --git a/async.c b/async.c
index 90fe906..108d7c3 100644
--- a/async.c
+++ b/async.c
@@ -47,11 +47,16 @@ QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
bh->ctx = ctx;
bh->cb = cb;
bh->opaque = opaque;
+ qemu_mutex_lock(&ctx->bh_lock);
bh->next = ctx->first_bh;
+ /* Make sure the members ready before putting bh into list */
+ smp_wmb();
ctx->first_bh = bh;
+ qemu_mutex_unlock(&ctx->bh_lock);
return bh;
}
+/* aio_bh_poll cannot be called concurrently for the same AioContext */
int aio_bh_poll(AioContext *ctx)
{
QEMUBH *bh, **bhp, *next;
@@ -61,9 +66,15 @@ int aio_bh_poll(AioContext *ctx)
ret = 0;
for (bh = ctx->first_bh; bh; bh = next) {
+ /* Make sure bh is fetched before its members are accessed */
+ smp_read_barrier_depends();
next = bh->next;
if (!bh->deleted && bh->scheduled) {
bh->scheduled = 0;
+ /* Paired with the write barrier in bh schedule to ensure that idle and
+ * the callback's data are read only after bh has been scheduled.
+ */
+ smp_rmb();
if (!bh->idle)
ret = 1;
bh->idle = 0;
@@ -75,6 +86,7 @@ int aio_bh_poll(AioContext *ctx)
/* remove deleted bhs */
if (!ctx->walking_bh) {
+ qemu_mutex_lock(&ctx->bh_lock);
bhp = &ctx->first_bh;
while (*bhp) {
bh = *bhp;
@@ -85,6 +97,7 @@ int aio_bh_poll(AioContext *ctx)
bhp = &bh->next;
}
}
+ qemu_mutex_unlock(&ctx->bh_lock);
}
return ret;
@@ -94,24 +107,38 @@ void qemu_bh_schedule_idle(QEMUBH *bh)
{
if (bh->scheduled)
return;
- bh->scheduled = 1;
bh->idle = 1;
+ /* Make sure idle and any writes needed by the callback are done before
+ * the locations are read in aio_bh_poll.
+ */
+ smp_wmb();
+ bh->scheduled = 1;
}
void qemu_bh_schedule(QEMUBH *bh)
{
if (bh->scheduled)
return;
- bh->scheduled = 1;
bh->idle = 0;
+ /* Make sure idle and any writes needed by the callback are done before
+ * the locations are read in aio_bh_poll.
+ */
+ smp_wmb();
+ bh->scheduled = 1;
aio_notify(bh->ctx);
}
+
+/* This function is asynchronous.
+ */
void qemu_bh_cancel(QEMUBH *bh)
{
bh->scheduled = 0;
}
+/* This function is asynchronous; the bottom half is only actually deleted
+ * later, when aio_bh_poll removes deleted BHs.
+ */
void qemu_bh_delete(QEMUBH *bh)
{
bh->scheduled = 0;
@@ -211,6 +238,7 @@ AioContext *aio_context_new(void)
ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
ctx->pollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD));
ctx->thread_pool = NULL;
+ qemu_mutex_init(&ctx->bh_lock);
event_notifier_init(&ctx->notifier, false);
aio_set_event_notifier(ctx, &ctx->notifier,
(EventNotifierHandler *)
diff --git a/include/block/aio.h b/include/block/aio.h
index 1836793..cc77771 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -17,6 +17,7 @@
#include "qemu-common.h"
#include "qemu/queue.h"
#include "qemu/event_notifier.h"
+#include "qemu/thread.h"
typedef struct BlockDriverAIOCB BlockDriverAIOCB;
typedef void BlockDriverCompletionFunc(void *opaque, int ret);
@@ -53,6 +54,8 @@ typedef struct AioContext {
*/
int walking_handlers;
+ /* lock to protect the BH adders and the deleter from one another */
+ QemuMutex bh_lock;
/* Anchor of the list of Bottom Halves belonging to the context */
struct QEMUBH *first_bh;
@@ -127,6 +130,8 @@ void aio_notify(AioContext *ctx);
* aio_bh_poll: Poll bottom halves for an AioContext.
*
* These are internal functions used by the QEMU main loop.
+ * Note that aio_bh_poll cannot be called concurrently
+ * for the same AioContext.
*/
int aio_bh_poll(AioContext *ctx);
@@ -163,6 +168,8 @@ void qemu_bh_cancel(QEMUBH *bh);
* Deleting a bottom half frees the memory that was allocated for it by
* qemu_bh_new. It also implies canceling the bottom half if it was
* scheduled.
+ * This function is asynchronous; the bottom half is only actually freed
+ * later, when aio_bh_poll removes deleted bottom halves.
*
* @bh: The bottom half to be deleted.
*/
--
1.8.1.4