From mboxrd@z Thu Jan 1 00:00:00 1970 Received: from eggs.gnu.org ([2001:4830:134:3::10]:47408) by lists.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1UnKC3-0000rl-Et for qemu-devel@nongnu.org; Thu, 13 Jun 2013 22:56:52 -0400 Received: from Debian-exim by eggs.gnu.org with spam-scanned (Exim 4.71) (envelope-from ) id 1UnKC2-0006iK-Hy for qemu-devel@nongnu.org; Thu, 13 Jun 2013 22:56:51 -0400 Received: from mail-ie0-x236.google.com ([2607:f8b0:4001:c03::236]:33635) by eggs.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1UnKC2-0006iC-Cs for qemu-devel@nongnu.org; Thu, 13 Jun 2013 22:56:50 -0400 Received: by mail-ie0-f182.google.com with SMTP id s9so237414iec.13 for ; Thu, 13 Jun 2013 19:56:49 -0700 (PDT) From: Liu Ping Fan Date: Fri, 14 Jun 2013 10:55:54 +0800 Message-Id: <1371178554-14432-1-git-send-email-pingfanl@linux.vnet.ibm.com> Subject: [Qemu-devel] [PATCH] QEMUBH: make AioContext's bh re-entrant List-Id: List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , To: qemu-devel@nongnu.org Cc: Paolo Bonzini , Anthony Liguori The BH list will be used outside the big lock, so introduce a lock to protect it. Note that the lock only protects writers to the list; a BH's callback does not take this extra lock. 
Signed-off-by: Liu Ping Fan --- async.c | 10 +++++++++- include/block/aio.h | 2 ++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/async.c b/async.c index 90fe906..b1dcead 100644 --- a/async.c +++ b/async.c @@ -47,8 +47,11 @@ QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque) bh->ctx = ctx; bh->cb = cb; bh->opaque = opaque; + qemu_mutex_lock(&ctx->bh_lock); bh->next = ctx->first_bh; + smp_wmb(); ctx->first_bh = bh; + qemu_mutex_unlock(&ctx->bh_lock); return bh; } @@ -60,7 +63,9 @@ int aio_bh_poll(AioContext *ctx) ctx->walking_bh++; ret = 0; - for (bh = ctx->first_bh; bh; bh = next) { + bh = ctx->first_bh; + smp_rmb(); + for (; bh; bh = next) { next = bh->next; if (!bh->deleted && bh->scheduled) { bh->scheduled = 0; @@ -75,6 +80,7 @@ int aio_bh_poll(AioContext *ctx) /* remove deleted bhs */ if (!ctx->walking_bh) { + qemu_mutex_lock(&ctx->bh_lock); bhp = &ctx->first_bh; while (*bhp) { bh = *bhp; @@ -85,6 +91,7 @@ int aio_bh_poll(AioContext *ctx) bhp = &bh->next; } } + qemu_mutex_unlock(&ctx->bh_lock); } return ret; @@ -211,6 +218,7 @@ AioContext *aio_context_new(void) ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext)); ctx->pollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD)); ctx->thread_pool = NULL; + qemu_mutex_init(&ctx->bh_lock); event_notifier_init(&ctx->notifier, false); aio_set_event_notifier(ctx, &ctx->notifier, (EventNotifierHandler *) diff --git a/include/block/aio.h b/include/block/aio.h index 1836793..a65d16f 100644 --- a/include/block/aio.h +++ b/include/block/aio.h @@ -17,6 +17,7 @@ #include "qemu-common.h" #include "qemu/queue.h" #include "qemu/event_notifier.h" +#include "qemu/thread.h" typedef struct BlockDriverAIOCB BlockDriverAIOCB; typedef void BlockDriverCompletionFunc(void *opaque, int ret); @@ -53,6 +54,7 @@ typedef struct AioContext { */ int walking_handlers; + QemuMutex bh_lock; /* Anchor of the list of Bottom Halves belonging to the context */ struct QEMUBH *first_bh; -- 1.8.1.4