From: Paolo Bonzini <pbonzini@redhat.com>
To: Fam Zheng <famz@redhat.com>, qemu-devel@nongnu.org
Subject: Re: [Qemu-devel] [PATCH v3 4/4] dma-helpers: Fix race condition of continue_after_map_failure and dma_aio_cancel
Date: Mon, 16 Mar 2015 08:36:59 +0100
Message-ID: <5506881B.3010405@redhat.com>
In-Reply-To: <1426483910-24597-5-git-send-email-famz@redhat.com>
On 16/03/2015 06:31, Fam Zheng wrote:
> If DMA's owning thread cancels the IO while the bounce buffer's owning thread
> is notifying the "cpu client list", a use-after-free happens:
>
> continue_after_map_failure               dma_aio_cancel
> ------------------------------------------------------------------
> aio_bh_new
>                                           qemu_bh_delete
> qemu_bh_schedule (use after free)
>
> Also, the old code doesn't run the bh in the right AioContext.
>
> Fix both problems by passing a QEMUBH to cpu_register_map_client.
>
> Signed-off-by: Fam Zheng <famz@redhat.com>
> ---
>  dma-helpers.c             | 17 ++++++++---------
>  exec.c                    | 32 +++++++++++++++++++++-----------
>  include/exec/cpu-common.h |  3 ++-
>  3 files changed, 31 insertions(+), 21 deletions(-)
>
> diff --git a/dma-helpers.c b/dma-helpers.c
> index 6918572..1fddf6a 100644
> --- a/dma-helpers.c
> +++ b/dma-helpers.c
> @@ -92,14 +92,6 @@ static void reschedule_dma(void *opaque)
>      dma_blk_cb(dbs, 0);
>  }
>
> -static void continue_after_map_failure(void *opaque)
> -{
> -    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
> -
> -    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
> -    qemu_bh_schedule(dbs->bh);
> -}
> -
>  static void dma_blk_unmap(DMAAIOCB *dbs)
>  {
>      int i;
> @@ -161,7 +153,9 @@ static void dma_blk_cb(void *opaque, int ret)
>
>      if (dbs->iov.size == 0) {
>          trace_dma_map_wait(dbs);
> -        cpu_register_map_client(dbs, continue_after_map_failure);
> +        dbs->bh = aio_bh_new(blk_get_aio_context(dbs->blk),
> +                             reschedule_dma, dbs);
> +        cpu_register_map_client(dbs->bh);
>          return;
>      }
>
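
Just to spell out why this hunk also fixes the AioContext half of the bug: a
BH runs in the AioContext it was created in, no matter which thread ends up
scheduling it, while the old qemu_bh_new() call tied the BH to the main loop
rather than to the BlockBackend (which may be driven by an iothread). A
minimal illustration of the pattern, not code from this series (the helper
name is made up):

    #include "block/aio.h"
    #include "sysemu/block-backend.h"
    #include "exec/cpu-common.h"

    /* Hypothetical helper, only to show the pattern the patch adopts: create
     * the BH up front in the BlockBackend's own AioContext and hand it to the
     * map-client list, which does nothing but qemu_bh_schedule() it. */
    static QEMUBH *register_map_retry_bh(BlockBackend *blk,
                                         QEMUBHFunc *cb, void *opaque)
    {
        /* The callback runs in blk's context even if the bounce buffer is
         * released, and the BH scheduled, from another thread. */
        QEMUBH *bh = aio_bh_new(blk_get_aio_context(blk), cb, opaque);

        cpu_register_map_client(bh);
        return bh;
    }
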
> @@ -183,6 +177,11 @@ static void dma_aio_cancel(BlockAIOCB *acb)
>      if (dbs->acb) {
>          blk_aio_cancel_async(dbs->acb);
>      }
> +    if (dbs->bh) {
> +        cpu_unregister_map_client(dbs->bh);
> +        qemu_bh_delete(dbs->bh);
> +        dbs->bh = NULL;
> +    }
>  }
>
>
> diff --git a/exec.c b/exec.c
> index 20381a0..b15ca5e 100644
> --- a/exec.c
> +++ b/exec.c
> @@ -2480,8 +2480,7 @@ typedef struct {
>  static BounceBuffer bounce;
>
>  typedef struct MapClient {
> -    void *opaque;
> -    void (*callback)(void *opaque);
> +    QEMUBH *bh;
>      QLIST_ENTRY(MapClient) link;
>  } MapClient;
>
> @@ -2489,30 +2488,29 @@ QemuMutex map_client_list_lock;
>  static QLIST_HEAD(map_client_list, MapClient) map_client_list
>      = QLIST_HEAD_INITIALIZER(map_client_list);
>
> +static void cpu_unregister_map_client_do(MapClient *client);
>  static void cpu_notify_map_clients_unlocked(void)
>  {
>      MapClient *client;
>
>      while (!QLIST_EMPTY(&map_client_list)) {
>          client = QLIST_FIRST(&map_client_list);
> -        client->callback(client->opaque);
> -        cpu_unregister_map_client(client);
> +        qemu_bh_schedule(client->bh);
> +        cpu_unregister_map_client_do(client);
>      }
>  }
>
> -void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
> +void cpu_register_map_client(QEMUBH *bh)
>  {
>      MapClient *client = g_malloc(sizeof(*client));
>
>      qemu_mutex_lock(&map_client_list_lock);
> -    client->opaque = opaque;
> -    client->callback = callback;
> +    client->bh = bh;
>      QLIST_INSERT_HEAD(&map_client_list, client, link);
>      if (!atomic_read(&bounce.in_use)) {
>          cpu_notify_map_clients_unlocked();
>      }
>      qemu_mutex_unlock(&map_client_list_lock);
> -    return client;
>  }
>
>  void cpu_exec_init_all(void)
> @@ -2525,14 +2523,26 @@ void cpu_exec_init_all(void)
>      qemu_mutex_init(&map_client_list_lock);
>  }
>
> -static void cpu_unregister_map_client(void *_client)
> +static void cpu_unregister_map_client_do(MapClient *client)
>  {
> -    MapClient *client = (MapClient *)_client;
> -
>      QLIST_REMOVE(client, link);
>      g_free(client);
>  }
>
> +void cpu_unregister_map_client(QEMUBH *bh)
> +{
> +    MapClient *client;
> +
> +    qemu_mutex_lock(&map_client_list_lock);
> +    QLIST_FOREACH(client, &map_client_list, link) {
> +        if (client->bh == bh) {
> +            cpu_unregister_map_client_do(client);
> +            break;
> +        }
> +    }
> +    qemu_mutex_unlock(&map_client_list_lock);
> +}
> +
>  static void cpu_notify_map_clients(void)
>  {
>      qemu_mutex_lock(&map_client_list_lock);
> diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
> index fcc3162..43428bd 100644
> --- a/include/exec/cpu-common.h
> +++ b/include/exec/cpu-common.h
> @@ -82,7 +82,8 @@ void *cpu_physical_memory_map(hwaddr addr,
>                                int is_write);
>  void cpu_physical_memory_unmap(void *buffer, hwaddr len,
>                                 int is_write, hwaddr access_len);
> -void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque));
> +void cpu_register_map_client(QEMUBH *bh);
> +void cpu_unregister_map_client(QEMUBH *bh);
>
>  bool cpu_physical_memory_is_io(hwaddr phys_addr);
>
>
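
For the archive, here is roughly how I would expect another user of the new
interface to look. Everything below (MapWaiter and the waiter_* functions) is
a made-up sketch to show the intended lifecycle, not code from this series:

    #include "block/aio.h"
    #include "exec/cpu-common.h"

    typedef struct MapWaiter {
        AioContext *ctx;   /* context the retry must run in */
        QEMUBH *bh;        /* owned here; exec.c only ever schedules it */
    } MapWaiter;

    static void waiter_start(MapWaiter *w)
    {
        /* ... retry cpu_physical_memory_map() and resume the transfer ... */
    }

    /* BH callback: runs in w->ctx, because that is where the BH was created. */
    static void waiter_retry(void *opaque)
    {
        MapWaiter *w = opaque;

        qemu_bh_delete(w->bh);
        w->bh = NULL;
        waiter_start(w);
    }

    static void waiter_wait_for_bounce(MapWaiter *w)
    {
        /* The BH exists before exec.c can see it, so a concurrent notify has
         * nothing to use after free. */
        w->bh = aio_bh_new(w->ctx, waiter_retry, w);
        cpu_register_map_client(w->bh);
    }

    static void waiter_cancel(MapWaiter *w)
    {
        if (w->bh) {
            /* Once this returns, exec.c holds no reference to the BH; if it
             * was already scheduled, deleting it simply keeps it from running. */
            cpu_unregister_map_client(w->bh);
            qemu_bh_delete(w->bh);
            w->bh = NULL;
        }
    }

The register/unregister pair brackets the BH's visibility to exec.c, which is
what removes the race with cancellation.
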
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>