From: Stefan Hajnoczi <stefanha@redhat.com>
To: Mattias Nissler <mnissler@rivosinc.com>
Cc: qemu-devel@nongnu.org,
"Elena Ufimtseva" <elena.ufimtseva@oracle.com>,
"Jagannathan Raman" <jag.raman@oracle.com>,
"Philippe Mathieu-Daudé" <philmd@linaro.org>,
"Paolo Bonzini" <pbonzini@redhat.com>,
"Peter Xu" <peterx@redhat.com>,
"David Hildenbrand" <david@redhat.com>,
john.levon@nutanix.com
Subject: Re: [PATCH 2/3] softmmu: Remove DMA unmap notification callback
Date: Thu, 20 Jul 2023 14:14:02 -0400 [thread overview]
Message-ID: <20230720181402.GB210977@fedora> (raw)
In-Reply-To: <20230704080628.852525-3-mnissler@rivosinc.com>
[-- Attachment #1: Type: text/plain, Size: 6458 bytes --]
On Tue, Jul 04, 2023 at 01:06:26AM -0700, Mattias Nissler wrote:
> According to old commit messages, this was introduced to retry a DMA
> operation at a later point in case the single bounce buffer is found to
> be busy. This was never used widely - only the dma-helpers code made use
> of it, but there are other device models that use multiple DMA mappings
> (concurrently) and just failed.
>
> After the improvement to support multiple concurrent bounce buffers,
> the condition the notification callback allowed to work around no
> longer exists, so we can just remove the logic and simplify the code.
>
> Signed-off-by: Mattias Nissler <mnissler@rivosinc.com>
> ---
> softmmu/dma-helpers.c | 28 -----------------
> softmmu/physmem.c | 71 -------------------------------------------
> 2 files changed, 99 deletions(-)
I'm not sure if it will be possible to remove this once a limit is
placed on bounce buffer space.
>
> diff --git a/softmmu/dma-helpers.c b/softmmu/dma-helpers.c
> index 2463964805..d05d226f11 100644
> --- a/softmmu/dma-helpers.c
> +++ b/softmmu/dma-helpers.c
> @@ -68,23 +68,10 @@ typedef struct {
> int sg_cur_index;
> dma_addr_t sg_cur_byte;
> QEMUIOVector iov;
> - QEMUBH *bh;
> DMAIOFunc *io_func;
> void *io_func_opaque;
> } DMAAIOCB;
>
> -static void dma_blk_cb(void *opaque, int ret);
> -
> -static void reschedule_dma(void *opaque)
> -{
> - DMAAIOCB *dbs = (DMAAIOCB *)opaque;
> -
> - assert(!dbs->acb && dbs->bh);
> - qemu_bh_delete(dbs->bh);
> - dbs->bh = NULL;
> - dma_blk_cb(dbs, 0);
> -}
> -
> static void dma_blk_unmap(DMAAIOCB *dbs)
> {
> int i;
> @@ -101,7 +88,6 @@ static void dma_complete(DMAAIOCB *dbs, int ret)
> {
> trace_dma_complete(dbs, ret, dbs->common.cb);
>
> - assert(!dbs->acb && !dbs->bh);
> dma_blk_unmap(dbs);
> if (dbs->common.cb) {
> dbs->common.cb(dbs->common.opaque, ret);
> @@ -164,13 +150,6 @@ static void dma_blk_cb(void *opaque, int ret)
> }
> }
>
> - if (dbs->iov.size == 0) {
> - trace_dma_map_wait(dbs);
> - dbs->bh = aio_bh_new(ctx, reschedule_dma, dbs);
> - cpu_register_map_client(dbs->bh);
> - goto out;
> - }
> -
> if (!QEMU_IS_ALIGNED(dbs->iov.size, dbs->align)) {
> qemu_iovec_discard_back(&dbs->iov,
> QEMU_ALIGN_DOWN(dbs->iov.size, dbs->align));
> @@ -189,18 +168,12 @@ static void dma_aio_cancel(BlockAIOCB *acb)
>
> trace_dma_aio_cancel(dbs);
>
> - assert(!(dbs->acb && dbs->bh));
> if (dbs->acb) {
> /* This will invoke dma_blk_cb. */
> blk_aio_cancel_async(dbs->acb);
> return;
> }
>
> - if (dbs->bh) {
> - cpu_unregister_map_client(dbs->bh);
> - qemu_bh_delete(dbs->bh);
> - dbs->bh = NULL;
> - }
> if (dbs->common.cb) {
> dbs->common.cb(dbs->common.opaque, -ECANCELED);
> }
> @@ -239,7 +212,6 @@ BlockAIOCB *dma_blk_io(AioContext *ctx,
> dbs->dir = dir;
> dbs->io_func = io_func;
> dbs->io_func_opaque = io_func_opaque;
> - dbs->bh = NULL;
> qemu_iovec_init(&dbs->iov, sg->nsg);
> dma_blk_cb(dbs, 0);
> return &dbs->common;
> diff --git a/softmmu/physmem.c b/softmmu/physmem.c
> index 56130b5a1d..2b4123c127 100644
> --- a/softmmu/physmem.c
> +++ b/softmmu/physmem.c
> @@ -2908,49 +2908,6 @@ typedef struct {
> uint8_t buffer[];
> } BounceBuffer;
>
> -static size_t bounce_buffers_in_use;
> -
> -typedef struct MapClient {
> - QEMUBH *bh;
> - QLIST_ENTRY(MapClient) link;
> -} MapClient;
> -
> -QemuMutex map_client_list_lock;
> -static QLIST_HEAD(, MapClient) map_client_list
> - = QLIST_HEAD_INITIALIZER(map_client_list);
> -
> -static void cpu_unregister_map_client_do(MapClient *client)
> -{
> - QLIST_REMOVE(client, link);
> - g_free(client);
> -}
> -
> -static void cpu_notify_map_clients_locked(void)
> -{
> - MapClient *client;
> -
> - while (!QLIST_EMPTY(&map_client_list)) {
> - client = QLIST_FIRST(&map_client_list);
> - qemu_bh_schedule(client->bh);
> - cpu_unregister_map_client_do(client);
> - }
> -}
> -
> -void cpu_register_map_client(QEMUBH *bh)
> -{
> - MapClient *client = g_malloc(sizeof(*client));
> -
> - qemu_mutex_lock(&map_client_list_lock);
> - client->bh = bh;
> - QLIST_INSERT_HEAD(&map_client_list, client, link);
> - /* Write map_client_list before reading in_use. */
> - smp_mb();
> - if (qatomic_read(&bounce_buffers_in_use)) {
> - cpu_notify_map_clients_locked();
> - }
> - qemu_mutex_unlock(&map_client_list_lock);
> -}
> -
> void cpu_exec_init_all(void)
> {
> qemu_mutex_init(&ram_list.mutex);
> @@ -2964,28 +2921,6 @@ void cpu_exec_init_all(void)
> finalize_target_page_bits();
> io_mem_init();
> memory_map_init();
> - qemu_mutex_init(&map_client_list_lock);
> -}
> -
> -void cpu_unregister_map_client(QEMUBH *bh)
> -{
> - MapClient *client;
> -
> - qemu_mutex_lock(&map_client_list_lock);
> - QLIST_FOREACH(client, &map_client_list, link) {
> - if (client->bh == bh) {
> - cpu_unregister_map_client_do(client);
> - break;
> - }
> - }
> - qemu_mutex_unlock(&map_client_list_lock);
> -}
> -
> -static void cpu_notify_map_clients(void)
> -{
> - qemu_mutex_lock(&map_client_list_lock);
> - cpu_notify_map_clients_locked();
> - qemu_mutex_unlock(&map_client_list_lock);
> }
>
> static bool flatview_access_valid(FlatView *fv, hwaddr addr, hwaddr len,
> @@ -3077,8 +3012,6 @@ void *address_space_map(AddressSpace *as,
> memory_region_ref(mr);
>
> if (!memory_access_is_direct(mr, is_write)) {
> - qatomic_inc_fetch(&bounce_buffers_in_use);
> -
> BounceBuffer *bounce = g_malloc(l + sizeof(BounceBuffer));
> bounce->addr = addr;
> bounce->mr = mr;
> @@ -3122,10 +3055,6 @@ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
> }
> memory_region_unref(bounce->mr);
> g_free(bounce);
> -
> - if (qatomic_dec_fetch(&bounce_buffers_in_use) == 1) {
> - cpu_notify_map_clients();
> - }
> return;
> }
>
> --
> 2.34.1
>
[-- Attachment #2: signature.asc --]
[-- Type: application/pgp-signature, Size: 488 bytes --]
next prev parent reply other threads:[~2023-07-20 18:14 UTC|newest]
Thread overview: 14+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-07-04 8:06 [PATCH 0/3] Support message-based DMA in vfio-user server Mattias Nissler
2023-07-04 8:06 ` [PATCH 1/3] softmmu: Support concurrent bounce buffers Mattias Nissler
2023-07-20 18:10 ` Stefan Hajnoczi
2023-08-23 9:27 ` Mattias Nissler
2023-07-20 18:14 ` Stefan Hajnoczi
2023-07-04 8:06 ` [PATCH 2/3] softmmu: Remove DMA unmap notification callback Mattias Nissler
2023-07-20 18:14 ` Stefan Hajnoczi [this message]
2023-08-23 9:28 ` Mattias Nissler
2023-07-04 8:06 ` [PATCH 3/3] vfio-user: Message-based DMA support Mattias Nissler
2023-07-20 18:32 ` Stefan Hajnoczi
2023-08-23 9:28 ` Mattias Nissler
2023-07-04 8:20 ` [PATCH 0/3] Support message-based DMA in vfio-user server David Hildenbrand
2023-07-20 18:41 ` Stefan Hajnoczi
2023-07-20 22:10 ` Mattias Nissler
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230720181402.GB210977@fedora \
--to=stefanha@redhat.com \
--cc=david@redhat.com \
--cc=elena.ufimtseva@oracle.com \
--cc=jag.raman@oracle.com \
--cc=john.levon@nutanix.com \
--cc=mnissler@rivosinc.com \
--cc=pbonzini@redhat.com \
--cc=peterx@redhat.com \
--cc=philmd@linaro.org \
--cc=qemu-devel@nongnu.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).