From: David Stevens <stevensd@chromium.org>
To: Robin Murphy <robin.murphy@arm.com>
Cc: Christoph Hellwig <hch@lst.de>, Joerg Roedel <joro@8bytes.org>,
Will Deacon <will@kernel.org>,
Sergey Senozhatsky <senozhatsky@chromium.org>,
Lu Baolu <baolu.lu@linux.intel.com>,
iommu@lists.linux-foundation.org, linux-kernel@vger.kernel.org,
David Stevens <stevensd@chromium.org>
Subject: [PATCH v2 4/9] dma-iommu: remove extra buffer search on unmap
Date: Fri, 6 Aug 2021 19:34:18 +0900 [thread overview]
Message-ID: <20210806103423.3341285-5-stevensd@google.com> (raw)
In-Reply-To: <20210806103423.3341285-1-stevensd@google.com>
From: David Stevens <stevensd@chromium.org>
Add a callback to the buffer manager's removal function so that the buffer can
be synced during unmap without an extra find operation.
Signed-off-by: David Stevens <stevensd@chromium.org>
---
drivers/iommu/io-bounce-buffers.c | 87 +++++++++++++++++++++++++------
drivers/iommu/io-buffer-manager.c | 6 ++-
drivers/iommu/io-buffer-manager.h | 6 ++-
3 files changed, 81 insertions(+), 18 deletions(-)
diff --git a/drivers/iommu/io-bounce-buffers.c b/drivers/iommu/io-bounce-buffers.c
index 78b4440b58c8..c7c52a3f8bf7 100644
--- a/drivers/iommu/io-bounce-buffers.c
+++ b/drivers/iommu/io-bounce-buffers.c
@@ -153,6 +153,20 @@ static void io_bounce_buffers_do_sync(struct io_bounce_buffers *buffers,
}
}
+static void __io_bounce_buffers_sync_single(struct io_bounce_buffers *buffers,
+ dma_addr_t dma_handle, size_t size,
+ struct io_bounce_buffer_info *info,
+ struct page *orig_buffer, int prot,
+ enum dma_data_direction dir,
+ bool sync_for_cpu)
+{
+ size_t offset = dma_handle - info->iova;
+
+ io_bounce_buffers_do_sync(buffers, info->bounce_buffer, offset,
+ orig_buffer, offset, size, dir, prot,
+ sync_for_cpu);
+}
+
bool io_bounce_buffers_sync_single(struct io_bounce_buffers *buffers,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction dir,
@@ -160,17 +174,14 @@ bool io_bounce_buffers_sync_single(struct io_bounce_buffers *buffers,
{
struct io_bounce_buffer_info info;
void *orig_buffer;
- size_t offset;
int prot;
if (!io_buffer_manager_find_buffer(&buffers->manager, dma_handle, &info,
&orig_buffer, &prot))
return false;
- offset = dma_handle - info.iova;
- io_bounce_buffers_do_sync(buffers, info.bounce_buffer, offset,
- orig_buffer, offset, size, dir, prot,
- sync_for_cpu);
+ __io_bounce_buffers_sync_single(buffers, dma_handle, size, &info,
+ orig_buffer, prot, dir, sync_for_cpu);
return true;
}
@@ -219,16 +230,56 @@ bool io_bounce_buffers_sync_sg(struct io_bounce_buffers *buffers,
return true;
}
+struct unmap_sync_args {
+ struct io_bounce_buffers *buffers;
+ unsigned long attrs;
+ enum dma_data_direction dir;
+ dma_addr_t handle;
+ size_t size;
+ int nents;
+};
+
+static void
+io_bounce_buffers_unmap_page_sync(struct io_bounce_buffer_info *info, int prot,
+ void *orig_buffer, void *ctx)
+{
+ struct unmap_sync_args *args = ctx;
+
+ if (args->attrs & DMA_ATTR_SKIP_CPU_SYNC)
+ return;
+
+ __io_bounce_buffers_sync_single(args->buffers, args->handle, args->size,
+ info, orig_buffer, prot, args->dir,
+ true);
+}
+
bool io_bounce_buffers_unmap_page(struct io_bounce_buffers *buffers,
dma_addr_t handle, size_t size,
enum dma_data_direction dir,
unsigned long attrs)
{
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- io_bounce_buffers_sync_single(buffers, handle, size, dir, true);
+ struct unmap_sync_args args = { .buffers = buffers,
+ .attrs = attrs,
+ .dir = dir,
+ .handle = handle,
+ .size = size };
+
+ return io_buffer_manager_release_buffer(
+ &buffers->manager, buffers->domain, handle, true,
+ io_bounce_buffers_unmap_page_sync, &args);
+}
+
+static void io_bounce_buffers_unmap_sg_sync(struct io_bounce_buffer_info *info,
+ int prot, void *orig_buffer,
+ void *ctx)
+{
+ struct unmap_sync_args *args = ctx;
+
+ if (args->attrs & DMA_ATTR_SKIP_CPU_SYNC)
+ return;
- return io_buffer_manager_release_buffer(&buffers->manager,
- buffers->domain, handle, true);
+ __io_bounce_buffers_sync_sg(args->buffers, orig_buffer, args->nents,
+ info->bounce_buffer, args->dir, prot, true);
}
bool io_bounce_buffers_unmap_sg(struct io_bounce_buffers *buffers,
@@ -236,11 +287,13 @@ bool io_bounce_buffers_unmap_sg(struct io_bounce_buffers *buffers,
enum dma_data_direction dir,
unsigned long attrs)
{
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- io_bounce_buffers_sync_sg(buffers, sgl, nents, dir, true);
+ struct unmap_sync_args args = {
+ .buffers = buffers, .attrs = attrs, .dir = dir, .nents = nents
+ };
return io_buffer_manager_release_buffer(
- &buffers->manager, buffers->domain, sg_dma_address(sgl), true);
+ &buffers->manager, buffers->domain, sg_dma_address(sgl), true,
+ io_bounce_buffers_unmap_sg_sync, &args);
}
static bool io_bounce_buffers_map_buffer(struct io_bounce_buffers *buffers,
@@ -286,8 +339,9 @@ bool io_bounce_buffers_map_page(struct io_bounce_buffers *buffers,
page, offset, size, dir, prot, false);
if (!io_bounce_buffers_map_buffer(buffers, &info, prot)) {
- io_buffer_manager_release_buffer(
- &buffers->manager, buffers->domain, info.iova, false);
+ io_buffer_manager_release_buffer(&buffers->manager,
+ buffers->domain, info.iova,
+ false, NULL, NULL);
return true;
}
@@ -328,8 +382,9 @@ bool io_bounce_buffers_map_sg(struct io_bounce_buffers *buffers,
false);
if (!io_bounce_buffers_map_buffer(buffers, &info, prot)) {
- io_buffer_manager_release_buffer(
- &buffers->manager, buffers->domain, info.iova, false);
+ io_buffer_manager_release_buffer(&buffers->manager,
+ buffers->domain, info.iova,
+ false, NULL, NULL);
return true;
}
diff --git a/drivers/iommu/io-buffer-manager.c b/drivers/iommu/io-buffer-manager.c
index 24e95a2faa37..79b9759da928 100644
--- a/drivers/iommu/io-buffer-manager.c
+++ b/drivers/iommu/io-buffer-manager.c
@@ -169,7 +169,8 @@ bool io_buffer_manager_find_buffer(struct io_buffer_manager *manager,
bool io_buffer_manager_release_buffer(struct io_buffer_manager *manager,
struct iommu_domain *domain,
- dma_addr_t handle, bool inited)
+ dma_addr_t handle, bool inited,
+ prerelease_cb cb, void *ctx)
{
struct io_buffer_node *node;
unsigned long flags;
@@ -184,6 +185,9 @@ bool io_buffer_manager_release_buffer(struct io_buffer_manager *manager,
if (!node)
return false;
+ if (cb)
+ cb(&node->info, node->prot, node->orig_buffer, ctx);
+
if (inited)
free_buffer = io_bounce_buffers_release_buffer_cb(
manager, node->info.iova, node->info.size);
diff --git a/drivers/iommu/io-buffer-manager.h b/drivers/iommu/io-buffer-manager.h
index aae560cc8512..0e75d89926ca 100644
--- a/drivers/iommu/io-buffer-manager.h
+++ b/drivers/iommu/io-buffer-manager.h
@@ -31,9 +31,13 @@ bool io_buffer_manager_find_buffer(struct io_buffer_manager *manager,
struct io_bounce_buffer_info *info,
void **orig_buffer, int *prot);
+typedef void (*prerelease_cb)(struct io_bounce_buffer_info *info, int prot,
+ void *orig_buffer, void *ctx);
+
bool io_buffer_manager_release_buffer(struct io_buffer_manager *manager,
struct iommu_domain *domain,
- dma_addr_t handle, bool inited);
+ dma_addr_t handle, bool inited,
+ prerelease_cb cb, void *ctx);
int io_buffer_manager_init(struct io_buffer_manager *manager);
--
2.32.0.605.g8dce9f2422-goog
next prev parent reply other threads:[~2021-08-06 10:35 UTC|newest]
Thread overview: 19+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-08-06 10:34 [PATCH v2 0/9] Add dynamic iommu backed bounce buffers David Stevens
2021-08-06 10:34 ` [PATCH v2 1/9] Revert "iommu: Allow the dma-iommu api to use bounce buffers" David Stevens
2021-08-06 10:34 ` [PATCH v2 2/9] dma-iommu: expose a few helper functions to module David Stevens
2021-08-06 17:28 ` kernel test robot
2021-08-06 10:34 ` [PATCH v2 3/9] dma-iommu: bounce buffers for untrusted devices David Stevens
2021-08-06 15:53 ` kernel test robot
2021-08-10 1:19 ` Mi, Dapeng1
2021-08-10 1:41 ` David Stevens
2021-08-06 10:34 ` David Stevens [this message]
2021-08-06 10:34 ` [PATCH v2 5/9] dma-iommu: clear only necessary bytes David Stevens
2021-08-06 10:34 ` [PATCH v2 6/9] dma-iommu: add bounce buffer pools David Stevens
2021-08-06 10:34 ` [PATCH v2 7/9] dma-iommu: support iommu bounce buffer optimization David Stevens
2021-08-06 10:34 ` [PATCH v2 8/9] dma-mapping: add persistent streaming mapping flag David Stevens
2021-08-06 10:34 ` [PATCH v2 9/9] drm/i915: use DMA_ATTR_PERSISTENT_STREAMING flag David Stevens
2022-05-24 12:27 ` [PATCH v2 0/9] Add dynamic iommu backed bounce buffers Niklas Schnelle
2022-05-27 1:25 ` David Stevens
2022-06-03 14:53 ` Niklas Schnelle
2022-06-06 1:24 ` David Stevens
2022-07-01 9:23 ` Niklas Schnelle
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20210806103423.3341285-5-stevensd@google.com \
--to=stevensd@chromium.org \
--cc=baolu.lu@linux.intel.com \
--cc=hch@lst.de \
--cc=iommu@lists.linux-foundation.org \
--cc=joro@8bytes.org \
--cc=linux-kernel@vger.kernel.org \
--cc=robin.murphy@arm.com \
--cc=senozhatsky@chromium.org \
--cc=will@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox