From: Lu Baolu <baolu.lu@linux.intel.com>
To: David Woodhouse <dwmw2@infradead.org>, Joerg Roedel <joro@8bytes.org>
Cc: ashok.raj@intel.com, jacob.jun.pan@intel.com, alan.cox@intel.com,
kevin.tian@intel.com, mika.westerberg@linux.intel.com,
pengfei.xu@intel.com,
Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>,
Christoph Hellwig <hch@lst.de>,
Marek Szyprowski <m.szyprowski@samsung.com>,
Robin Murphy <robin.murphy@arm.com>,
iommu@lists.linux-foundation.org, linux-kernel@vger.kernel.org,
Lu Baolu <baolu.lu@linux.intel.com>
Subject: [PATCH v3 02/10] swiotlb: Factor out slot allocation and free
Date: Sun, 21 Apr 2019 09:17:11 +0800
Message-ID: <20190421011719.14909-3-baolu.lu@linux.intel.com>
In-Reply-To: <20190421011719.14909-1-baolu.lu@linux.intel.com>

Factor the slot allocation and freeing code out into two common
helper functions in order to avoid code duplication. There is no
functional change.
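
For clarity, the structure after this patch looks roughly like the
outline below. This is an illustrative summary only, not compilable
code; the actual implementation is in the diff that follows.

	static phys_addr_t
	swiotlb_tbl_alloc_tlb(struct device *hwdev, dma_addr_t tbl_dma_addr,
			      phys_addr_t orig_addr, size_t size)
	{
		/* Search the io_tlb slot pool and reserve enough slots
		 * for 'size', recording orig_addr for each slot. Returns
		 * the bounce buffer address, or DMA_MAPPING_ERROR if no
		 * suitable range of slots is free.
		 */
	}

	static void
	swiotlb_tbl_free_tlb(struct device *hwdev, phys_addr_t tlb_addr,
			     size_t size)
	{
		/* Return the slots backing tlb_addr to the free list. */
	}

	phys_addr_t swiotlb_tbl_map_single(...)
	{
		/* Call swiotlb_tbl_alloc_tlb(); on failure warn unless
		 * DMA_ATTR_NO_WARN is set, otherwise bounce the original
		 * data into the buffer for DMA_TO_DEVICE and
		 * DMA_BIDIRECTIONAL (unless DMA_ATTR_SKIP_CPU_SYNC).
		 */
	}

	void swiotlb_tbl_unmap_single(...)
	{
		/* Bounce the buffer back for DMA_FROM_DEVICE and
		 * DMA_BIDIRECTIONAL (unless DMA_ATTR_SKIP_CPU_SYNC),
		 * then call swiotlb_tbl_free_tlb().
		 */
	}
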
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
---
kernel/dma/swiotlb.c | 72 +++++++++++++++++++++++++++++---------------
1 file changed, 47 insertions(+), 25 deletions(-)

diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 53012db1e53c..173122d16b7f 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -439,11 +439,9 @@ static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
}
}
-phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
- dma_addr_t tbl_dma_addr,
- phys_addr_t orig_addr, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
+static phys_addr_t
+swiotlb_tbl_alloc_tlb(struct device *hwdev, dma_addr_t tbl_dma_addr,
+ phys_addr_t orig_addr, size_t size)
{
unsigned long flags;
phys_addr_t tlb_addr;
@@ -539,8 +537,6 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
not_found:
spin_unlock_irqrestore(&io_tlb_lock, flags);
- if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
- dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n", size);
return DMA_MAPPING_ERROR;
found:
io_tlb_used += nslots;
@@ -553,32 +549,16 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
*/
for (i = 0; i < nslots; i++)
io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
- (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
- swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);
return tlb_addr;
}
-/*
- * tlb_addr is the physical address of the bounce buffer to unmap.
- */
-void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
+static void
+swiotlb_tbl_free_tlb(struct device *hwdev, phys_addr_t tlb_addr, size_t size)
{
unsigned long flags;
int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
- phys_addr_t orig_addr = io_tlb_orig_addr[index];
-
- /*
- * First, sync the memory before unmapping the entry
- */
- if (orig_addr != INVALID_PHYS_ADDR &&
- !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
- ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
- swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);
/*
* Return the buffer to the free list by setting the corresponding
@@ -610,6 +590,48 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
spin_unlock_irqrestore(&io_tlb_lock, flags);
}
+phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
+ dma_addr_t tbl_dma_addr,
+ phys_addr_t orig_addr, size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ phys_addr_t tlb_addr;
+
+ tlb_addr = swiotlb_tbl_alloc_tlb(hwdev, tbl_dma_addr, orig_addr, size);
+ if (tlb_addr == DMA_MAPPING_ERROR) {
+ if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
+ dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n",
+ size);
+ } else if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+ (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) {
+ swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);
+ }
+
+ return tlb_addr;
+}
+
+/*
+ * tlb_addr is the physical address of the bounce buffer to unmap.
+ */
+void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
+ phys_addr_t orig_addr = io_tlb_orig_addr[index];
+
+ /*
+ * First, sync the memory before unmapping the entry
+ */
+ if (orig_addr != INVALID_PHYS_ADDR &&
+ !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+ ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
+ swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);
+
+ swiotlb_tbl_free_tlb(hwdev, tlb_addr, size);
+}
+
void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
size_t size, enum dma_data_direction dir,
enum dma_sync_target target)
--
2.17.1