From: Oak Zeng <oak.zeng@intel.com>
To: intel-xe@lists.freedesktop.org
Subject: [CI 03/43] dma-mapping: provide callbacks to link/unlink pages to specific IOVA
Date: Tue, 11 Jun 2024 22:25:25 -0400 [thread overview]
Message-ID: <20240612022605.385062-3-oak.zeng@intel.com> (raw)
In-Reply-To: <20240612022605.385062-1-oak.zeng@intel.com>
From: Leon Romanovsky <leonro@nvidia.com>
Introduce new DMA link/unlink API to provide a way for advanced users
to directly map/unmap pages without need to allocate IOVA on every map
call.
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
include/linux/dma-map-ops.h | 10 +++++++
include/linux/dma-mapping.h | 13 +++++++++
kernel/dma/debug.h | 2 ++
kernel/dma/direct.h | 3 ++
kernel/dma/mapping.c | 57 +++++++++++++++++++++++++++++++++++++
5 files changed, 85 insertions(+)
diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
index 23e5e2f63a1c..292326ac5a12 100644
--- a/include/linux/dma-map-ops.h
+++ b/include/linux/dma-map-ops.h
@@ -89,6 +89,13 @@ struct dma_map_ops {
dma_addr_t (*alloc_iova)(struct device *dev, size_t size);
void (*free_iova)(struct device *dev, dma_addr_t dma_addr, size_t size);
+ dma_addr_t (*link_range)(struct device *dev, struct page *page,
+ unsigned long offset, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+ void (*unlink_range)(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
};
#ifdef CONFIG_DMA_OPS
@@ -440,6 +447,9 @@ bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
#define arch_dma_unmap_sg_direct(d, s, n) (false)
#endif
+#define arch_dma_link_range_direct arch_dma_map_page_direct
+#define arch_dma_unlink_range_direct arch_dma_unmap_page_direct
+
#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, bool coherent);
#else
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 34a3b6420606..223b5477e36e 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -113,6 +113,9 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
int dma_alloc_iova(struct dma_iova_attrs *iova);
void dma_free_iova(struct dma_iova_attrs *iova);
+dma_addr_t dma_link_range(struct page *page, unsigned long offset,
+ struct dma_iova_attrs *iova, dma_addr_t dma_offset);
+void dma_unlink_range(struct dma_iova_attrs *iova, dma_addr_t dma_offset);
dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
size_t offset, size_t size, enum dma_data_direction dir,
@@ -170,6 +173,16 @@ static inline int dma_alloc_iova(struct dma_iova_attrs *iova)
static inline void dma_free_iova(struct dma_iova_attrs *iova)
{
}
+static inline dma_addr_t dma_link_range(struct page *page, unsigned long offset,
+ struct dma_iova_attrs *iova,
+ dma_addr_t dma_offset)
+{
+ return DMA_MAPPING_ERROR;
+}
+static inline void dma_unlink_range(struct dma_iova_attrs *iova,
+ dma_addr_t dma_offset)
+{
+}
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
struct page *page, size_t offset, size_t size,
enum dma_data_direction dir, unsigned long attrs)
diff --git a/kernel/dma/debug.h b/kernel/dma/debug.h
index f525197d3cae..3d529f355c6d 100644
--- a/kernel/dma/debug.h
+++ b/kernel/dma/debug.h
@@ -127,4 +127,6 @@ static inline void debug_dma_sync_sg_for_device(struct device *dev,
{
}
#endif /* CONFIG_DMA_API_DEBUG */
+#define debug_dma_link_range debug_dma_map_page
+#define debug_dma_unlink_range debug_dma_unmap_page
#endif /* _KERNEL_DMA_DEBUG_H */
diff --git a/kernel/dma/direct.h b/kernel/dma/direct.h
index 18d346118fe8..1c30e1cd607a 100644
--- a/kernel/dma/direct.h
+++ b/kernel/dma/direct.h
@@ -125,4 +125,7 @@ static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
swiotlb_tbl_unmap_single(dev, phys, size, dir,
attrs | DMA_ATTR_SKIP_CPU_SYNC);
}
+
+#define dma_direct_link_range dma_direct_map_page
+#define dma_direct_unlink_range dma_direct_unmap_page
#endif /* _KERNEL_DMA_DIRECT_H */
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 4d14637d186b..787d7516434f 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -213,6 +213,63 @@ void dma_free_iova(struct dma_iova_attrs *iova)
}
EXPORT_SYMBOL(dma_free_iova);
+/**
+ * dma_link_range - Link a physical page to DMA address
+ * @page: The page to be mapped
+ * @offset: The offset within the page
+ * @iova: Preallocated IOVA attributes
+ * @dma_offset: DMA offset from which this page needs to be linked
+ *
+ * dma_alloc_iova() allocates IOVA based on the size specified by the user in
+ * iova->size. Call this function after IOVA allocation to link @page from
+ * @offset to get the DMA address. Note that very first call to this function
+ * will have @dma_offset set to 0 in the IOVA space allocated from
+ * dma_alloc_iova(). For subsequent calls to this function on the same @iova,
+ * @dma_offset needs to be advanced by the caller with the size of previous
+ * page that was linked + DMA address returned for the previous page that was
+ * linked by this function.
+ */
+dma_addr_t dma_link_range(struct page *page, unsigned long offset,
+ struct dma_iova_attrs *iova, dma_addr_t dma_offset)
+{
+ struct device *dev = iova->dev;
+ size_t size = iova->size;
+ enum dma_data_direction dir = iova->dir;
+ unsigned long attrs = iova->attrs;
+ dma_addr_t addr = iova->addr + dma_offset;
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if (dma_map_direct(dev, ops) ||
+ arch_dma_link_range_direct(dev, page_to_phys(page) + offset + size))
+ addr = dma_direct_link_range(dev, page, offset, size, dir, attrs);
+ else if (ops->link_range)
+ addr = ops->link_range(dev, page, offset, addr, size, dir, attrs);
+
+ kmsan_handle_dma(page, offset, size, dir);
+ debug_dma_link_range(dev, page, offset, size, dir, addr, attrs);
+ return addr;
+}
+EXPORT_SYMBOL(dma_link_range);
+
+void dma_unlink_range(struct dma_iova_attrs *iova, dma_addr_t dma_offset)
+{
+ struct device *dev = iova->dev;
+ size_t size = iova->size;
+ enum dma_data_direction dir = iova->dir;
+ unsigned long attrs = iova->attrs;
+ dma_addr_t addr = iova->addr + dma_offset;
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if (dma_map_direct(dev, ops) ||
+ arch_dma_unlink_range_direct(dev, addr + size))
+ dma_direct_unlink_range(dev, addr, size, dir, attrs);
+ else if (ops->unlink_range)
+ ops->unlink_range(dev, addr, size, dir, attrs);
+
+ debug_dma_unlink_range(dev, addr, size, dir);
+}
+EXPORT_SYMBOL(dma_unlink_range);
+
static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir, unsigned long attrs)
{
--
2.26.3
next prev parent reply other threads:[~2024-06-12 2:15 UTC|newest]
Thread overview: 46+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-06-12 2:25 [CI 01/43] mm/hmm: let users to tag specific PFNs Oak Zeng
2024-06-12 2:25 ` [CI 02/43] dma-mapping: provide an interface to allocate IOVA Oak Zeng
2024-06-12 2:25 ` Oak Zeng [this message]
2024-06-12 2:25 ` [CI 04/43] iommu/dma: Provide an interface to allow preallocate IOVA Oak Zeng
2024-06-12 2:25 ` [CI 05/43] iommu/dma: Prepare map/unmap page functions to receive IOVA Oak Zeng
2024-06-12 2:25 ` [CI 06/43] iommu/dma: Implement link/unlink page callbacks Oak Zeng
2024-06-12 2:25 ` [CI 07/43] drm: move xe_sg_segment_size to drm layer Oak Zeng
2024-06-12 2:25 ` [CI 08/43] drm: Move GPUVA_START/LAST to drm_gpuvm.h Oak Zeng
2024-06-12 2:25 ` [CI 09/43] drm/svm: Mark drm_gpuvm to participate SVM Oak Zeng
2024-06-12 2:25 ` [CI 10/43] drm/svm: introduce drm_mem_region concept Oak Zeng
2024-06-12 2:25 ` [CI 11/43] drm/svm: introduce hmmptr and helper functions Oak Zeng
2024-06-12 2:25 ` [CI 12/43] drm/svm: Introduce helper to remap drm memory region Oak Zeng
2024-06-12 2:25 ` [CI 13/43] drm/svm: handle CPU page fault Oak Zeng
2024-06-12 2:25 ` [CI 14/43] drm/svm: Migrate a range of hmmptr to vram Oak Zeng
2024-06-12 2:25 ` [CI 15/43] drm/svm: Add DRM SVM documentation Oak Zeng
2024-06-12 2:25 ` [CI 16/43] drm/xe: s/xe_tile_migrate_engine/xe_tile_migrate_exec_queue Oak Zeng
2024-06-12 2:25 ` [CI 17/43] drm/xe: Add xe_vm_pgtable_update_op to xe_vma_ops Oak Zeng
2024-06-12 2:25 ` [CI 18/43] drm/xe: Convert multiple bind ops into single job Oak Zeng
2024-06-12 2:25 ` [CI 19/43] drm/xe: Update VM trace events Oak Zeng
2024-06-12 2:25 ` [CI 20/43] drm/xe: Update PT layer with better error handling Oak Zeng
2024-06-12 2:25 ` [CI 21/43] drm/xe: Retry BO allocation Oak Zeng
2024-06-12 2:25 ` [CI 22/43] drm/xe/uapi: Add DRM_XE_VM_BIND_FLAG_SYSTEM_ALLOCATOR flag Oak Zeng
2024-06-12 2:25 ` [CI 23/43] drm/xe: Add a helper to calculate userptr end address Oak Zeng
2024-06-12 2:25 ` [CI 24/43] drm/xe: Add dma_addr res cursor Oak Zeng
2024-06-12 2:25 ` [CI 25/43] drm/xe: Use drm_mem_region for xe Oak Zeng
2024-06-12 2:25 ` [CI 26/43] drm/xe: use drm_hmmptr in xe Oak Zeng
2024-06-12 2:25 ` [CI 27/43] drm/xe: Moving to range based vma invalidation Oak Zeng
2024-06-12 2:25 ` [CI 28/43] drm/xe: Support range based page table update Oak Zeng
2024-06-12 2:25 ` [CI 29/43] drm/xe/uapi: Add DRM_XE_VM_CREATE_FLAG_PARTICIPATE_SVM flag Oak Zeng
2024-06-12 2:25 ` [CI 30/43] drm/xe/svm: Create userptr if page fault occurs on system_allocator VMA Oak Zeng
2024-06-12 2:25 ` [CI 31/43] drm/xe/svm: Add faulted userptr VMA garbage collector Oak Zeng
2024-06-12 2:25 ` [CI 32/43] drm/xe: Introduce helper to get tile from memory region Oak Zeng
2024-06-12 2:25 ` [CI 33/43] drm/xe/svm: implement functions to allocate and free device memory Oak Zeng
2024-06-12 2:25 ` [CI 34/43] drm/xe/svm: Get drm device from drm memory region Oak Zeng
2024-06-12 2:25 ` [CI 35/43] drm/xe/svm: Get page map owner of a " Oak Zeng
2024-06-12 2:25 ` [CI 36/43] drm/xe/svm: Add migrate layer functions for SVM support Oak Zeng
2024-06-12 2:25 ` [CI 37/43] drm/xe/svm: introduce svm migration function Oak Zeng
2024-06-12 2:26 ` [CI 38/43] drm/xe/svm: Register xe memory region to drm layer Oak Zeng
2024-06-12 2:26 ` [CI 39/43] drm/xe/svm: Introduce DRM_XE_SVM kernel config Oak Zeng
2024-06-12 2:26 ` [CI 40/43] drm/xe/svm: Migration from sram to vram for system allocator Oak Zeng
2024-06-12 2:26 ` [CI 41/43] drm/xe/svm: Determine a vma is backed by device memory Oak Zeng
2024-06-12 2:26 ` [CI 42/43] drm/xe/svm: Introduce hmm_pfn array based resource cursor Oak Zeng
2024-06-12 2:26 ` [CI 43/43] drm/xe: Enable system allocator uAPI Oak Zeng
2024-06-12 3:14 ` ✓ CI.Patch_applied: success for series starting with [CI,01/43] mm/hmm: let users to tag specific PFNs Patchwork
2024-06-12 3:15 ` ✗ CI.checkpatch: warning " Patchwork
2024-06-12 3:16 ` ✗ CI.KUnit: failure " Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240612022605.385062-3-oak.zeng@intel.com \
--to=oak.zeng@intel.com \
--cc=intel-xe@lists.freedesktop.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox