From: jglisse@redhat.com
To: linux-mm@kvack.org
Cc: linux-kernel@vger.kernel.org,
"Jérôme Glisse" <jglisse@redhat.com>,
"Logan Gunthorpe" <logang@deltatee.com>,
"Greg Kroah-Hartman" <gregkh@linuxfoundation.org>,
"Rafael J . Wysocki" <rafael@kernel.org>,
"Bjorn Helgaas" <bhelgaas@google.com>,
"Christian Koenig" <christian.koenig@amd.com>,
"Felix Kuehling" <Felix.Kuehling@amd.com>,
"Jason Gunthorpe" <jgg@mellanox.com>,
linux-pci@vger.kernel.org, dri-devel@lists.freedesktop.org,
"Christoph Hellwig" <hch@lst.de>,
"Marek Szyprowski" <m.szyprowski@samsung.com>,
"Robin Murphy" <robin.murphy@arm.com>,
"Joerg Roedel" <jroedel@suse.de>,
iommu@lists.linux-foundation.org
Subject: [RFC PATCH 4/5] mm/hmm: add support for peer to peer to HMM device memory
Date: Tue, 29 Jan 2019 12:47:27 -0500
Message-ID: <20190129174728.6430-5-jglisse@redhat.com>
In-Reply-To: <20190129174728.6430-1-jglisse@redhat.com>
From: Jérôme Glisse <jglisse@redhat.com>
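
Add two optional callbacks, p2p_map() and p2p_unmap(), to struct
hmm_devmem_ops so that a device driver can map memory it owns to a
peer device for direct access. hmm_range_dma_map() and
hmm_range_dma_unmap() call into these callbacks when they encounter a
device private page; when the driver does not implement them, or when
the mapping attempt fails, the range falls back to migration to main
memory.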
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Cc: Logan Gunthorpe <logang@deltatee.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Rafael J. Wysocki <rafael@kernel.org>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: Christian Koenig <christian.koenig@amd.com>
Cc: Felix Kuehling <Felix.Kuehling@amd.com>
Cc: Jason Gunthorpe <jgg@mellanox.com>
Cc: linux-pci@vger.kernel.org
Cc: dri-devel@lists.freedesktop.org
Cc: Christoph Hellwig <hch@lst.de>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Joerg Roedel <jroedel@suse.de>
Cc: iommu@lists.linux-foundation.org
---
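As a rough illustration of how a driver might wire up the two new
callbacks, here is a minimal sketch. Everything below is hypothetical:
the dummy_* names, the bar_base/pfn_first fields, the use of
dev_get_drvdata() to retrieve driver state, and the address arithmetic
are invented for this note; a real driver would also have to verify
that the peer device can actually reach its memory (and return 0 to
force the fallback migration to main memory when it cannot).

#include <linux/hmm.h>

/* Hypothetical per-device state; none of this exists in the patch. */
struct dummy_devmem {
	struct hmm_devmem *devmem;
	phys_addr_t bar_base;		/* BAR backing the device memory */
	unsigned long pfn_first;	/* first pfn of the devmem region */
};

static long dummy_p2p_map(struct hmm_devmem *devmem,
			  struct hmm_range *range,
			  struct device *device,
			  unsigned long addr,
			  dma_addr_t *pas)
{
	struct dummy_devmem *ddm = dev_get_drvdata(devmem->device);
	long i = (addr - range->start) >> range->page_shift;
	long npages = (range->end - addr) >> range->page_shift;
	long mapped;

	for (mapped = 0; mapped < npages; ++mapped, ++i) {
		struct page *page = hmm_pfn_to_page(range, range->pfns[i]);

		/* Only map pages we own; stop at the first foreign page
		 * (the callback may return a short count). */
		if (!page || page->pgmap->data != devmem)
			break;

		/* Hypothetical translation: offset of the page within
		 * our region, relative to the BAR the peer can reach. */
		pas[i] = ddm->bar_base +
			 ((page_to_pfn(page) - ddm->pfn_first) << PAGE_SHIFT);
	}
	return mapped;
}

static unsigned long dummy_p2p_unmap(struct hmm_devmem *devmem,
				     struct hmm_range *range,
				     struct device *device,
				     unsigned long addr,
				     dma_addr_t *pas)
{
	long i = (addr - range->start) >> range->page_shift;
	long npages = (range->end - addr) >> range->page_shift;
	unsigned long unmapped;

	/* A static BAR window has nothing to tear down; just report how
	 * many of the pages starting at addr were ours. */
	for (unmapped = 0; unmapped < npages; ++unmapped, ++i) {
		struct page *page = hmm_pfn_to_page(range, range->pfns[i]);

		if (!page || page->pgmap->data != devmem)
			break;
	}
	return unmapped;
}

static const struct hmm_devmem_ops dummy_devmem_ops = {
	/* the driver's existing fault/free callbacks go here */
	.p2p_map	= dummy_p2p_map,
	.p2p_unmap	= dummy_p2p_unmap,
};

Note that hmm_range_dma_map() hands the whole daddrs array to
p2p_map() but passes &daddrs[i] to p2p_unmap(); the sketch indexes
from range->start in both, matching the p2p_map() call.
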
 include/linux/hmm.h | 47 +++++++++++++++++++++++++++++++++
 mm/hmm.c            | 63 +++++++++++++++++++++++++++++++++++++++++----
2 files changed, 105 insertions(+), 5 deletions(-)
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index 4a1454e3efba..7a3ac182cc48 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -710,6 +710,53 @@ struct hmm_devmem_ops {
const struct page *page,
unsigned int flags,
pmd_t *pmdp);
+
+	/*
+	 * p2p_map() - map pages for peer to peer access between devices
+	 * @devmem: device memory structure (see struct hmm_devmem)
+	 * @range: range of virtual addresses that is being mapped
+	 * @device: device the range is being mapped to
+	 * @addr: first virtual address in the range to consider
+	 * @pas: device addresses (where the actual mappings are stored)
+	 * Returns: number of pages successfully mapped, 0 otherwise
+	 *
+	 * Map pages belonging to devmem to another device for peer to peer
+	 * access. The device can decide not to map, in which case the memory
+	 * will be migrated to main memory.
+	 *
+	 * Note that there is no guarantee that all the pages in the range
+	 * belong to the devmem, so it is up to the function to check that
+	 * every single page does belong to devmem.
+	 *
+	 * For now we do not care about the exact error, so on failure the
+	 * function should just return 0.
+	 */
+ long (*p2p_map)(struct hmm_devmem *devmem,
+ struct hmm_range *range,
+ struct device *device,
+ unsigned long addr,
+ dma_addr_t *pas);
+
+	/*
+	 * p2p_unmap() - unmap peer to peer pages mapped between devices
+	 * @devmem: device memory structure (see struct hmm_devmem)
+	 * @range: range of virtual addresses that is being unmapped
+	 * @device: device the range was mapped to
+	 * @addr: first virtual address in the range to consider
+	 * @pas: device addresses (where the actual mappings are stored)
+	 * Returns: number of pages successfully unmapped, 0 otherwise
+	 *
+	 * Unmap pages belonging to devmem previously mapped with p2p_map().
+	 *
+	 * Note that there is no guarantee that all the pages in the range
+	 * belong to the devmem, so it is up to the function to check that
+	 * every single page does belong to devmem.
+	 */
+ unsigned long (*p2p_unmap)(struct hmm_devmem *devmem,
+ struct hmm_range *range,
+ struct device *device,
+ unsigned long addr,
+ dma_addr_t *pas);
};
/*
diff --git a/mm/hmm.c b/mm/hmm.c
index 1a444885404e..fd49b1e116d0 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -1193,16 +1193,19 @@ long hmm_range_dma_map(struct hmm_range *range,
dma_addr_t *daddrs,
bool block)
{
- unsigned long i, npages, mapped, page_size;
+ unsigned long i, npages, mapped, page_size, addr;
long ret;
+again:
ret = hmm_range_fault(range, block);
if (ret <= 0)
return ret ? ret : -EBUSY;
+ mapped = 0;
+ addr = range->start;
page_size = hmm_range_page_size(range);
npages = (range->end - range->start) >> range->page_shift;
- for (i = 0, mapped = 0; i < npages; ++i) {
+ for (i = 0; i < npages; ++i, addr += page_size) {
enum dma_data_direction dir = DMA_FROM_DEVICE;
struct page *page;
@@ -1226,6 +1229,29 @@ long hmm_range_dma_map(struct hmm_range *range,
goto unmap;
}
+ if (is_device_private_page(page)) {
+ struct hmm_devmem *devmem = page->pgmap->data;
+
+ if (!devmem->ops->p2p_map || !devmem->ops->p2p_unmap) {
+ /* Fall-back to main memory. */
+ range->default_flags |=
+ range->flags[HMM_PFN_DEVICE_PRIVATE];
+ goto again;
+ }
+
+ ret = devmem->ops->p2p_map(devmem, range, device,
+ addr, daddrs);
+ if (ret <= 0) {
+ /* Fall-back to main memory. */
+ range->default_flags |=
+ range->flags[HMM_PFN_DEVICE_PRIVATE];
+ goto again;
+ }
+ mapped += ret;
+			i += ret - 1, addr += (ret - 1) * page_size;
+ continue;
+ }
+
 		/* If it is read and write then map bi-directional. */
if (range->pfns[i] & range->values[HMM_PFN_WRITE])
dir = DMA_BIDIRECTIONAL;
@@ -1242,7 +1268,9 @@ long hmm_range_dma_map(struct hmm_range *range,
return mapped;
unmap:
- for (npages = i, i = 0; (i < npages) && mapped; ++i) {
+ npages = i;
+ addr = range->start;
+ for (i = 0; (i < npages) && mapped; ++i, addr += page_size) {
enum dma_data_direction dir = DMA_FROM_DEVICE;
struct page *page;
@@ -1253,6 +1281,18 @@ long hmm_range_dma_map(struct hmm_range *range,
if (dma_mapping_error(device, daddrs[i]))
continue;
+ if (is_device_private_page(page)) {
+ struct hmm_devmem *devmem = page->pgmap->data;
+ unsigned long inc;
+
+ inc = devmem->ops->p2p_unmap(devmem, range, device,
+ addr, &daddrs[i]);
+ BUG_ON(inc > npages);
+			mapped -= inc;
+			i += inc - 1, addr += (inc - 1) * page_size;
+ continue;
+ }
+
 		/* If it is read and write then map bi-directional. */
if (range->pfns[i] & range->values[HMM_PFN_WRITE])
dir = DMA_BIDIRECTIONAL;
@@ -1285,7 +1325,7 @@ long hmm_range_dma_unmap(struct hmm_range *range,
dma_addr_t *daddrs,
bool dirty)
{
- unsigned long i, npages, page_size;
+ unsigned long i, npages, page_size, addr;
long cpages = 0;
/* Sanity check. */
@@ -1298,7 +1338,7 @@ long hmm_range_dma_unmap(struct hmm_range *range,
page_size = hmm_range_page_size(range);
npages = (range->end - range->start) >> range->page_shift;
- for (i = 0; i < npages; ++i) {
+ for (i = 0, addr = range->start; i < npages; ++i, addr += page_size) {
enum dma_data_direction dir = DMA_FROM_DEVICE;
struct page *page;
@@ -1318,6 +1358,19 @@ long hmm_range_dma_unmap(struct hmm_range *range,
set_page_dirty(page);
}
+ if (is_device_private_page(page)) {
+ struct hmm_devmem *devmem = page->pgmap->data;
+ unsigned long ret;
+
+ BUG_ON(!devmem->ops->p2p_unmap);
+
+ ret = devmem->ops->p2p_unmap(devmem, range, device,
+ addr, &daddrs[i]);
+ BUG_ON(ret > npages);
+			i += ret - 1, addr += (ret - 1) * page_size;
+ continue;
+ }
+
/* Unmap and clear pfns/dma address */
dma_unmap_page(device, daddrs[i], page_size, dir);
range->pfns[i] = range->values[HMM_PFN_NONE];
--
2.17.2