From: Peter Gonda <pgonda@google.com>
To: stable@vger.kernel.org
Cc: Peter Gonda <pgonda@google.com>, Christoph Hellwig <hch@lst.de>,
	Max Filippov <jcmvbkbc@gmail.com>
Subject: [PATCH 04/30 for 5.4] dma-mapping: merge the generic remapping helpers into dma-direct
Date: Fri, 25 Sep 2020 09:18:50 -0700
Message-ID: <20200925161916.204667-5-pgonda@google.com>
In-Reply-To: <20200925161916.204667-1-pgonda@google.com>

From: Christoph Hellwig <hch@lst.de>

commit 3acac065508f6cc60ac9d3e4b7c6cc37fd91d531 upstream.

Integrate the generic DMA remapping implementation into the main flow.
This prepares for architectures like xtensa that use an uncached
segment for pages in the kernel mapping, but can also remap highmem
from CMA.  To simplify that implementation, we now always derive the
page from the DMA address (via the physical address) instead of from
the virtual address.
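
For reference, the "page from the DMA address" derivation mentioned
above is done by the dma_direct_to_page() helper introduced earlier in
this series; a minimal sketch of that lookup, assuming the 5.4 backport
matches the upstream helper:

	static struct page *dma_direct_to_page(struct device *dev,
			dma_addr_t dma_addr)
	{
		/* map the device-visible DMA address back to a CPU
		 * physical address, then to its struct page */
		return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr)));
	}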

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Peter Gonda <pgonda@google.com>
---
 kernel/dma/direct.c | 60 ++++++++++++++++++++++++++++++++++++---------
 kernel/dma/remap.c  | 49 ------------------------------------
 2 files changed, 48 insertions(+), 61 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 76c722bc9e0c..d30c5468a91a 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -12,6 +12,7 @@
 #include <linux/dma-contiguous.h>
 #include <linux/dma-noncoherent.h>
 #include <linux/pfn.h>
+#include <linux/vmalloc.h>
 #include <linux/set_memory.h>
 #include <linux/swiotlb.h>
 
@@ -138,6 +139,15 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 	struct page *page;
 	void *ret;
 
+	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+	    dma_alloc_need_uncached(dev, attrs) &&
+	    !gfpflags_allow_blocking(gfp)) {
+		ret = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
+		if (!ret)
+			return NULL;
+		goto done;
+	}
+
 	page = __dma_direct_alloc_pages(dev, size, gfp, attrs);
 	if (!page)
 		return NULL;
@@ -147,9 +157,28 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 		/* remove any dirty cache lines on the kernel alias */
 		if (!PageHighMem(page))
 			arch_dma_prep_coherent(page, size);
-		*dma_handle = phys_to_dma(dev, page_to_phys(page));
 		/* return the page pointer as the opaque cookie */
-		return page;
+		ret = page;
+		goto done;
+	}
+
+	if ((IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+	     dma_alloc_need_uncached(dev, attrs)) ||
+	    (IS_ENABLED(CONFIG_DMA_REMAP) && PageHighMem(page))) {
+		/* remove any dirty cache lines on the kernel alias */
+		arch_dma_prep_coherent(page, PAGE_ALIGN(size));
+
+		/* create a coherent mapping */
+		ret = dma_common_contiguous_remap(page, PAGE_ALIGN(size),
+				dma_pgprot(dev, PAGE_KERNEL, attrs),
+				__builtin_return_address(0));
+		if (!ret) {
+			dma_free_contiguous(dev, page, size);
+			return ret;
+		}
+
+		memset(ret, 0, size);
+		goto done;
 	}
 
 	if (PageHighMem(page)) {
@@ -165,12 +194,9 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 	}
 
 	ret = page_address(page);
-	if (force_dma_unencrypted(dev)) {
+	if (force_dma_unencrypted(dev))
 		set_memory_decrypted((unsigned long)ret, 1 << get_order(size));
-		*dma_handle = __phys_to_dma(dev, page_to_phys(page));
-	} else {
-		*dma_handle = phys_to_dma(dev, page_to_phys(page));
-	}
+
 	memset(ret, 0, size);
 
 	if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
@@ -178,7 +204,11 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 		arch_dma_prep_coherent(page, size);
 		ret = uncached_kernel_address(ret);
 	}
-
+done:
+	if (force_dma_unencrypted(dev))
+		*dma_handle = __phys_to_dma(dev, page_to_phys(page));
+	else
+		*dma_handle = phys_to_dma(dev, page_to_phys(page));
 	return ret;
 }
 
@@ -194,19 +224,24 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 		return;
 	}
 
+	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+	    dma_free_from_pool(cpu_addr, PAGE_ALIGN(size)))
+		return;
+
 	if (force_dma_unencrypted(dev))
 		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
 
-	if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
-	    dma_alloc_need_uncached(dev, attrs))
-		cpu_addr = cached_kernel_address(cpu_addr);
-	dma_free_contiguous(dev, virt_to_page(cpu_addr), size);
+	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
+		vunmap(cpu_addr);
+
+	dma_free_contiguous(dev, dma_direct_to_page(dev, dma_addr), size);
 }
 
 void *dma_direct_alloc(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
 	if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
 	    dma_alloc_need_uncached(dev, attrs))
 		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
 	return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
@@ -216,6 +251,7 @@ void dma_direct_free(struct device *dev, size_t size,
 		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
 {
 	if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
 	    dma_alloc_need_uncached(dev, attrs))
 		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
 	else
diff --git a/kernel/dma/remap.c b/kernel/dma/remap.c
index 3c49499ee6b0..d47bd40fc0f5 100644
--- a/kernel/dma/remap.c
+++ b/kernel/dma/remap.c
@@ -210,53 +210,4 @@ bool dma_free_from_pool(void *start, size_t size)
 	gen_pool_free(atomic_pool, (unsigned long)start, size);
 	return true;
 }
-
-void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		gfp_t flags, unsigned long attrs)
-{
-	struct page *page = NULL;
-	void *ret;
-
-	size = PAGE_ALIGN(size);
-
-	if (!gfpflags_allow_blocking(flags)) {
-		ret = dma_alloc_from_pool(size, &page, flags);
-		if (!ret)
-			return NULL;
-		goto done;
-	}
-
-	page = __dma_direct_alloc_pages(dev, size, flags, attrs);
-	if (!page)
-		return NULL;
-
-	/* remove any dirty cache lines on the kernel alias */
-	arch_dma_prep_coherent(page, size);
-
-	/* create a coherent mapping */
-	ret = dma_common_contiguous_remap(page, size,
-			dma_pgprot(dev, PAGE_KERNEL, attrs),
-			__builtin_return_address(0));
-	if (!ret) {
-		dma_free_contiguous(dev, page, size);
-		return ret;
-	}
-
-	memset(ret, 0, size);
-done:
-	*dma_handle = phys_to_dma(dev, page_to_phys(page));
-	return ret;
-}
-
-void arch_dma_free(struct device *dev, size_t size, void *vaddr,
-		dma_addr_t dma_handle, unsigned long attrs)
-{
-	if (!dma_free_from_pool(vaddr, PAGE_ALIGN(size))) {
-		phys_addr_t phys = dma_to_phys(dev, dma_handle);
-		struct page *page = pfn_to_page(__phys_to_pfn(phys));
-
-		vunmap(vaddr);
-		dma_free_contiguous(dev, page, size);
-	}
-}
 #endif /* CONFIG_DMA_DIRECT_REMAP */
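
(Illustration, not part of the patch: after this change, a non-blocking
coherent allocation for a non-coherent dma-direct device is served from
the atomic pool inside dma_direct_alloc_pages() instead of going through
arch_dma_alloc().  A hypothetical driver fragment, with "mydev" standing
in for a real struct device:)

	#include <linux/dma-mapping.h>

	dma_addr_t dma_handle;
	/* GFP_ATOMIC cannot block, so this takes the
	 * dma_alloc_from_pool() fast path added above */
	void *buf = dma_alloc_coherent(mydev, PAGE_SIZE, &dma_handle,
				       GFP_ATOMIC);
	if (!buf)
		return -ENOMEM;
	/* ... program the device to DMA into buf ... */
	dma_free_coherent(mydev, PAGE_SIZE, buf, dma_handle);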
-- 
2.28.0.618.gf4bc123cb7-goog

