From: Andi Kleen <andi@firstfloor.org>
To: linux-kernel@vger.kernel.org, linux-mm@kvack.org
Subject: [PATCH] [11/13] Switch x86-64 dma_alloc_coherent over to the maskable allocator
Date: Fri, 7 Mar 2008 10:07:21 +0100 (CET) [thread overview]
Message-ID: <20080307090721.B2DA41B419C@basil.firstfloor.org> (raw)
In-Reply-To: <200803071007.493903088@firstfloor.org>
Signed-off-by: Andi Kleen <ak@suse.de>
---
arch/x86/kernel/pci-dma_64.c | 49 +++++++++++++------------------------------
1 file changed, 15 insertions(+), 34 deletions(-)
Index: linux/arch/x86/kernel/pci-dma_64.c
===================================================================
--- linux.orig/arch/x86/kernel/pci-dma_64.c
+++ linux/arch/x86/kernel/pci-dma_64.c
@@ -47,11 +47,16 @@ struct device fallback_dev = {
/* Allocate DMA memory on node near device */
noinline static void *
-dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
+dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned size,
+ unsigned long dma_mask)
{
struct page *page;
int node;
+ /* For small masks use DMA allocator without node affinity */
+ if (dma_mask < DMA_32BIT_MASK)
+ return get_pages_mask(gfp, size, dma_mask);
+
node = dev_to_node(dev);
if (node == -1)
node = numa_node_id();
@@ -59,7 +64,8 @@ dma_alloc_pages(struct device *dev, gfp_
if (node < first_node(node_online_map))
node = first_node(node_online_map);
- page = alloc_pages_node(node, gfp, order);
+ page = alloc_pages_node(node, gfp, get_order(size));
+
return page ? page_address(page) : NULL;
}
@@ -91,15 +97,10 @@ dma_alloc_coherent(struct device *dev, s
uses the normal dma_mask for alloc_coherent. */
dma_mask &= *dev->dma_mask;
- /* Why <=? Even when the mask is smaller than 4GB it is often
- larger than 16MB and in this case we have a chance of
- finding fitting memory in the next higher zone first. If
- not retry with true GFP_DMA. -AK */
if (dma_mask <= DMA_32BIT_MASK)
gfp |= GFP_DMA32;
- again:
- memory = dma_alloc_pages(dev, gfp, get_order(size));
+ memory = dma_alloc_pages(dev, gfp, size, dma_mask);
if (memory == NULL)
return NULL;
@@ -108,25 +109,10 @@ dma_alloc_coherent(struct device *dev, s
bus = virt_to_bus(memory);
high = (bus + size) >= dma_mask;
mmu = high;
- if (force_iommu && !(gfp & GFP_DMA))
+ if (force_iommu)
mmu = 1;
else if (high) {
- free_pages((unsigned long)memory,
- get_order(size));
-
- /* Don't use the 16MB ZONE_DMA unless absolutely
- needed. It's better to use remapping first. */
- if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
- gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
- goto again;
- }
-
- /* Let low level make its own zone decisions */
- gfp &= ~(GFP_DMA32|GFP_DMA);
-
- if (dma_ops->alloc_coherent)
- return dma_ops->alloc_coherent(dev, size,
- dma_handle, gfp);
+ free_pages_mask(memory, size);
return NULL;
}
@@ -137,12 +123,6 @@ dma_alloc_coherent(struct device *dev, s
}
}
- if (dma_ops->alloc_coherent) {
- free_pages((unsigned long)memory, get_order(size));
- gfp &= ~(GFP_DMA|GFP_DMA32);
- return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
- }
-
if (dma_ops->map_simple) {
*dma_handle = dma_ops->map_simple(dev, memory,
size,
@@ -153,7 +133,7 @@ dma_alloc_coherent(struct device *dev, s
if (panic_on_overflow)
panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n",size);
- free_pages((unsigned long)memory, get_order(size));
+ free_pages_mask(memory, size);
return NULL;
}
EXPORT_SYMBOL(dma_alloc_coherent);
@@ -166,9 +146,10 @@ void dma_free_coherent(struct device *de
void *vaddr, dma_addr_t bus)
{
WARN_ON(irqs_disabled()); /* for portability */
+ /* RED-PEN swiotlb does unnecessary copy here */
if (dma_ops->unmap_single)
dma_ops->unmap_single(dev, bus, size, 0);
- free_pages((unsigned long)vaddr, get_order(size));
+ free_pages_mask(vaddr, size);
}
EXPORT_SYMBOL(dma_free_coherent);
@@ -191,7 +172,7 @@ int dma_supported(struct device *dev, u6
/* Copied from i386. Doesn't make much sense, because it will
only work for pci_alloc_coherent.
- The caller just has to use GFP_DMA in this case. */
+ The caller just has to use *_mask allocations in this case. */
if (mask < DMA_24BIT_MASK)
return 0;
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org">email@kvack.org</a>
next prev parent reply other threads:[~2008-03-07 9:07 UTC|newest]
Thread overview: 56+ messages / expand[flat|nested] mbox.gz Atom feed top
2008-03-07 9:07 [PATCH] [0/13] General DMA zone rework Andi Kleen
2008-03-07 9:07 ` [PATCH] [2/13] Make get_order(0) return 0 Andi Kleen
2008-03-07 9:07 ` [PATCH] [3/13] Make kvm bad_page symbol static Andi Kleen
2008-03-07 9:07 ` [PATCH] [4/13] Prepare page_alloc for the maskable allocator Andi Kleen
2008-03-07 18:19 ` Sam Ravnborg
2008-03-07 18:36 ` Cyrill Gorcunov
2008-03-07 19:02 ` Andi Kleen
2008-03-07 9:07 ` [PATCH] [5/13] Add mask allocator statistics to vmstat.[ch] Andi Kleen
2008-03-08 2:24 ` Christoph Lameter
2008-03-07 9:07 ` [PATCH] [6/13] Core maskable allocator Andi Kleen
2008-03-07 10:53 ` Johannes Weiner
2008-03-07 11:14 ` Andi Kleen
2008-03-07 17:05 ` Randy Dunlap
2008-03-07 17:31 ` Andi Kleen
2008-03-07 17:33 ` Randy Dunlap
2008-03-07 17:43 ` Andi Kleen
2008-03-07 17:51 ` Randy Dunlap
2008-03-07 21:13 ` Cyrill Gorcunov
2008-03-07 23:28 ` Andi Kleen
2008-03-08 5:03 ` KAMEZAWA Hiroyuki
2008-03-08 5:41 ` KAMEZAWA Hiroyuki
2008-03-08 11:41 ` Andi Kleen
2008-03-11 15:34 ` Jonathan Corbet
2008-03-11 15:54 ` Andi Kleen
2008-03-07 9:07 ` [PATCH] [7/13] Implement compat hooks for GFP_DMA Andi Kleen
2008-03-07 9:07 ` [PATCH] [8/13] Enable the mask allocator for x86 Andi Kleen
2008-03-07 18:32 ` Sam Ravnborg
2008-03-07 19:03 ` Andi Kleen
2008-03-07 19:09 ` Sam Ravnborg
2008-03-08 2:37 ` Christoph Lameter
2008-03-08 6:35 ` Yinghai Lu
2008-03-08 7:31 ` Christoph Lameter
2008-03-08 11:54 ` Andi Kleen
2008-03-10 17:13 ` Christoph Lameter
2008-03-07 9:07 ` [PATCH] [9/13] Remove set_dma_reserve Andi Kleen
2008-03-07 9:07 ` [PATCH] [10/13] Switch the 32bit dma_alloc_coherent functions over to use the maskable allocator Andi Kleen
2008-03-07 9:07 ` Andi Kleen [this message]
2008-03-07 9:07 ` [PATCH] [12/13] Add vmstat statistics for new swiotlb code Andi Kleen
2008-03-08 2:38 ` Christoph Lameter
2008-03-07 9:07 ` [PATCH] [13/13] Convert x86-64 swiotlb to use the mask allocator directly Andi Kleen
2008-03-07 15:18 ` [PATCH] [0/13] General DMA zone rework Rene Herman
2008-03-07 15:22 ` Rene Herman
2008-03-07 15:31 ` Andi Kleen
2008-03-07 15:34 ` Andi Kleen
2008-03-07 20:51 ` Luiz Fernando N. Capitulino
2008-03-08 0:46 ` Andi Kleen
2008-03-10 18:03 ` Luiz Fernando N. Capitulino
2008-03-10 18:08 ` Andi Kleen
2008-03-11 17:26 ` Luiz Fernando N. Capitulino
2008-03-11 17:35 ` Andi Kleen
2008-03-11 18:00 ` Luiz Fernando N. Capitulino
2008-03-11 18:49 ` Andi Kleen
2008-03-11 19:36 ` Luiz Fernando N. Capitulino
2008-03-08 2:42 ` Christoph Lameter
2008-03-08 11:57 ` Andi Kleen
2008-03-10 17:14 ` Christoph Lameter
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20080307090721.B2DA41B419C@basil.firstfloor.org \
--to=andi@firstfloor.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).