From: Christoph Hellwig <hch@lst.de>
To: iommu@lists.linux-foundation.org
Cc: linux-arch@vger.kernel.org, linux-mips@linux-mips.org,
        "Michal Simek" <monstr@monstr.eu>, linux-ia64@vger.kernel.org,
        "Christian König" <ckoenig.leichtzumerken@gmail.com>, x86@kernel.org,
        linux-kernel@vger.kernel.org,
        "Konrad Rzeszutek Wilk" <konrad@darnok.org>,
        "Guan Xuetao" <gxt@mprc.pku.edu.cn>, linuxppc-dev@lists.ozlabs.org,
        linux-arm-kernel@lists.infradead.org
Subject: [PATCH 10/22] swiotlb: refactor coherent buffer allocation
Date: Wed, 10 Jan 2018 09:09:20 +0100
Message-ID: <20180110080932.14157-11-hch@lst.de>
In-Reply-To: <20180110080932.14157-1-hch@lst.de>

Factor out a new swiotlb_alloc_buffer helper that allocates DMA coherent
memory from the swiotlb bounce buffer.

This allows simplifying the swiotlb_alloc implementation, which uses
dma_direct_alloc to try to allocate a reachable buffer first.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 lib/swiotlb.c | 122 +++++++++++++++++++++++++++++++---------------------------
 1 file changed, 65 insertions(+), 57 deletions(-)

diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 1a147f1354a1..bf2d19ee91c1 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -709,75 +709,79 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
 }
 EXPORT_SYMBOL_GPL(swiotlb_tbl_sync_single);
 
-void *
-swiotlb_alloc_coherent(struct device *hwdev, size_t size,
-                       dma_addr_t *dma_handle, gfp_t flags)
+static inline bool dma_coherent_ok(struct device *dev, dma_addr_t addr,
+                size_t size)
 {
-        bool warn = !(flags & __GFP_NOWARN);
-        dma_addr_t dev_addr;
-        void *ret;
-        int order = get_order(size);
-        u64 dma_mask = DMA_BIT_MASK(32);
+        u64 mask = DMA_BIT_MASK(32);
 
-        if (hwdev && hwdev->coherent_dma_mask)
-                dma_mask = hwdev->coherent_dma_mask;
+        if (dev && dev->coherent_dma_mask)
+                mask = dev->coherent_dma_mask;
+        return addr + size - 1 <= mask;
+}
 
-        ret = (void *)__get_free_pages(flags, order);
-        if (ret) {
-                dev_addr = swiotlb_virt_to_bus(hwdev, ret);
-                if (dev_addr + size - 1 > dma_mask) {
-                        /*
-                         * The allocated memory isn't reachable by the device.
-                         */
-                        free_pages((unsigned long) ret, order);
-                        ret = NULL;
-                }
-        }
-        if (!ret) {
-                /*
-                 * We are either out of memory or the device can't DMA to
-                 * GFP_DMA memory; fall back on map_single(), which
-                 * will grab memory from the lowest available address range.
-                 */
-                phys_addr_t paddr = map_single(hwdev, 0, size, DMA_FROM_DEVICE,
-                                               warn ? 0 : DMA_ATTR_NO_WARN);
-                if (paddr == SWIOTLB_MAP_ERROR)
-                        goto err_warn;
+static void *
+swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle,
+                unsigned long attrs)
+{
+        phys_addr_t phys_addr;
+
+        if (swiotlb_force == SWIOTLB_NO_FORCE)
+                goto out_warn;
 
-                ret = phys_to_virt(paddr);
-                dev_addr = swiotlb_phys_to_dma(hwdev, paddr);
+        phys_addr = swiotlb_tbl_map_single(dev,
+                        swiotlb_phys_to_dma(dev, io_tlb_start),
+                        0, size, DMA_FROM_DEVICE, 0);
+        if (phys_addr == SWIOTLB_MAP_ERROR)
+                goto out_warn;
 
-                /* Confirm address can be DMA'd by device */
-                if (dev_addr + size - 1 > dma_mask) {
-                        printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
-                               (unsigned long long)dma_mask,
-                               (unsigned long long)dev_addr);
+        *dma_handle = swiotlb_phys_to_dma(dev, phys_addr);
 
-                        /*
-                         * DMA_TO_DEVICE to avoid memcpy in unmap_single.
-                         * The DMA_ATTR_SKIP_CPU_SYNC is optional.
-                         */
-                        swiotlb_tbl_unmap_single(hwdev, paddr,
-                                                 size, DMA_TO_DEVICE,
-                                                 DMA_ATTR_SKIP_CPU_SYNC);
-                        goto err_warn;
-                }
-        }
+        if (dma_coherent_ok(dev, *dma_handle, size))
+                goto out_unmap;
 
-        *dma_handle = dev_addr;
-        memset(ret, 0, size);
+        memset(phys_to_virt(phys_addr), 0, size);
+        return phys_to_virt(phys_addr);
 
-        return ret;
+out_unmap:
+        dev_warn(dev, "hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
+                (unsigned long long)(dev ? dev->coherent_dma_mask : 0),
+                (unsigned long long)*dma_handle);
 
-err_warn:
-        if (warn && printk_ratelimit()) {
-                pr_warn("swiotlb: coherent allocation failed for device %s size=%zu\n",
-                        dev_name(hwdev), size);
+        /*
+         * DMA_TO_DEVICE to avoid memcpy in unmap_single.
+         * DMA_ATTR_SKIP_CPU_SYNC is optional.
+         */
+        swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE,
+                        DMA_ATTR_SKIP_CPU_SYNC);
+out_warn:
+        if ((attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) {
+                dev_warn(dev,
+                        "swiotlb: coherent allocation failed, size=%zu\n",
+                        size);
                 dump_stack();
         }
-        return NULL;
 }
+
+void *
+swiotlb_alloc_coherent(struct device *hwdev, size_t size,
+                dma_addr_t *dma_handle, gfp_t flags)
+{
+        int order = get_order(size);
+        unsigned long attrs = (flags & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0;
+        void *ret;
+
+        ret = (void *)__get_free_pages(flags, order);
+        if (ret) {
+                *dma_handle = swiotlb_virt_to_bus(hwdev, ret);
+                if (dma_coherent_ok(hwdev, *dma_handle, size)) {
+                        memset(ret, 0, size);
+                        return ret;
+                }
+        }
+
+        return swiotlb_alloc_buffer(hwdev, size, dma_handle, attrs);
+}
 EXPORT_SYMBOL(swiotlb_alloc_coherent);
 
 static bool swiotlb_free_buffer(struct device *dev, size_t size,
@@ -1103,6 +1107,10 @@ void *swiotlb_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 {
         void *vaddr;
 
+        /* temporary workaround: */
+        if (gfp & __GFP_NOWARN)
+                attrs |= DMA_ATTR_NO_WARN;
+
         /*
          * Don't print a warning when the first allocation attempt fails.
          * swiotlb_alloc_coherent() will print a warning when the DMA memory
@@ -1112,7 +1120,7 @@ void *swiotlb_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 
         vaddr = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
         if (!vaddr)
-                vaddr = swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
+                vaddr = swiotlb_alloc_buffer(dev, size, dma_handle, attrs);
         return vaddr;
 }
 
-- 
2.14.2
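
For reference, the reachability test that the new dma_coherent_ok() helper
centralizes can be exercised in isolation. The standalone userspace sketch
below mirrors the addr + size - 1 <= mask comparison from the patch; the
fake_device structure and the example addresses are hypothetical and exist
only to illustrate the arithmetic, they are not part of the patch or of the
kernel API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same construction as the kernel's DMA_BIT_MASK() macro. */
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* Hypothetical stand-in for struct device; only the mask field matters here. */
struct fake_device {
        uint64_t coherent_dma_mask;     /* 0 means "not set", as in the kernel check */
};

/* Mirrors dma_coherent_ok() from the patch: default to a 32-bit mask. */
static bool dma_coherent_ok(const struct fake_device *dev, uint64_t addr,
                            size_t size)
{
        uint64_t mask = DMA_BIT_MASK(32);

        if (dev && dev->coherent_dma_mask)
                mask = dev->coherent_dma_mask;
        /* The last byte of the buffer must still be covered by the mask. */
        return addr + size - 1 <= mask;
}

int main(void)
{
        struct fake_device dev32 = { .coherent_dma_mask = DMA_BIT_MASK(32) };

        /* Buffer ends exactly at the 4 GiB boundary: reachable (prints 1). */
        printf("%d\n", dma_coherent_ok(&dev32, 0xfffff000ULL, 0x1000));
        /* One byte past the boundary: not reachable (prints 0). */
        printf("%d\n", dma_coherent_ok(&dev32, 0xfffff000ULL, 0x1001));
        return 0;
}

Built with any C compiler (e.g. cc -std=c99), this prints 1 and then 0,
matching what the in-kernel check would report for those two buffers against
a 32-bit coherent DMA mask.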