linux-arm-kernel.lists.infradead.org archive mirror
From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
To: linux-arm-kernel@lists.infradead.org
Subject: [PATCH 04/10] swiotlb: remove the overflow buffer
Date: Thu, 18 Oct 2018 20:23:12 -0400	[thread overview]
Message-ID: <20181019002312.GF1251@char.us.oracle.com> (raw)
In-Reply-To: <20181008080246.20543-5-hch@lst.de>

On Mon, Oct 08, 2018 at 10:02:40AM +0200, Christoph Hellwig wrote:
> Like all other dma mapping drivers, just return an error code instead
> of an actual memory buffer.  The reason for the overflow buffer was
> that, at the time swiotlb was invented, there was no way to check for
> dma mapping errors, but that has long since been fixed.
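
The driver-visible contract stays the standard one: check the handle
returned by dma_map_page()/dma_map_single() with dma_mapping_error()
before using it. A minimal sketch of that pattern (dev, page, offset
and len are hypothetical driver state):

	dma_addr_t addr;

	addr = dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr)) {
		/* No overflow-buffer fallback anymore: fail the request. */
		return -ENOMEM;
	}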

RIP overflow buffer.

Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

Thank you!
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  arch/arm64/mm/dma-mapping.c       |  2 +-
>  arch/powerpc/kernel/dma-swiotlb.c |  4 +--
>  include/linux/dma-direct.h        |  2 ++
>  include/linux/swiotlb.h           |  3 --
>  kernel/dma/direct.c               |  2 --
>  kernel/dma/swiotlb.c              | 59 ++-----------------------------
>  6 files changed, 8 insertions(+), 64 deletions(-)
> 
> diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
> index 072c51fb07d7..8d91b927e09e 100644
> --- a/arch/arm64/mm/dma-mapping.c
> +++ b/arch/arm64/mm/dma-mapping.c
> @@ -324,7 +324,7 @@ static int __swiotlb_dma_supported(struct device *hwdev, u64 mask)
>  static int __swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t addr)
>  {
>  	if (swiotlb)
> -		return swiotlb_dma_mapping_error(hwdev, addr);
> +		return dma_direct_mapping_error(hwdev, addr);
>  	return 0;
>  }
>  
> diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
> index 88f3963ca30f..5fc335f4d9cd 100644
> --- a/arch/powerpc/kernel/dma-swiotlb.c
> +++ b/arch/powerpc/kernel/dma-swiotlb.c
> @@ -11,7 +11,7 @@
>   *
>   */
>  
> -#include <linux/dma-mapping.h>
> +#include <linux/dma-direct.h>
>  #include <linux/memblock.h>
>  #include <linux/pfn.h>
>  #include <linux/of_platform.h>
> @@ -59,7 +59,7 @@ const struct dma_map_ops powerpc_swiotlb_dma_ops = {
>  	.sync_single_for_device = swiotlb_sync_single_for_device,
>  	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
>  	.sync_sg_for_device = swiotlb_sync_sg_for_device,
> -	.mapping_error = swiotlb_dma_mapping_error,
> +	.mapping_error = dma_direct_mapping_error,
>  	.get_required_mask = swiotlb_powerpc_get_required,
>  };
>  
> diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
> index fbca184ff5a0..bd73e7a91410 100644
> --- a/include/linux/dma-direct.h
> +++ b/include/linux/dma-direct.h
> @@ -5,6 +5,8 @@
>  #include <linux/dma-mapping.h>
>  #include <linux/mem_encrypt.h>
>  
> +#define DIRECT_MAPPING_ERROR		0
> +
>  #ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
>  #include <asm/dma-direct.h>
>  #else
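
For anyone wondering what the replacement .mapping_error op looks like:
dma_direct_mapping_error() in kernel/dma/direct.c is, from memory,
essentially just a comparison against the sentinel defined above,
relying on DMA address 0 never being handed out for a successfully
mapped buffer:

	int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
	{
		return dma_addr == DIRECT_MAPPING_ERROR;
	}
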
> diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
> index 7ef541ce8f34..f847c1b265c4 100644
> --- a/include/linux/swiotlb.h
> +++ b/include/linux/swiotlb.h
> @@ -106,9 +106,6 @@ extern void
>  swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
>  			   int nelems, enum dma_data_direction dir);
>  
> -extern int
> -swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
> -
>  extern int
>  swiotlb_dma_supported(struct device *hwdev, u64 mask);
>  
> diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
> index 674a8da22844..12798abba55e 100644
> --- a/kernel/dma/direct.c
> +++ b/kernel/dma/direct.c
> @@ -14,8 +14,6 @@
>  #include <linux/pfn.h>
>  #include <linux/set_memory.h>
>  
> -#define DIRECT_MAPPING_ERROR		0
> -
>  /*
>   * Most architectures use ZONE_DMA for the first 16 Megabytes, but
>   * some use it for entirely different regions:
> diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
> index 69bf305ee5f8..11dbcd80b4a6 100644
> --- a/kernel/dma/swiotlb.c
> +++ b/kernel/dma/swiotlb.c
> @@ -72,13 +72,6 @@ static phys_addr_t io_tlb_start, io_tlb_end;
>   */
>  static unsigned long io_tlb_nslabs;
>  
> -/*
> - * When the IOMMU overflows we return a fallback buffer. This sets the size.
> - */
> -static unsigned long io_tlb_overflow = 32*1024;
> -
> -static phys_addr_t io_tlb_overflow_buffer;
> -
>  /*
>   * This is a free list describing the number of free entries available from
>   * each index
> @@ -126,7 +119,6 @@ setup_io_tlb_npages(char *str)
>  	return 0;
>  }
>  early_param("swiotlb", setup_io_tlb_npages);
> -/* make io_tlb_overflow tunable too? */
>  
>  unsigned long swiotlb_nr_tbl(void)
>  {
> @@ -194,16 +186,10 @@ void __init swiotlb_update_mem_attributes(void)
>  	bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT);
>  	set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
>  	memset(vaddr, 0, bytes);
> -
> -	vaddr = phys_to_virt(io_tlb_overflow_buffer);
> -	bytes = PAGE_ALIGN(io_tlb_overflow);
> -	set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
> -	memset(vaddr, 0, bytes);
>  }
>  
>  int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
>  {
> -	void *v_overflow_buffer;
>  	unsigned long i, bytes;
>  
>  	bytes = nslabs << IO_TLB_SHIFT;
> @@ -212,17 +198,6 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
>  	io_tlb_start = __pa(tlb);
>  	io_tlb_end = io_tlb_start + bytes;
>  
> -	/*
> -	 * Get the overflow emergency buffer
> -	 */
> -	v_overflow_buffer = memblock_virt_alloc_low_nopanic(
> -						PAGE_ALIGN(io_tlb_overflow),
> -						PAGE_SIZE);
> -	if (!v_overflow_buffer)
> -		return -ENOMEM;
> -
> -	io_tlb_overflow_buffer = __pa(v_overflow_buffer);
> -
>  	/*
>  	 * Allocate and initialize the free list array.  This array is used
>  	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
> @@ -330,7 +305,6 @@ int
>  swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
>  {
>  	unsigned long i, bytes;
> -	unsigned char *v_overflow_buffer;
>  
>  	bytes = nslabs << IO_TLB_SHIFT;
>  
> @@ -341,19 +315,6 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
>  	set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT);
>  	memset(tlb, 0, bytes);
>  
> -	/*
> -	 * Get the overflow emergency buffer
> -	 */
> -	v_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
> -						     get_order(io_tlb_overflow));
> -	if (!v_overflow_buffer)
> -		goto cleanup2;
> -
> -	set_memory_decrypted((unsigned long)v_overflow_buffer,
> -			io_tlb_overflow >> PAGE_SHIFT);
> -	memset(v_overflow_buffer, 0, io_tlb_overflow);
> -	io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer);
> -
>  	/*
>  	 * Allocate and initialize the free list array.  This array is used
>  	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
> @@ -390,10 +351,6 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
>  	                                                 sizeof(int)));
>  	io_tlb_list = NULL;
>  cleanup3:
> -	free_pages((unsigned long)v_overflow_buffer,
> -		   get_order(io_tlb_overflow));
> -	io_tlb_overflow_buffer = 0;
> -cleanup2:
>  	io_tlb_end = 0;
>  	io_tlb_start = 0;
>  	io_tlb_nslabs = 0;
> @@ -407,8 +364,6 @@ void __init swiotlb_exit(void)
>  		return;
>  
>  	if (late_alloc) {
> -		free_pages((unsigned long)phys_to_virt(io_tlb_overflow_buffer),
> -			   get_order(io_tlb_overflow));
>  		free_pages((unsigned long)io_tlb_orig_addr,
>  			   get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
>  		free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
> @@ -416,8 +371,6 @@ void __init swiotlb_exit(void)
>  		free_pages((unsigned long)phys_to_virt(io_tlb_start),
>  			   get_order(io_tlb_nslabs << IO_TLB_SHIFT));
>  	} else {
> -		memblock_free_late(io_tlb_overflow_buffer,
> -				   PAGE_ALIGN(io_tlb_overflow));
>  		memblock_free_late(__pa(io_tlb_orig_addr),
>  				   PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
>  		memblock_free_late(__pa(io_tlb_list),
> @@ -790,7 +743,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
>  	/* Oh well, have to allocate and map a bounce buffer. */
>  	map = map_single(dev, phys, size, dir, attrs);
>  	if (map == SWIOTLB_MAP_ERROR)
> -		return __phys_to_dma(dev, io_tlb_overflow_buffer);
> +		return DIRECT_MAPPING_ERROR;
>  
>  	dev_addr = __phys_to_dma(dev, map);
>  
> @@ -801,7 +754,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
>  	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
>  	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
>  
> -	return __phys_to_dma(dev, io_tlb_overflow_buffer);
> +	return DIRECT_MAPPING_ERROR;
>  }
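
So the failure round trip becomes: swiotlb_map_page() returns
DIRECT_MAPPING_ERROR, and the caller's dma_mapping_error() reports it
through the .mapping_error op, which this patch points at
dma_direct_mapping_error() (see the ops table below). The core dispatch
in include/linux/dma-mapping.h is roughly:

	static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
	{
		const struct dma_map_ops *ops = get_dma_ops(dev);

		debug_dma_mapping_error(dev, dma_addr);
		if (ops->mapping_error)
			return ops->mapping_error(dev, dma_addr);
		return 0;
	}
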
>  
>  /*
> @@ -985,12 +938,6 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
>  	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
>  }
>  
> -int
> -swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
> -{
> -	return (dma_addr == __phys_to_dma(hwdev, io_tlb_overflow_buffer));
> -}
> -
>  /*
>   * Return whether the given device DMA address mask can be supported
>   * properly.  For example, if your device can only drive the low 24-bits
> @@ -1033,7 +980,7 @@ void swiotlb_free(struct device *dev, size_t size, void *vaddr,
>  }
>  
>  const struct dma_map_ops swiotlb_dma_ops = {
> -	.mapping_error		= swiotlb_dma_mapping_error,
> +	.mapping_error		= dma_direct_mapping_error,
>  	.alloc			= swiotlb_alloc,
>  	.free			= swiotlb_free,
>  	.sync_single_for_cpu	= swiotlb_sync_single_for_cpu,
> -- 
> 2.19.0
> 

Thread overview: 57+ messages
2018-10-08  8:02 move swiotlb noncoherent dma support from arm64 to generic code V2 Christoph Hellwig
2018-10-08  8:02 ` [PATCH 01/10] swiotlb: remove a pointless comment Christoph Hellwig
2018-10-11 17:49   ` Robin Murphy
2018-10-19  0:09   ` Konrad Rzeszutek Wilk
2018-10-08  8:02 ` [PATCH 02/10] swiotlb: mark is_swiotlb_buffer static Christoph Hellwig
2018-10-11 17:54   ` Robin Murphy
2018-10-19  0:12   ` Konrad Rzeszutek Wilk
2018-10-08  8:02 ` [PATCH 03/10] swiotlb: do not panic on mapping failures Christoph Hellwig
2018-10-11 18:06   ` Robin Murphy
2018-10-19  0:18     ` Konrad Rzeszutek Wilk
2018-10-19  0:17   ` Konrad Rzeszutek Wilk
2018-10-19  6:04     ` Christoph Hellwig
2018-10-19 13:45       ` Konrad Rzeszutek Wilk
2018-10-08  8:02 ` [PATCH 04/10] swiotlb: remove the overflow buffer Christoph Hellwig
2018-10-11 18:19   ` Robin Murphy
2018-10-12 17:04   ` Catalin Marinas
2018-10-19  0:23   ` Konrad Rzeszutek Wilk [this message]
2018-10-08  8:02 ` [PATCH 05/10] swiotlb: merge swiotlb_unmap_page and unmap_single Christoph Hellwig
2018-10-18 17:44   ` Robin Murphy
2018-10-19  0:25   ` Konrad Rzeszutek Wilk
2018-10-08  8:02 ` [PATCH 06/10] swiotlb: use swiotlb_map_page in swiotlb_map_sg_attrs Christoph Hellwig
2018-10-18 17:53   ` Robin Murphy
2018-10-19  0:33   ` Konrad Rzeszutek Wilk
2018-11-07  1:27   ` John Stultz
2018-11-09  7:49     ` Christoph Hellwig
2018-11-09 16:37       ` Robin Murphy
2018-11-19 19:36         ` Robin Murphy
2018-11-20  9:22           ` Christoph Hellwig
2018-11-13  0:07       ` John Stultz
2018-11-13  0:26         ` John Stultz
2018-11-14 14:13         ` Christoph Hellwig
2018-11-14 16:12           ` Christoph Hellwig
2018-11-19 23:22             ` John Stultz
2018-11-20  9:25               ` Christoph Hellwig
2018-11-23 18:27                 ` Will Deacon
2018-11-23 19:34                   ` Robin Murphy
2018-11-26 19:31                     ` Will Deacon
2018-10-08  8:02 ` [PATCH 07/10] swiotlb: refactor swiotlb_map_page Christoph Hellwig
2018-10-18 18:09   ` Robin Murphy
2018-10-19  0:37     ` Konrad Rzeszutek Wilk
2018-10-19  6:52       ` Christoph Hellwig
2018-10-19 13:46         ` Konrad Rzeszutek Wilk
2018-10-08  8:02 ` [PATCH 08/10] swiotlb: don't dip into swiotlb pool for coherent allocations Christoph Hellwig
2018-10-12 17:04   ` Catalin Marinas
2018-10-19  0:40   ` Konrad Rzeszutek Wilk
2018-10-19 16:45   ` Robin Murphy
2018-10-08  8:02 ` [PATCH 09/10] swiotlb: add support for non-coherent DMA Christoph Hellwig
2018-10-19  0:49   ` Konrad Rzeszutek Wilk
2018-10-22 17:11   ` Robin Murphy
2018-10-26  8:04     ` Christoph Hellwig
2018-10-26  9:59       ` Robin Murphy
2018-10-08  8:02 ` [PATCH 10/10] arm64: use the generic swiotlb_dma_ops Christoph Hellwig
2018-10-12 13:01   ` Robin Murphy
2018-10-12 14:40     ` Christoph Hellwig
2018-10-12 17:05       ` Catalin Marinas
2018-10-22 17:52   ` Robin Murphy
2018-10-26 12:44     ` Christoph Hellwig
