public inbox for kvm@vger.kernel.org
 help / color / mirror / Atom feed
From: Glauber Costa <gcosta@redhat.com>
To: Ingo Molnar <mingo@elte.hu>
Cc: kvm-devel@lists.sourceforge.net, linux-kernel@vger.kernel.org,
	avi@qumranet.com, akpm@linux-foundation.org
Subject: Re: [PATCH 0/20] dma_ops for i386
Date: Wed, 26 Mar 2008 10:16:33 -0300	[thread overview]
Message-ID: <47EA4CB1.8070609@redhat.com> (raw)
In-Reply-To: <20080326130417.GA13602@elte.hu>

Ingo Molnar wrote:
> * Ingo Molnar <mingo@elte.hu> wrote:
> 
>> what i came up with is the prototype 32-bit fix below - this works on 
>> 32-bit but breaks 64-bit because we pass in physical addresses instead 
>> of virtual direct addresses.
>>
>> i'll fix the 64-bit side but that means materially touching all the 
>> dma_mapping_ops instantiations on the 64-bit side - not 
>> really something we wanted to do :-/
> 
> the full fix ended up being the one below. It's not that bad - and 
> gart_64.c looks even a bit cleaner. Still, it needs careful review.
> 
> 	Ingo
> 
> --------------->
> Subject: x86: dma-ops on highmem fix
> From: Ingo Molnar <mingo@elte.hu>
> 
> Signed-off-by: Ingo Molnar <mingo@elte.hu>
> ---
>  arch/x86/kernel/pci-base_32.c    |    4 ++--
>  arch/x86/kernel/pci-calgary_64.c |    3 ++-
>  arch/x86/kernel/pci-dma_64.c     |    2 +-
>  arch/x86/kernel/pci-gart_64.c    |   15 +++++++--------
>  arch/x86/kernel/pci-nommu_64.c   |    4 ++--
>  arch/x86/kernel/pci-swiotlb_64.c |    9 ++++++++-
>  include/asm-x86/dma-mapping.h    |   10 ++++++----
>  7 files changed, 28 insertions(+), 19 deletions(-)
> 
> Index: linux-x86.q/arch/x86/kernel/pci-base_32.c
> ===================================================================
> --- linux-x86.q.orig/arch/x86/kernel/pci-base_32.c
> +++ linux-x86.q/arch/x86/kernel/pci-base_32.c
> @@ -4,12 +4,12 @@
>  #include <linux/dma-mapping.h>
>  #include <asm/dma-mapping.h>
>  
> -static dma_addr_t pci32_map_single(struct device *dev, void *ptr,
> +static dma_addr_t pci32_map_single(struct device *dev, phys_addr_t ptr,
>  				   size_t size, int direction)
>  {
>  	WARN_ON(size == 0);
>  	flush_write_buffers();
> -	return virt_to_phys(ptr);
> +	return ptr;
>  }
>  
>  static int pci32_dma_map_sg(struct device *dev, struct scatterlist *sglist,
> Index: linux-x86.q/arch/x86/kernel/pci-calgary_64.c
> ===================================================================
> --- linux-x86.q.orig/arch/x86/kernel/pci-calgary_64.c
> +++ linux-x86.q/arch/x86/kernel/pci-calgary_64.c
> @@ -470,10 +470,11 @@ error:
>  	return 0;
>  }
>  
> -static dma_addr_t calgary_map_single(struct device *dev, void *vaddr,
> +static dma_addr_t calgary_map_single(struct device *dev, phys_addr_t paddr,
>  	size_t size, int direction)
>  {
>  	dma_addr_t dma_handle = bad_dma_address;
> +	void *vaddr = phys_to_virt(paddr);
>  	unsigned long uaddr;
>  	unsigned int npages;
>  	struct iommu_table *tbl = find_iommu_table(dev);
> Index: linux-x86.q/arch/x86/kernel/pci-dma_64.c
> ===================================================================
> --- linux-x86.q.orig/arch/x86/kernel/pci-dma_64.c
> +++ linux-x86.q/arch/x86/kernel/pci-dma_64.c
> @@ -141,7 +141,7 @@ dma_alloc_coherent(struct device *dev, s
>  	}
>  
>  	if (dma_ops->map_simple) {
> -		*dma_handle = dma_ops->map_simple(dev, memory,
> +		*dma_handle = dma_ops->map_simple(dev, virt_to_phys(memory),
>  					      size,
>  					      PCI_DMA_BIDIRECTIONAL);
>  		if (*dma_handle != bad_dma_address)
> Index: linux-x86.q/arch/x86/kernel/pci-gart_64.c
> ===================================================================
> --- linux-x86.q.orig/arch/x86/kernel/pci-gart_64.c
> +++ linux-x86.q/arch/x86/kernel/pci-gart_64.c
> @@ -264,9 +264,9 @@ static dma_addr_t dma_map_area(struct de
>  }
>  
>  static dma_addr_t
> -gart_map_simple(struct device *dev, char *buf, size_t size, int dir)
> +gart_map_simple(struct device *dev, phys_addr_t paddr, size_t size, int dir)
>  {
> -	dma_addr_t map = dma_map_area(dev, virt_to_bus(buf), size, dir);
> +	dma_addr_t map = dma_map_area(dev, paddr, size, dir);
>  
>  	flush_gart();
>  
> @@ -275,18 +275,17 @@ gart_map_simple(struct device *dev, char
>  
>  /* Map a single area into the IOMMU */
>  static dma_addr_t
> -gart_map_single(struct device *dev, void *addr, size_t size, int dir)
> +gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
>  {
> -	unsigned long phys_mem, bus;
> +	unsigned long bus;
>  
>  	if (!dev)
>  		dev = &fallback_dev;
>  
> -	phys_mem = virt_to_phys(addr);
> -	if (!need_iommu(dev, phys_mem, size))
> -		return phys_mem;
> +	if (!need_iommu(dev, paddr, size))
> +		return paddr;
>  
> -	bus = gart_map_simple(dev, addr, size, dir);
> +	bus = gart_map_simple(dev, paddr, size, dir);
>  
>  	return bus;
>  }
> Index: linux-x86.q/arch/x86/kernel/pci-nommu_64.c
> ===================================================================
> --- linux-x86.q.orig/arch/x86/kernel/pci-nommu_64.c
> +++ linux-x86.q/arch/x86/kernel/pci-nommu_64.c
> @@ -26,10 +26,10 @@ check_addr(char *name, struct device *hw
>  }
>  
>  static dma_addr_t
> -nommu_map_single(struct device *hwdev, void *ptr, size_t size,
> +nommu_map_single(struct device *hwdev, phys_addr_t paddr, size_t size,
>  	       int direction)
>  {
> -	dma_addr_t bus = virt_to_bus(ptr);
> +	dma_addr_t bus = paddr;
>  	if (!check_addr("map_single", hwdev, bus, size))
>  				return bad_dma_address;
>  	return bus;
> Index: linux-x86.q/arch/x86/kernel/pci-swiotlb_64.c
> ===================================================================
> --- linux-x86.q.orig/arch/x86/kernel/pci-swiotlb_64.c
> +++ linux-x86.q/arch/x86/kernel/pci-swiotlb_64.c
> @@ -11,11 +11,18 @@
>  
>  int swiotlb __read_mostly;
>  
> +static dma_addr_t
> +swiotlb_map_single_phys(struct device *hwdev, phys_addr_t paddr, size_t size,
> +			int direction)
> +{
> +	return swiotlb_map_single(hwdev, phys_to_virt(paddr), size, direction);
> +}
> +
>  const struct dma_mapping_ops swiotlb_dma_ops = {
>  	.mapping_error = swiotlb_dma_mapping_error,
>  	.alloc_coherent = swiotlb_alloc_coherent,
>  	.free_coherent = swiotlb_free_coherent,
> -	.map_single = swiotlb_map_single,
> +	.map_single = swiotlb_map_single_phys,
>  	.unmap_single = swiotlb_unmap_single,
>  	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
>  	.sync_single_for_device = swiotlb_sync_single_for_device,
> Index: linux-x86.q/include/asm-x86/dma-mapping.h
> ===================================================================
> --- linux-x86.q.orig/include/asm-x86/dma-mapping.h
> +++ linux-x86.q/include/asm-x86/dma-mapping.h
> @@ -16,10 +16,10 @@ struct dma_mapping_ops {
>  				dma_addr_t *dma_handle, gfp_t gfp);
>  	void            (*free_coherent)(struct device *dev, size_t size,
>  				void *vaddr, dma_addr_t dma_handle);
> -	dma_addr_t      (*map_single)(struct device *hwdev, void *ptr,
> +	dma_addr_t      (*map_single)(struct device *hwdev, phys_addr_t ptr,
>  				size_t size, int direction);
>  	/* like map_single, but doesn't check the device mask */
> -	dma_addr_t      (*map_simple)(struct device *hwdev, char *ptr,
> +	dma_addr_t      (*map_simple)(struct device *hwdev, phys_addr_t ptr,
>  				size_t size, int direction);
>  	void            (*unmap_single)(struct device *dev, dma_addr_t addr,
>  				size_t size, int direction);
> @@ -73,7 +73,7 @@ dma_map_single(struct device *hwdev, voi
>  	       int direction)
>  {
>  	BUG_ON(!valid_dma_direction(direction));
> -	return dma_ops->map_single(hwdev, ptr, size, direction);
> +	return dma_ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
>  }
>  
>  static inline void
> @@ -174,7 +174,9 @@ static inline dma_addr_t dma_map_page(st
>  				      size_t offset, size_t size,
>  				      int direction)
>  {
> -	return dma_map_single(dev, page_address(page)+offset, size, direction);
> +	BUG_ON(!valid_dma_direction(direction));
> +	return dma_ops->map_single(dev, page_to_phys(page)+offset,
> +				   size, direction);
>  }
>  
>  static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
It looks all good to me.
I'll give it a shot on my systems to see if it goes okay.

-------------------------------------------------------------------------
Check out the new SourceForge.net Marketplace.
It's the best place to buy or sell services for
just about anything Open Source.
http://ad.doubleclick.net/clk;164216239;13503038;w?http://sf.net/marketplace

  reply	other threads:[~2008-03-26 13:16 UTC|newest]

Thread overview: 31+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2008-03-25 21:36 [PATCH 0/20] dma_ops for i386 Glauber Costa
2008-03-25 21:36 ` [PATCH 01/20] x86: move dma_ops struct definition to dma-mapping.h Glauber Costa
2008-03-25 21:36   ` [PATCH 02/20] x86: implement dma_map_single through dma_ops Glauber Costa
2008-03-25 21:36     ` [PATCH 03/20] x86: move dma_unmap_single to common header Glauber Costa
2008-03-25 21:36       ` [PATCH 04/20] x86: move dma_map_sg " Glauber Costa
2008-03-25 21:36         ` [PATCH 05/20] x86: move dma_unmap_sg " Glauber Costa
2008-03-25 21:36           ` [PATCH 06/20] x86: move dma_sync_single_for_cpu " Glauber Costa
2008-03-25 21:36             ` [PATCH 07/20] x86: move dma_sync_single_for_device " Glauber Costa
2008-03-25 21:36               ` [PATCH 08/20] x86: move dma_sync_single_range_for_cpu " Glauber Costa
2008-03-25 21:36                 ` [PATCH 09/20] x86: move dma_sync_single_range_for_device " Glauber Costa
2008-03-25 21:36                   ` [PATCH 10/20] x86: move dma_sync_sg_for_cpu " Glauber Costa
2008-03-25 21:36                     ` [PATCH 11/20] x86: move dma_sync_sg_for_device " Glauber Costa
2008-03-25 21:36                       ` [PATCH 12/20] x86: move alloc and free coherent " Glauber Costa
2008-03-25 21:36                         ` [PATCH 13/20] x86: move dma_map_page and dma_unmap_page " Glauber Costa
2008-03-25 21:36                           ` [PATCH 14/20] x86: move dma_cache_sync " Glauber Costa
2008-03-25 21:36                             ` [PATCH 15/20] x86: move dma_supported and dma_set_mask to pci-dma_32.c Glauber Costa
2008-03-25 21:36                               ` [PATCH 16/20] x86: align to clflush size Glauber Costa
2008-03-25 21:36                                 ` [PATCH 17/20] x86: provide a bad_dma_address symbol for i386 Glauber Costa
2008-03-25 21:36                                   ` [PATCH 18/20] x86: unify dma_mapping_error Glauber Costa
2008-03-25 21:36                                     ` [PATCH 19/20] x86: move ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY to dma-mapping.h Glauber Costa
2008-03-25 21:36                                       ` [PATCH 20/20] x86: delete the arch-specific dma-mapping headers Glauber Costa
2008-03-26  7:09                                 ` [PATCH 16/20] x86: align to clflush size Ingo Molnar
2008-03-27 11:03                               ` [PATCH 15/20] x86: move dma_supported and dma_set_mask to pci-dma_32.c Mark McLoughlin
2008-03-27 11:54                                 ` Ingo Molnar
2008-03-26  7:06 ` [PATCH 0/20] dma_ops for i386 Ingo Molnar
2008-03-26 12:49   ` Ingo Molnar
2008-03-26 13:04     ` Ingo Molnar
2008-03-26 13:16       ` Glauber Costa [this message]
2008-03-26 10:01 ` Avi Kivity
2008-03-26 12:03   ` Glauber Costa
2008-03-27  9:49 ` Amit Shah

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=47EA4CB1.8070609@redhat.com \
    --to=gcosta@redhat.com \
    --cc=akpm@linux-foundation.org \
    --cc=avi@qumranet.com \
    --cc=kvm-devel@lists.sourceforge.net \
    --cc=linux-kernel@vger.kernel.org \
    --cc=mingo@elte.hu \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox