public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
From: Robin Murphy <robin.murphy@arm.com>
To: Joerg Roedel <joro@8bytes.org>, iommu@lists.linux-foundation.org
Cc: Vincent.Wan@amd.com, Joerg Roedel <jroedel@suse.de>,
	linux-kernel@vger.kernel.org
Subject: Re: [PATCH 07/20] iommu/amd: Remove special mapping code for dma_ops path
Date: Tue, 12 Jul 2016 11:55:39 +0100	[thread overview]
Message-ID: <5784CCAB.8000007@arm.com> (raw)
In-Reply-To: <1467978311-28322-8-git-send-email-joro@8bytes.org>

Hi Joerg,

On 08/07/16 12:44, Joerg Roedel wrote:
> From: Joerg Roedel <jroedel@suse.de>
> 
> Use the iommu-api map/unmap functions instead. This will be
> required anyway when IOVA code is used for address
> allocation.
> 
> Signed-off-by: Joerg Roedel <jroedel@suse.de>
> ---
>  drivers/iommu/amd_iommu.c | 107 ++++++----------------------------------------
>  1 file changed, 14 insertions(+), 93 deletions(-)
> 
> diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
> index e7c042b..08aa46c 100644
> --- a/drivers/iommu/amd_iommu.c
> +++ b/drivers/iommu/amd_iommu.c
> @@ -2557,94 +2557,6 @@ static void update_domain(struct protection_domain *domain)
>  }
>  
>  /*
> - * This function fetches the PTE for a given address in the aperture
> - */
> -static u64* dma_ops_get_pte(struct dma_ops_domain *dom,
> -			    unsigned long address)
> -{
> -	struct aperture_range *aperture;
> -	u64 *pte, *pte_page;
> -
> -	aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
> -	if (!aperture)
> -		return NULL;
> -
> -	pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
> -	if (!pte) {
> -		pte = alloc_pte(&dom->domain, address, PAGE_SIZE, &pte_page,
> -				GFP_ATOMIC);
> -		aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page;
> -	} else
> -		pte += PM_LEVEL_INDEX(0, address);
> -
> -	update_domain(&dom->domain);
> -
> -	return pte;
> -}
> -
> -/*
> - * This is the generic map function. It maps one 4kb page at paddr to
> - * the given address in the DMA address space for the domain.
> - */
> -static dma_addr_t dma_ops_domain_map(struct dma_ops_domain *dom,
> -				     unsigned long address,
> -				     phys_addr_t paddr,
> -				     int direction)
> -{
> -	u64 *pte, __pte;
> -
> -	WARN_ON(address > dom->aperture_size);
> -
> -	paddr &= PAGE_MASK;
> -
> -	pte  = dma_ops_get_pte(dom, address);
> -	if (!pte)
> -		return DMA_ERROR_CODE;
> -
> -	__pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;
> -
> -	if (direction == DMA_TO_DEVICE)
> -		__pte |= IOMMU_PTE_IR;
> -	else if (direction == DMA_FROM_DEVICE)
> -		__pte |= IOMMU_PTE_IW;
> -	else if (direction == DMA_BIDIRECTIONAL)
> -		__pte |= IOMMU_PTE_IR | IOMMU_PTE_IW;
> -
> -	WARN_ON_ONCE(*pte);
> -
> -	*pte = __pte;
> -
> -	return (dma_addr_t)address;
> -}
> -
> -/*
> - * The generic unmapping function for on page in the DMA address space.
> - */
> -static void dma_ops_domain_unmap(struct dma_ops_domain *dom,
> -				 unsigned long address)
> -{
> -	struct aperture_range *aperture;
> -	u64 *pte;
> -
> -	if (address >= dom->aperture_size)
> -		return;
> -
> -	aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
> -	if (!aperture)
> -		return;
> -
> -	pte  = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
> -	if (!pte)
> -		return;
> -
> -	pte += PM_LEVEL_INDEX(0, address);
> -
> -	WARN_ON_ONCE(!*pte);
> -
> -	*pte = 0ULL;
> -}
> -
> -/*
>   * This function contains common code for mapping of a physically
>   * contiguous memory region into DMA address space. It is used by all
>   * mapping functions provided with this IOMMU driver.
> @@ -2654,7 +2566,7 @@ static dma_addr_t __map_single(struct device *dev,
>  			       struct dma_ops_domain *dma_dom,
>  			       phys_addr_t paddr,
>  			       size_t size,
> -			       int dir,
> +			       int direction,
>  			       bool align,
>  			       u64 dma_mask)
>  {
> @@ -2662,6 +2574,7 @@ static dma_addr_t __map_single(struct device *dev,
>  	dma_addr_t address, start, ret;
>  	unsigned int pages;
>  	unsigned long align_mask = 0;
> +	int prot = 0;
>  	int i;
>  
>  	pages = iommu_num_pages(paddr, size, PAGE_SIZE);
> @@ -2676,10 +2589,18 @@ static dma_addr_t __map_single(struct device *dev,
>  	if (address == DMA_ERROR_CODE)
>  		goto out;
>  
> +	if (direction == DMA_TO_DEVICE)
> +		prot = IOMMU_PROT_IR;
> +	else if (direction == DMA_FROM_DEVICE)
> +		prot = IOMMU_PROT_IW;
> +	else if (direction == DMA_BIDIRECTIONAL)
> +		prot = IOMMU_PROT_IW | IOMMU_PROT_IR;
> +
>  	start = address;
>  	for (i = 0; i < pages; ++i) {
> -		ret = dma_ops_domain_map(dma_dom, start, paddr, dir);
> -		if (ret == DMA_ERROR_CODE)
> +		ret = iommu_map_page(&dma_dom->domain, start, paddr,
> +				     PAGE_SIZE, prot, GFP_ATOMIC);

I see that amd_iommu_map/unmap() take a lock around calling
iommu_map/unmap_page(), but we don't appear to do that here. That seems
to suggest that either one is unsafe or the other is unnecessary.

Robin.

> +		if (ret)
>  			goto out_unmap;
>  
>  		paddr += PAGE_SIZE;
> @@ -2699,7 +2620,7 @@ out_unmap:
>  
>  	for (--i; i >= 0; --i) {
>  		start -= PAGE_SIZE;
> -		dma_ops_domain_unmap(dma_dom, start);
> +		iommu_unmap_page(&dma_dom->domain, start, PAGE_SIZE);
>  	}
>  
>  	dma_ops_free_addresses(dma_dom, address, pages);
> @@ -2730,7 +2651,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
>  	start = dma_addr;
>  
>  	for (i = 0; i < pages; ++i) {
> -		dma_ops_domain_unmap(dma_dom, start);
> +		iommu_unmap_page(&dma_dom->domain, start, PAGE_SIZE);
>  		start += PAGE_SIZE;
>  	}
>  
> 

  reply	other threads:[~2016-07-12 10:55 UTC|newest]

Thread overview: 33+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2016-07-08 11:44 [PATCH 00/20] iommu/amd: Use generic IOVA allocator Joerg Roedel
2016-07-08 11:44 ` [PATCH 01/20] iommu: Add apply_dm_region call-back to iommu-ops Joerg Roedel
2016-07-08 11:44 ` [PATCH 02/20] iommu/amd: Select IOMMU_IOVA for AMD IOMMU Joerg Roedel
2016-07-08 11:44 ` [PATCH 03/20] iommu/amd: Allocate iova_domain for dma_ops_domain Joerg Roedel
2016-07-08 11:44 ` [PATCH 04/20] iommu/amd: Create a list of reserved iova addresses Joerg Roedel
2016-07-08 11:44 ` [PATCH 05/20] iommu/amd: Implement apply_dm_region call-back Joerg Roedel
2016-07-08 11:44 ` [PATCH 06/20] iommu/amd: Pass gfp-flags to iommu_map_page() Joerg Roedel
2016-07-08 11:44 ` [PATCH 07/20] iommu/amd: Remove special mapping code for dma_ops path Joerg Roedel
2016-07-12 10:55   ` Robin Murphy [this message]
2016-07-12 11:08     ` Joerg Roedel
2016-07-12 11:42       ` Robin Murphy
2016-07-12 11:48         ` Joerg Roedel
2016-07-08 11:44 ` [PATCH 08/20] iommu/amd: Make use of the generic IOVA allocator Joerg Roedel
2016-07-08 11:45 ` [PATCH 09/20] iommu/amd: Remove other remains of old address allocator Joerg Roedel
2016-07-08 11:45 ` [PATCH 10/20] iommu/amd: Remove align-parameter from __map_single() Joerg Roedel
2016-07-08 11:45 ` [PATCH 11/20] iommu/amd: Set up data structures for flush queue Joerg Roedel
2016-07-08 11:45 ` [PATCH 12/20] iommu/amd: Allow NULL pointer parameter for domain_flush_complete() Joerg Roedel
2016-07-08 11:45 ` [PATCH 13/20] iommu/amd: Implement flush queue Joerg Roedel
2016-07-08 11:45 ` [PATCH 14/20] iommu/amd: Implement timeout to flush unmap queues Joerg Roedel
2016-07-08 11:45 ` [PATCH 15/20] iommu/amd: Introduce dir2prot() helper Joerg Roedel
2016-07-08 11:45 ` [PATCH 16/20] iommu/amd: Optimize map_sg and unmap_sg Joerg Roedel
2016-07-12 11:33   ` Robin Murphy
2016-07-12 13:30     ` [PATCH 16/20 v2] " Joerg Roedel
2016-07-12 15:34       ` Robin Murphy
2016-07-13 10:27         ` Joerg Roedel
2016-07-08 11:45 ` [PATCH 17/20] iommu/amd: Use dev_data->domain in get_domain() Joerg Roedel
2016-07-08 11:45 ` [PATCH 18/20] iommu/amd: Handle IOMMU_DOMAIN_DMA in ops->domain_free call-back Joerg Roedel
2016-07-08 11:45 ` [PATCH 19/20] iommu/amd: Flush iova queue before releasing dma_ops_domain Joerg Roedel
2016-07-08 11:45 ` [PATCH 20/20] iommu/amd: Use container_of to get dma_ops_domain Joerg Roedel
2016-07-12  9:03 ` [PATCH 00/20] iommu/amd: Use generic IOVA allocator Wan Zongshun
2016-07-12 10:55   ` Joerg Roedel
2016-07-13  9:44     ` Wan Zongshun
2016-07-13  9:51       ` Joerg Roedel

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=5784CCAB.8000007@arm.com \
    --to=robin.murphy@arm.com \
    --cc=Vincent.Wan@amd.com \
    --cc=iommu@lists.linux-foundation.org \
    --cc=joro@8bytes.org \
    --cc=jroedel@suse.de \
    --cc=linux-kernel@vger.kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox