public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
From: Andrew Morton <akpm@linux-foundation.org>
To: anil.s.keshavamurthy@intel.com
Cc: linux-kernel@vger.kernel.org, ak@suse.de, gregkh@suse.de,
	muli@il.ibm.com, asit.k.mallick@intel.com,
	suresh.b.siddha@intel.com, arjan@linux.intel.com,
	ashok.raj@intel.com, shaohua.li@intel.com, davem@davemloft.net
Subject: Re: [Intel-IOMMU 05/10] IOVA allocation and management routines
Date: Thu, 7 Jun 2007 16:34:17 -0700	[thread overview]
Message-ID: <20070607163417.84f96405.akpm@linux-foundation.org> (raw)
In-Reply-To: <20070606190042.741931000@askeshav-devel.jf.intel.com>

On Wed, 06 Jun 2007 11:57:03 -0700
anil.s.keshavamurthy@intel.com wrote:

> 	This code implements a generic IOVA allocation and 
> management. As per Dave's suggestion we are now allocating
> IO virtual address from Higher DMA limit address rather
> than lower end address and this eliminated the need to preserve
> the IO virtual address for multiple devices sharing the same
> domain virtual address.
> 
> Also this code uses red black trees to store the allocated and
> reserved iova nodes. This showed a good performance improvements
> over previous linear linked list.
> 
> ...
>
> +
> +/**
> + * alloc_iova - allocates an iova
> + * @iovad - iova domain in question
> + * @size - size of page frames to allocate
> + * @limit_pfn - max limit address
> + * This function allocates an iova in the range limit_pfn to IOVA_START_PFN
> + * looking from limit_pfn instead from IOVA_START_PFN.
> + */
> +
> +struct iova *
> +alloc_iova(struct iova_domain *iovad, unsigned long size, unsigned long limit_pfn)

Generally we omit the blank line between the end-of-comment and the
function definition.

> +{
> +	unsigned long flags,flags1;
> +	struct iova *new_iova;
> +	int ret;
> +
> +	new_iova = alloc_iova_mem();
> +	if (!new_iova)
> +		return NULL;
> +
> +	spin_lock_irqsave(&iovad->iova_alloc_lock, flags1);
> +	ret = __alloc_iova_range(iovad, size, limit_pfn, new_iova);
> +
> +	if (ret) {
> +		spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags1);
> +		free_iova_mem(new_iova);
> +		return NULL;
> +	}
> +
> +	/* Insert the new_iova into domain rbtree by holding writer lock */
> +	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
> +	iova_insert_rbtree(&iovad->rbroot, new_iova);
> +	__cached_rbnode_insert_update(iovad, limit_pfn, new_iova);
> +	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
> +
> +	spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags1);

spin_unlock_irqrestore() within spin_unlock_irqrestore() is fairly
pointless.  You can just use spin_lock()/spin_unlock() for the innermost
pair.

> +	return new_iova;
> +}
>
> ...
>
> +__is_range_overlap(struct rb_node *node, unsigned long pfn_lo, unsigned long pfn_hi)
> +{
> +	struct iova * iova = container_of(node, struct iova, node);

run checkpatch.pl, please.

> +	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
> +		return 1;
> +	return 0;
> +}
> +
>
> ...
>
> +struct iova *
> +reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo, unsigned long pfn_hi)
> +{
> +	struct rb_node *node;
> +	unsigned long flags, flags1;
> +	struct iova *iova;
> +	unsigned int overlap = 0;
> +
> +	spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
> +	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags1);
> +	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
> +		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
> +			iova = container_of(node, struct iova, node);
> +			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
> +			if ((pfn_lo >= iova->pfn_lo) &&
> +				(pfn_hi <= iova->pfn_hi))
> +				goto finish;
> +			overlap = 1;
> +
> +		} else if (overlap)
> +				break;
> +	}
> +
> +	/* We are here either because this is the first reserved node
> +	 * or need to insert remaining non overlap addr range
> +	 */
> +	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
> +finish:
> +
> +	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags1);
> +	spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);

ditto

> +	return iova;
> +}
> +
> +/**
> + * copy_reserved_iova - copies the reserved between domains
> + * @from: - source domain from where to copy
> + * @to: - destination domain where to copy
> + * This function copies reserved iova's from one domain to
> + * other.
> + */
> +void
> +copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
> +{
> +	unsigned long flags, flags1;
> +	struct rb_node *node;
> +	spin_lock_irqsave(&from->iova_alloc_lock, flags);
> +	spin_lock_irqsave(&from->iova_rbtree_lock, flags1);

Add a blank line between the end-of-locals and the start-of-statements

> +	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
> +		struct iova *iova = container_of(node, struct iova, node);
> +		struct iova *new_iova;
> +		new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
> +		if (!new_iova)
> +			printk(KERN_ERR "Reserve iova range %lx@%lx failed\n",
> +				iova->pfn_lo, iova->pfn_lo);
> +	}
> +	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags1);
> +	spin_unlock_irqrestore(&from->iova_alloc_lock, flags);

ditto

> +}
> Index: linux-2.6.22-rc3/drivers/pci/iova.h
> ===================================================================
> --- /dev/null	1970-01-01 00:00:00.000000000 +0000
> +++ linux-2.6.22-rc3/drivers/pci/iova.h	2007-06-04 12:40:20.000000000 -0700
> @@ -0,0 +1,57 @@
> +/*
> + * Copyright (c) 2006, Intel Corporation.
> + *
> + * This file is released under the GPLv2.
> + *
> + * Copyright (C) 2006 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
> + *
> + */
> +
> +#ifndef _IOVA_H_
> +#define _IOVA_H_
> +
> +#include <linux/types.h>
> +#include <linux/kernel.h>
> +#include <linux/rbtree.h>
> +#include <linux/dma-mapping.h>
> +
> +
> +#define PAGE_SHIFT_4K		(12)
> +#define PAGE_SIZE_4K		(1UL << PAGE_SHIFT_4K)
> +#define PAGE_MASK_4K		(((u64)-1) << PAGE_SHIFT_4K)
> +#define PAGE_ALIGN_4K(addr)	(((addr) + PAGE_SIZE_4K - 1) & PAGE_MASK_4K)

hm.  We can't use the architecture's PAGE_SHIFT and friends here?

> +#define IOVA_START_ADDR		(0x1000)
> +#define IOVA_START_PFN		(IOVA_START_ADDR >> PAGE_SHIFT_4K)
> +
> +#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT_4K)
> +#define DMA_32BIT_PFN	IOVA_PFN(DMA_32BIT_MASK)
> +#define DMA_64BIT_PFN	IOVA_PFN(DMA_64BIT_MASK)
> +
> +/* iova structure */
> +struct iova {
> +	struct rb_node	node;
> +	unsigned long	pfn_hi; /* IOMMU dish out addr hi */
> +	unsigned long	pfn_lo; /* IOMMU dish out addr lo */
> +};
> +
> +/* holds all the iova translations for a domain */
> +struct iova_domain {
> +	spinlock_t	iova_alloc_lock;/* Lock to protect iova  allocation */
> +	spinlock_t	iova_rbtree_lock; /* Lock to protect update of rbtree */
> +	struct rb_root	rbroot;		/* iova domain rbtree root */
> +	struct rb_node	*cached32_node; /* Save last alloced node to optimize alloc */
> +};
> +
> +struct iova *alloc_iova_mem(void);
> +void free_iova_mem(struct iova *iova);
> +void free_iova(struct iova_domain *iovad, unsigned long pfn);
> +void __free_iova(struct iova_domain *iovad, struct iova *iova);
> +struct iova * alloc_iova(struct iova_domain *iovad, unsigned long size, unsigned long limit_pfn);
> +struct iova * reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo, unsigned long pfn_hi);
> +void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
> +void init_iova_domain(struct iova_domain *iovad);
> +struct iova * find_iova(struct iova_domain *iovad, unsigned long pfn);
> +void put_iova_domain(struct iova_domain *iovad);
> +
> +#endif
> 
> -- 

  reply	other threads:[~2007-06-07 23:34 UTC|newest]

Thread overview: 64+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2007-06-06 18:56 [Intel-IOMMU 00/10] Intel IOMMU Support anil.s.keshavamurthy
2007-06-06 18:56 ` [Intel-IOMMU 01/10] DMAR detection and parsing logic anil.s.keshavamurthy
2007-06-06 18:57 ` [Intel-IOMMU 02/10] Library routine for pre-allocat pool handling anil.s.keshavamurthy
2007-06-07 23:27   ` Andrew Morton
2007-06-08 18:21     ` Keshavamurthy, Anil S
2007-06-08 19:01       ` Andrew Morton
2007-06-08 20:12         ` Keshavamurthy, Anil S
2007-06-08 20:40           ` Siddha, Suresh B
2007-06-08 20:44           ` Andrew Morton
2007-06-08 22:33           ` Christoph Lameter
2007-06-08 22:49             ` Keshavamurthy, Anil S
2007-06-08 20:43         ` Andreas Kleen
2007-06-08 20:55           ` Andrew Morton
2007-06-08 22:31             ` Andi Kleen
2007-06-08 21:20           ` Keshavamurthy, Anil S
2007-06-08 21:42             ` Andrew Morton
2007-06-08 22:17               ` Arjan van de Ven
2007-06-08 22:18               ` Siddha, Suresh B
2007-06-08 22:38                 ` Christoph Lameter
2007-06-08 22:36           ` Christoph Lameter
2007-06-08 22:56             ` Andi Kleen
2007-06-08 22:59               ` Christoph Lameter
2007-06-09  9:47                 ` Andi Kleen
2007-06-11 20:44                   ` Keshavamurthy, Anil S
2007-06-11 21:14                     ` Andrew Morton
2007-06-11  9:46                       ` Ashok Raj
2007-06-11 22:16                       ` Andi Kleen
2007-06-11 23:28                         ` Christoph Lameter
2007-06-11 23:52                       ` Keshavamurthy, Anil S
2007-06-12  0:30                         ` Andrew Morton
2007-06-12  1:10                           ` Arjan van de Ven
2007-06-12  1:30                             ` Christoph Lameter
2007-06-12  1:35                             ` Andrew Morton
2007-06-12  1:55                               ` Arjan van de Ven
2007-06-12  2:08                                 ` Siddha, Suresh B
2007-06-13 18:40                                 ` Matt Mackall
2007-06-13 19:04                                   ` Andi Kleen
2007-06-12  0:38                         ` Christoph Lameter
2007-06-11 21:29                     ` Christoph Lameter
2007-06-11 21:40                       ` Keshavamurthy, Anil S
2007-06-11 22:25                     ` Andi Kleen
2007-06-11 11:29                       ` Ashok Raj
2007-06-11 23:15                       ` Keshavamurthy, Anil S
2007-06-08 22:32       ` Christoph Lameter
2007-06-08 22:45         ` Keshavamurthy, Anil S
2007-06-08 22:55           ` Christoph Lameter
2007-06-10 16:38             ` Arjan van de Ven
2007-06-11 16:10               ` Christoph Lameter
2007-06-06 18:57 ` [Intel-IOMMU 03/10] PCI generic helper function anil.s.keshavamurthy
2007-06-06 18:57 ` [Intel-IOMMU 04/10] clflush_cache_range now takes size param anil.s.keshavamurthy
2007-06-06 18:57 ` [Intel-IOMMU 05/10] IOVA allocation and management routines anil.s.keshavamurthy
2007-06-07 23:34   ` Andrew Morton [this message]
2007-06-08 18:25     ` Keshavamurthy, Anil S
2007-06-06 18:57 ` [Intel-IOMMU 06/10] Intel IOMMU driver anil.s.keshavamurthy
2007-06-07 23:57   ` Andrew Morton
2007-06-08 22:30     ` Christoph Lameter
2007-06-13 20:20     ` Keshavamurthy, Anil S
2007-06-06 18:57 ` [Intel-IOMMU 07/10] Intel iommu cmdline option - forcedac anil.s.keshavamurthy
2007-06-07 23:58   ` Andrew Morton
2007-06-06 18:57 ` [Intel-IOMMU 08/10] DMAR fault handling support anil.s.keshavamurthy
2007-06-06 18:57 ` [Intel-IOMMU 09/10] Iommu Gfx workaround anil.s.keshavamurthy
2007-06-08  0:01   ` Andrew Morton
2007-06-06 18:57 ` [Intel-IOMMU 10/10] Iommu floppy workaround anil.s.keshavamurthy
  -- strict thread matches above, loose matches on Subject: below --
2007-06-04 21:02 [Intel-IOMMU 00/10] Intel IOMMU support anil.s.keshavamurthy
2007-06-04 21:02 ` [Intel-IOMMU 05/10] IOVA allocation and management routines anil.s.keshavamurthy

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20070607163417.84f96405.akpm@linux-foundation.org \
    --to=akpm@linux-foundation.org \
    --cc=ak@suse.de \
    --cc=anil.s.keshavamurthy@intel.com \
    --cc=arjan@linux.intel.com \
    --cc=ashok.raj@intel.com \
    --cc=asit.k.mallick@intel.com \
    --cc=davem@davemloft.net \
    --cc=gregkh@suse.de \
    --cc=linux-kernel@vger.kernel.org \
    --cc=muli@il.ibm.com \
    --cc=shaohua.li@intel.com \
    --cc=suresh.b.siddha@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox