public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
From: David Chinner <dgc@sgi.com>
To: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: "David Chinner" <dgc@sgi.com>, "Andi Kleen" <andi@firstfloor.org>,
	"dean gaudet" <dean@arctic.org>,
	"Nick Piggin" <nickpiggin@yahoo.com.au>,
	Xen-devel <xen-devel@lists.xensource.com>,
	Morten@suse.de,
	"Linux Kernel Mailing List" <linux-kernel@vger.kernel.org>,
	Bøgeskov <xen-users@morten.bogeskov.dk>,
	xfs@oss.sgi.com, xfs-masters@oss.sgi.com,
	"Mark Williamson" <mark.williamson@cl.cam.ac.uk>
Subject: Re: [PATCH] Allow lazy unmapping by taking extra page references V3
Date: Thu, 25 Oct 2007 09:21:10 +1000	[thread overview]
Message-ID: <20071024232110.GO66820511@sgi.com> (raw)
In-Reply-To: <471FCB38.10106@goop.org>

On Wed, Oct 24, 2007 at 03:46:16PM -0700, Jeremy Fitzhardinge wrote:
> David Chinner wrote:
> > Version 3:
> >   - compile on latest -git
> >   
> 
> Not quite:
> 
>   CC      mm/vmalloc.o
> /home/jeremy/hg/xen/paravirt/linux/mm/vmalloc.c: In function 'vm_area_alloc_pagearray':
> /home/jeremy/hg/xen/paravirt/linux/mm/vmalloc.c:338: error: 'GFP_LEVEL_MASK' undeclared (first use in this function)
> /home/jeremy/hg/xen/paravirt/linux/mm/vmalloc.c:338: error: (Each undeclared identifier is reported only once
> /home/jeremy/hg/xen/paravirt/linux/mm/vmalloc.c:338: error: for each function it appears in.)
> make[3]: *** [mm/vmalloc.o] Error 1
> make[2]: *** [mm] Error 2
> make[2]: *** Waiting for unfinished jobs....
> 
> 
> GFP_RECLAIM_MASK now?

Yeah, it is. Not sure what happened there - I did make that change.
new diff below.

Cheers,

Dave.
-- 
Dave Chinner
Principal Engineer
SGI Australian Software Group


diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index b9c8589..38f073f 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -187,19 +187,6 @@ free_address(
 {
 	a_list_t	*aentry;
 
-#ifdef CONFIG_XEN
-	/*
-	 * Xen needs to be able to make sure it can get an exclusive
-	 * RO mapping of pages it wants to turn into a pagetable.  If
-	 * a newly allocated page is also still being vmap()ed by xfs,
-	 * it will cause pagetable construction to fail.  This is a
-	 * quick workaround to always eagerly unmap pages so that Xen
-	 * is happy.
-	 */
-	vunmap(addr);
-	return;
-#endif
-
 	aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
 	if (likely(aentry)) {
 		spin_lock(&as_lock);
@@ -209,7 +196,7 @@ #endif
 		as_list_len++;
 		spin_unlock(&as_lock);
 	} else {
-		vunmap(addr);
+		vunmap_pages(addr);
 	}
 }
 
@@ -228,7 +215,7 @@ purge_addresses(void)
 	spin_unlock(&as_lock);
 
 	while ((old = aentry) != NULL) {
-		vunmap(aentry->vm_addr);
+		vunmap_pages(aentry->vm_addr);
 		aentry = aentry->next;
 		kfree(old);
 	}
@@ -458,8 +445,8 @@ _xfs_buf_map_pages(
 	} else if (flags & XBF_MAPPED) {
 		if (as_list_len > 64)
 			purge_addresses();
-		bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
-					VM_MAP, PAGE_KERNEL);
+		bp->b_addr = vmap_pages(bp->b_pages, bp->b_page_count,
+					VM_MAP, PAGE_KERNEL, xb_to_gfp(flags));
 		if (unlikely(bp->b_addr == NULL))
 			return -ENOMEM;
 		bp->b_addr += bp->b_offset;
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 89338b4..40c34da 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -51,6 +51,10 @@ extern void *vmap(struct page **pages, u
 			unsigned long flags, pgprot_t prot);
 extern void vunmap(void *addr);
 
+extern void *vmap_pages(struct page **pages, unsigned int count,
+			unsigned long flags, pgprot_t prot, gfp_t gfp_mask);
+extern void vunmap_pages(void *addr);
+
 extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
 							unsigned long pgoff);
 void vmalloc_sync_all(void);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index af77e17..720f338 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -319,6 +319,34 @@ struct vm_struct *remove_vm_area(void *a
 	return v;
 }
 
+static int vm_area_alloc_pagearray(struct vm_struct *area, gfp_t gfp_mask,
+				unsigned int nr_pages, int node)
+{
+	struct page **pages;
+	unsigned int array_size;
+
+	array_size = (nr_pages * sizeof(struct page *));
+
+	area->nr_pages = nr_pages;
+	/* Please note that the recursion is strictly bounded. */
+	if (array_size > PAGE_SIZE) {
+		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
+					PAGE_KERNEL, node);
+		area->flags |= VM_VPAGES;
+	} else {
+		pages = kmalloc_node(array_size,
+				(gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
+				node);
+	}
+	area->pages = pages;
+	if (!area->pages) {
+		remove_vm_area(area->addr);
+		kfree(area);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
 static void __vunmap(void *addr, int deallocate_pages)
 {
 	struct vm_struct *area;
@@ -347,7 +375,7 @@ static void __vunmap(void *addr, int dea
 
 		for (i = 0; i < area->nr_pages; i++) {
 			BUG_ON(!area->pages[i]);
-			__free_page(area->pages[i]);
+			put_page(area->pages[i]);
 		}
 
 		if (area->flags & VM_VPAGES)
@@ -394,6 +422,23 @@ void vunmap(void *addr)
 EXPORT_SYMBOL(vunmap);
 
 /**
+ *	vunmap_pages  -  release virtual mapping obtained by vmap_pages()
+ *	@addr:		memory base address
+ *
+ *	Free the virtually contiguous memory area starting at @addr,
+ *	which was created from the page array passed to vmap_pages(),
+ *	releasing the reference on the pages gained in vmap_pages().
+ *
+ *	Must not be called in interrupt context.
+ */
+void vunmap_pages(void *addr)
+{
+	BUG_ON(in_interrupt());
+	__vunmap(addr, 1);
+}
+EXPORT_SYMBOL(vunmap_pages);
+
+/**
  *	vmap  -  map an array of pages into virtually contiguous space
  *	@pages:		array of page pointers
  *	@count:		number of pages to map
@@ -423,32 +468,63 @@ void *vmap(struct page **pages, unsigned
 }
 EXPORT_SYMBOL(vmap);
 
+/**
+ *	vmap_pages  -  map an array of pages into virtually contiguous space
+ *	@pages:		array of page pointers
+ *	@count:		number of pages to map
+ *	@flags:		vm_area->flags
+ *	@prot:		page protection for the mapping
+ *	@gfp_mask:	flags for the page level allocator
+ *
+ *	Maps @count pages from @pages into contiguous kernel virtual
+ *	space taking a reference to each page and keeping track of all
+ *	the pages within the vm area structure.
+ */
+void *vmap_pages(struct page **pages, unsigned int count,
+		unsigned long flags, pgprot_t prot, gfp_t gfp_mask)
+{
+	struct vm_struct *area;
+	struct page **pgp;
+	int i;
+
+	if (count > num_physpages)
+		return NULL;
+
+	area = get_vm_area((count << PAGE_SHIFT), flags);
+	if (!area)
+		return NULL;
+	if (vm_area_alloc_pagearray(area, gfp_mask, count, -1))
+		return NULL;
+
+	/* map_vm_area modifies pgp */
+	pgp = pages;
+	if (map_vm_area(area, prot, &pgp)) {
+		vunmap(area->addr);
+		return NULL;
+	}
+	/*
+	 * now that the region is mapped, take a reference to each
+	 * page and store them in the area page array.
+	 */
+	for (i = 0; i < area->nr_pages; i++) {
+		get_page(pages[i]);
+		area->pages[i] = pages[i];
+	}
+
+	return area->addr;
+}
+EXPORT_SYMBOL(vmap_pages);
+
 void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 				pgprot_t prot, int node)
 {
 	struct page **pages;
-	unsigned int nr_pages, array_size, i;
+	unsigned int nr_pages;
+	int i;
 
 	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
-	array_size = (nr_pages * sizeof(struct page *));
-
-	area->nr_pages = nr_pages;
-	/* Please note that the recursion is strictly bounded. */
-	if (array_size > PAGE_SIZE) {
-		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
-					PAGE_KERNEL, node);
-		area->flags |= VM_VPAGES;
-	} else {
-		pages = kmalloc_node(array_size,
-				(gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
-				node);
-	}
-	area->pages = pages;
-	if (!area->pages) {
-		remove_vm_area(area->addr);
-		kfree(area);
+	if (vm_area_alloc_pagearray(area, gfp_mask, nr_pages, node))
 		return NULL;
-	}
 
 	for (i = 0; i < area->nr_pages; i++) {
 		if (node < 0)
@@ -462,6 +538,8 @@ void *__vmalloc_area_node(struct vm_stru
 		}
 	}
 
+	/* map_vm_area modifies pages */
+	pages = area->pages;
 	if (map_vm_area(area, prot, &pages))
 		goto fail;
 	return area->addr;

  reply	other threads:[~2007-10-24 23:21 UTC|newest]

Thread overview: 45+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2007-10-12 16:58 Interaction between Xen and XFS: stray RW mappings Jeremy Fitzhardinge
2007-10-12 17:08 ` Jeremy Fitzhardinge
2007-10-14 22:56 ` David Chinner
2007-10-14 23:12   ` Jeremy Fitzhardinge
2007-10-14 23:33     ` David Chinner
2007-10-15  4:15     ` Nick Piggin
2007-10-15  0:57       ` Jeremy Fitzhardinge
2007-10-15  7:26         ` Nick Piggin
2007-10-15  3:42           ` Jeremy Fitzhardinge
2007-10-15  4:11             ` David Chinner
2007-10-15  4:18               ` Jeremy Fitzhardinge
2007-10-15  4:25                 ` David Chinner
2007-10-15  8:31                   ` [xfs-masters] " Christoph Hellwig
2007-10-22  3:18       ` dean gaudet
2007-10-22  3:34         ` Jeremy Fitzhardinge
2007-10-22  4:28           ` dean gaudet
2007-10-22  4:39             ` Nick Piggin
2007-10-22 18:37               ` Jeremy Fitzhardinge
2007-10-22 18:32             ` Jeremy Fitzhardinge
2007-10-22 13:47           ` Andi Kleen
2007-10-22 18:40             ` Jeremy Fitzhardinge
2007-10-22 19:07               ` Andi Kleen
2007-10-22 19:11                 ` Jeremy Fitzhardinge
2007-10-22 22:32                 ` David Chinner
2007-10-22 23:35                   ` Andi Kleen
2007-10-23  0:16                     ` Zachary Amsden
2007-10-23  0:36                     ` David Chinner
2007-10-23  7:04                       ` [patch] " David Chinner
2007-10-23  9:30                         ` Andi Kleen
2007-10-23 12:41                           ` David Chinner
2007-10-23 14:33                             ` Jeremy Fitzhardinge
2007-10-24  4:36                           ` [PATCH] Allow lazy unmapping by taking extra page references V2 David Chinner
2007-10-24  5:08                             ` Jeremy Fitzhardinge
2007-10-24 21:48                               ` [PATCH] Allow lazy unmapping by taking extra page references V3 David Chinner
2007-10-24 22:46                                 ` Jeremy Fitzhardinge
2007-10-24 23:21                                   ` David Chinner [this message]
2007-10-23  9:28                       ` Interaction between Xen and XFS: stray RW mappings Andi Kleen
2007-10-15  9:36   ` Andi Kleen
2007-10-15 14:56     ` Nick Piggin
2007-10-15 11:07       ` Andi Kleen
2007-10-15 11:28         ` Nick Piggin
2007-10-15 12:54           ` Andi Kleen
2007-10-21 12:17             ` Dave Airlie
2007-10-21 22:16         ` Benjamin Herrenschmidt
2007-10-22  9:49           ` Andi Kleen

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20071024232110.GO66820511@sgi.com \
    --to=dgc@sgi.com \
    --cc=Morten@suse.de \
    --cc=andi@firstfloor.org \
    --cc=dean@arctic.org \
    --cc=jeremy@goop.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=mark.williamson@cl.cam.ac.uk \
    --cc=nickpiggin@yahoo.com.au \
    --cc=xen-devel@lists.xensource.com \
    --cc=xen-users@morten.bogeskov.dk \
    --cc=xfs-masters@oss.sgi.com \
    --cc=xfs@oss.sgi.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox