linux-mm.kvack.org archive mirror
* [PATCH] list corruption by gather_surplus
@ 2012-07-26 21:43 Cliff Wickman
  2012-07-27 12:47 ` Hillf Danton
  2012-07-27 17:51 ` Aneesh Kumar K.V
  0 siblings, 2 replies; 3+ messages in thread
From: Cliff Wickman @ 2012-07-26 21:43 UTC (permalink / raw)
  To: cmetcalf, dave, dhillf, dwg, kamezawa.hiroyuki, khlebnikov,
	lee.schermerhorn, mgorman, mhocko, shhuiw, viro
  Cc: linux-mm

From: Cliff Wickman <cpw@sgi.com>

Gentlemen,
I see that you all have done maintenance on mm/hugetlb.c, so I'm hoping one
or two of you could comment on a problem and proposed fix.


I am seeing list corruption occurring from within gather_surplus_pages()
(mm/hugetlb.c).  The problem occurs under a heavy load, and seems to be
because this function drops the hugetlb_lock.
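
For reference, the pre-patch locking pattern in gather_surplus_pages() looks
roughly like this (a simplified sketch of the existing code, with error and
cleanup details elided; the stretch between the unlock and the re-lock is the
window in which other tasks can change the pool state):

retry:
        spin_unlock(&hugetlb_lock);             /* window opens */
        for (i = 0; i < needed; i++) {
                page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
                if (!page)
                        goto free;              /* could not satisfy the reservation */
                list_add(&page->lru, &surplus_list);
        }
        allocated += needed;

        spin_lock(&hugetlb_lock);               /* window closes */
        needed = (h->resv_huge_pages + delta) -
                        (h->free_huge_pages + allocated);
        if (needed > 0)
                goto retry;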

I have CONFIG_DEBUG_LIST=y, and am running an MPI application with 64 threads
and a library that creates a large heap of hugetlbfs pages for it.

The below patch fixes the problem.
The gist of this patch is that gather_surplus_pages() does not have to drop
the lock if alloc_buddy_huge_page() is told whether the lock is already held.

But I may be missing some reason why gather_surplus_pages() is unlocking and
locking the hugetlb_lock several times (besides around the allocator).

Could you take a look and advise?

Signed-off-by: Cliff Wickman <cpw@sgi.com>
---
 mm/hugetlb.c |   28 +++++++++++++++++-----------
 1 file changed, 17 insertions(+), 11 deletions(-)

Index: linux/mm/hugetlb.c
===================================================================
--- linux.orig/mm/hugetlb.c
+++ linux/mm/hugetlb.c
@@ -747,7 +747,9 @@ static int free_pool_huge_page(struct hs
 	return ret;
 }
 
-static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
+/* already_locked means the caller has already locked hugetlb_lock */
+static struct page *alloc_buddy_huge_page(struct hstate *h, int nid,
+						int already_locked)
 {
 	struct page *page;
 	unsigned int r_nid;
@@ -778,7 +780,8 @@ static struct page *alloc_buddy_huge_pag
 	 * the node values until we've gotten the hugepage and only the
 	 * per-node value is checked there.
 	 */
-	spin_lock(&hugetlb_lock);
+	if (!already_locked)
+		spin_lock(&hugetlb_lock);
 	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
 		spin_unlock(&hugetlb_lock);
 		return NULL;
@@ -787,6 +790,7 @@ static struct page *alloc_buddy_huge_pag
 		h->surplus_huge_pages++;
 	}
 	spin_unlock(&hugetlb_lock);
+	/* page allocation may sleep, so the lock must be unlocked */
 
 	if (nid == NUMA_NO_NODE)
 		page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
@@ -799,6 +803,9 @@ static struct page *alloc_buddy_huge_pag
 
 	if (page && arch_prepare_hugepage(page)) {
 		__free_pages(page, huge_page_order(h));
+		if (already_locked)
+			/* leave it like it was */
+			spin_lock(&hugetlb_lock);
 		return NULL;
 	}
 
@@ -817,7 +824,9 @@ static struct page *alloc_buddy_huge_pag
 		h->surplus_huge_pages--;
 		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
 	}
-	spin_unlock(&hugetlb_lock);
+	if (!already_locked)
+		/* leave it like it was */
+		spin_unlock(&hugetlb_lock);
 
 	return page;
 }
@@ -836,7 +845,7 @@ struct page *alloc_huge_page_node(struct
 	spin_unlock(&hugetlb_lock);
 
 	if (!page)
-		page = alloc_buddy_huge_page(h, nid);
+		page = alloc_buddy_huge_page(h, nid, 0);
 
 	return page;
 }
@@ -844,6 +853,7 @@ struct page *alloc_huge_page_node(struct
 /*
  * Increase the hugetlb pool such that it can accomodate a reservation
  * of size 'delta'.
+ * This is entered and exited with hugetlb_lock locked.
  */
 static int gather_surplus_pages(struct hstate *h, int delta)
 {
@@ -863,9 +873,8 @@ static int gather_surplus_pages(struct h
 
 	ret = -ENOMEM;
 retry:
-	spin_unlock(&hugetlb_lock);
 	for (i = 0; i < needed; i++) {
-		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
+		page = alloc_buddy_huge_page(h, NUMA_NO_NODE, 1);
 		if (!page)
 			/*
 			 * We were not able to allocate enough pages to
@@ -879,10 +888,9 @@ retry:
 	allocated += needed;
 
 	/*
-	 * After retaking hugetlb_lock, we need to recalculate 'needed'
+	 * With hugetlb_lock still locked, we need to recalculate 'needed'
 	 * because either resv_huge_pages or free_huge_pages may have changed.
 	 */
-	spin_lock(&hugetlb_lock);
 	needed = (h->resv_huge_pages + delta) -
 			(h->free_huge_pages + allocated);
 	if (needed > 0)
@@ -900,7 +908,6 @@ retry:
 	h->resv_huge_pages += delta;
 	ret = 0;
 
-	spin_unlock(&hugetlb_lock);
 	/* Free the needed pages to the hugetlb pool */
 	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
 		if ((--needed) < 0)
@@ -923,7 +930,6 @@ free:
 			put_page(page);
 		}
 	}
-	spin_lock(&hugetlb_lock);
 
 	return ret;
 }
@@ -1043,7 +1049,7 @@ static struct page *alloc_huge_page(stru
 	spin_unlock(&hugetlb_lock);
 
 	if (!page) {
-		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
+		page = alloc_buddy_huge_page(h, NUMA_NO_NODE, 0);
 		if (!page) {
 			hugetlb_put_quota(inode->i_mapping, chg);
 			return ERR_PTR(-VM_FAULT_SIGBUS);


^ permalink raw reply	[flat|nested] 3+ messages in thread

* Re: [PATCH] list corruption by gather_surplus
  2012-07-26 21:43 [PATCH] list corruption by gather_surplus Cliff Wickman
@ 2012-07-27 12:47 ` Hillf Danton
  2012-07-27 17:51 ` Aneesh Kumar K.V
  1 sibling, 0 replies; 3+ messages in thread
From: Hillf Danton @ 2012-07-27 12:47 UTC (permalink / raw)
  To: Cliff Wickman, LKML
  Cc: cmetcalf, dave, dwg, kamezawa.hiroyuki, khlebnikov,
	lee.schermerhorn, mgorman, mhocko, shhuiw, viro, linux-mm

On Fri, Jul 27, 2012 at 5:43 AM, Cliff Wickman <cpw@sgi.com> wrote:
> From: Cliff Wickman <cpw@sgi.com>
>
> Gentlemen,
> I see that you all have done maintenance on mm/hugetlb.c, so I'm hoping one
> or two of you could comment on a problem and proposed fix.
>
>
> I am seeing list corruption occurring from within gather_surplus_pages()
> (mm/hugetlb.c).  The problem occurs under a heavy load, and seems to be
> because this function drops the hugetlb_lock.
>
> I have CONFIG_DEBUG_LIST=y, and am running an MPI application with 64 threads
> and a library that creates a large heap of hugetlbfs pages for it.
>
> The below patch fixes the problem.
> The gist of this patch is that gather_surplus_pages() does not have to drop
> the lock if alloc_buddy_huge_page() is told whether the lock is already held.
>
> But I may be missing some reason why gather_surplus_pages() is unlocking and
> locking the hugetlb_lock several times (besides around the allocator).
>
> Could you take a look and advise?
>
> Signed-off-by: Cliff Wickman <cpw@sgi.com>
> ---
>  mm/hugetlb.c |   28 +++++++++++++++++-----------
>  1 file changed, 17 insertions(+), 11 deletions(-)
>
> Index: linux/mm/hugetlb.c
> ===================================================================
> --- linux.orig/mm/hugetlb.c
> +++ linux/mm/hugetlb.c
> @@ -747,7 +747,9 @@ static int free_pool_huge_page(struct hs
>         return ret;
>  }
>
> -static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
> +/* already_locked means the caller has already locked hugetlb_lock */
> +static struct page *alloc_buddy_huge_page(struct hstate *h, int nid,
> +                                               int already_locked)
>  {
>         struct page *page;
>         unsigned int r_nid;
> @@ -778,7 +780,8 @@ static struct page *alloc_buddy_huge_pag
>          * the node values until we've gotten the hugepage and only the
>          * per-node value is checked there.
>          */
> -       spin_lock(&hugetlb_lock);
> +       if (!already_locked)
> +               spin_lock(&hugetlb_lock);
>         if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
>                 spin_unlock(&hugetlb_lock);
>                 return NULL;
> @@ -787,6 +790,7 @@ static struct page *alloc_buddy_huge_pag
>                 h->surplus_huge_pages++;
>         }
>         spin_unlock(&hugetlb_lock);
> +       /* page allocation may sleep, so the lock must be unlocked */
>
>         if (nid == NUMA_NO_NODE)
>                 page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
> @@ -799,6 +803,9 @@ static struct page *alloc_buddy_huge_pag
>
>         if (page && arch_prepare_hugepage(page)) {
>                 __free_pages(page, huge_page_order(h));
> +               if (already_locked)
> +                       /* leave it like it was */
> +                       spin_lock(&hugetlb_lock);
>                 return NULL;
>         }
>
> @@ -817,7 +824,9 @@ static struct page *alloc_buddy_huge_pag
>                 h->surplus_huge_pages--;
>                 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
>         }
> -       spin_unlock(&hugetlb_lock);
> +       if (!already_locked)
> +               /* leave it like it was */


Same comment for two different checks?


> +               spin_unlock(&hugetlb_lock);
>
>         return page;
>  }
> @@ -836,7 +845,7 @@ struct page *alloc_huge_page_node(struct
>         spin_unlock(&hugetlb_lock);
>
>         if (!page)
> -               page = alloc_buddy_huge_page(h, nid);
> +               page = alloc_buddy_huge_page(h, nid, 0);
>
>         return page;
>  }
> @@ -844,6 +853,7 @@ struct page *alloc_huge_page_node(struct
>  /*
>   * Increase the hugetlb pool such that it can accomodate a reservation
>   * of size 'delta'.
> + * This is entered and exited with hugetlb_lock locked.
>   */
>  static int gather_surplus_pages(struct hstate *h, int delta)
>  {
> @@ -863,9 +873,8 @@ static int gather_surplus_pages(struct h
>
>         ret = -ENOMEM;
>  retry:
> -       spin_unlock(&hugetlb_lock);
>         for (i = 0; i < needed; i++) {
> -               page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
> +               page = alloc_buddy_huge_page(h, NUMA_NO_NODE, 1);
>                 if (!page)
>                         /*
>                          * We were not able to allocate enough pages to

Hm, what tree are you using? (Please use the -next tree if not.)

Good Weekend
Hillf


^ permalink raw reply	[flat|nested] 3+ messages in thread

* Re: [PATCH] list corruption by gather_surplus
  2012-07-26 21:43 [PATCH] list corruption by gather_surplus Cliff Wickman
  2012-07-27 12:47 ` Hillf Danton
@ 2012-07-27 17:51 ` Aneesh Kumar K.V
  1 sibling, 0 replies; 3+ messages in thread
From: Aneesh Kumar K.V @ 2012-07-27 17:51 UTC (permalink / raw)
  To: Cliff Wickman, cmetcalf, dave, dhillf, dwg, kamezawa.hiroyuki,
	khlebnikov, lee.schermerhorn, mgorman, mhocko, shhuiw, viro
  Cc: linux-mm

Cliff Wickman <cpw@sgi.com> writes:

> From: Cliff Wickman <cpw@sgi.com>
>
> Gentlemen,
> I see that you all have done maintenance on mm/hugetlb.c, so I'm hoping one
> or two of you could comment on a problem and proposed fix.
>
>
> I am seeing list corruption occurring from within gather_surplus_pages()
> (mm/hugetlb.c).  The problem occurs under a heavy load, and seems to be
> because this function drops the hugetlb_lock.
>
> I have CONFIG_DEBUG_LIST=y, and am running an MPI application with 64 threads
> and a library that creates a large heap of hugetlbfs pages for it.
>
> The below patch fixes the problem.
> The gist of this patch is that gather_surplus_pages() does not have to drop
> the lock if alloc_buddy_huge_page() is told whether the lock is
> already held.


But you didn't explain the corruption details, right? What causes the
corruption? It would be nice to document that in the commit message.

>
> But I may be missing some reason why gather_surplus_pages() is unlocking and
> locking the hugetlb_lock several times (besides around the allocator).
>
> Could you take a look and advise?
>
> Signed-off-by: Cliff Wickman <cpw@sgi.com>
> ---
>  mm/hugetlb.c |   28 +++++++++++++++++-----------
>  1 file changed, 17 insertions(+), 11 deletions(-)
>
> Index: linux/mm/hugetlb.c
> ===================================================================
> --- linux.orig/mm/hugetlb.c
> +++ linux/mm/hugetlb.c
> @@ -747,7 +747,9 @@ static int free_pool_huge_page(struct hs
>  	return ret;
>  }
>
> -static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
> +/* already_locked means the caller has already locked hugetlb_lock */
> +static struct page *alloc_buddy_huge_page(struct hstate *h, int nid,
> +						int already_locked)
>  {

Why? Why can't we always call this with the lock held?
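
One way to read that suggestion, as a rough illustrative sketch (not the
posted patch and not code from the tree): enter and leave
alloc_buddy_huge_page() with hugetlb_lock held, and drop it only around the
allocation itself, which may sleep. Something like:

        /*
         * Sketch only: the caller holds hugetlb_lock on entry, and it is
         * held again on return.  Node placement and the per-node counters
         * are elided here.
         */
        static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
        {
                struct page *page;
                /* plus whatever other flags the real allocation passes */
                gfp_t gfp = htlb_alloc_mask | __GFP_COMP;

                if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
                        return NULL;
                h->nr_huge_pages++;
                h->surplus_huge_pages++;

                spin_unlock(&hugetlb_lock);     /* alloc_pages() may sleep */
                page = alloc_pages(gfp, huge_page_order(h));
                spin_lock(&hugetlb_lock);       /* re-take before returning */

                if (!page) {
                        h->nr_huge_pages--;
                        h->surplus_huge_pages--;
                }
                return page;
        }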

>  	struct page *page;
>  	unsigned int r_nid;
> @@ -778,7 +780,8 @@ static struct page *alloc_buddy_huge_pag
>  	 * the node values until we've gotten the hugepage and only the

-aneesh


^ permalink raw reply	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2012-07-27 17:52 UTC | newest]

Thread overview: 3+ messages
2012-07-26 21:43 [PATCH] list corruption by gather_surplus Cliff Wickman
2012-07-27 12:47 ` Hillf Danton
2012-07-27 17:51 ` Aneesh Kumar K.V
