From: Nishanth Aravamudan <nacc@us.ibm.com>
To: clameter@sgi.com
Cc: anton@samba.org, lee.schermerhorn@hp.com, wli@holomorphy.com,
melgor@ie.ibm.com, akpm@linux-foundation.org, linux-mm@kvack.org,
agl@us.ibm.com
Subject: [RFC][PATCH 1/5] Fix hugetlb pool allocation with empty nodes V9
Date: Mon, 6 Aug 2007 09:37:26 -0700 [thread overview]
Message-ID: <20070806163726.GK15714@us.ibm.com> (raw)
In-Reply-To: <20070806163254.GJ15714@us.ibm.com>
Fix hugetlb pool allocation with empty nodes V9
Anton found a problem with the hugetlb pool allocation when some nodes
have no memory (http://marc.info/?l=linux-mm&m=118133042025995&w=2). Lee
worked on versions that tried to fix it, but none were accepted.
Christoph has created a set of patches which allow for GFP_THISNODE
allocations to fail if the node has no memory and for exporting a
node_memory_map indicating which nodes have memory. Since mempolicy.c
already has a number of functions which support interleaving, create a
mempolicy when we invoke alloc_fresh_huge_page() that specifies
interleaving across all the nodes in node_memory_map, rather than custom
interleaving code in hugetlb.c. This requires adding some dummy
functions, and some declarations, in mempolicy.h to compile with NUMA or
!NUMA. Since interleave_nodes() assumes that il_next has been set
properly (and it usually has by a syscall), make sure the interleaving
starts on a valid node.
On a 4-node ppc64 box with 2 memoryless nodes:
Before:
Trying to clear the hugetlb pool
Done. 0 free
Trying to resize the pool to 100
Node 3 HugePages_Free: 0
Node 2 HugePages_Free: 0
Node 1 HugePages_Free: 75
Node 0 HugePages_Free: 25
Done. Initially 100 free
After:
Trying to clear the hugetlb pool
Done. 0 free
Trying to resize the pool to 100
Node 3 HugePages_Free: 0
Node 2 HugePages_Free: 0
Node 1 HugePages_Free: 50
Node 0 HugePages_Free: 50
Done. Initially 100 free
Tested on: 2-node IA64, 4-node ppc64 (2 memoryless nodes), 4-node ppc64
(no memoryless nodes), 4-node x86_64, !NUMA x86, and 1-node x86 (NUMA-Q).
Signed-off-by: Nishanth Aravamudan <nacc@us.ibm.com>
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 3930de2..6848072 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -76,6 +76,8 @@ struct mempolicy {
* The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
*/
+extern struct mempolicy *mpol_new(int mode, nodemask_t *nodes);
+
extern void __mpol_free(struct mempolicy *pol);
static inline void mpol_free(struct mempolicy *pol)
{
@@ -161,6 +163,10 @@ static inline void check_highest_zone(enum zone_type k)
policy_zone = k;
}
+extern void set_first_interleave_node(nodemask_t mask);
+
+extern unsigned interleave_nodes(struct mempolicy *policy);
+
int do_migrate_pages(struct mm_struct *mm,
const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags);
@@ -176,6 +182,11 @@ static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
#define mpol_set_vma_default(vma) do {} while(0)
+static inline struct mempolicy *mpol_new(int mode, nodemask_t *nodes)
+{
+ return NULL;
+}
+
static inline void mpol_free(struct mempolicy *p)
{
}
@@ -253,6 +264,15 @@ static inline int do_migrate_pages(struct mm_struct *mm,
static inline void check_highest_zone(int k)
{
}
+
+static inline void set_first_interleave_node(nodemask_t mask)
+{
+}
+
+static inline unsigned interleave_nodes(struct mempolicy *policy)
+{
+ return 0;
+}
#endif /* CONFIG_NUMA */
#endif /* __KERNEL__ */
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index d7ca59d..4f320b4 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -101,26 +101,23 @@ static void free_huge_page(struct page *page)
spin_unlock(&hugetlb_lock);
}
-static int alloc_fresh_huge_page(void)
+static int alloc_fresh_huge_page(struct mempolicy *policy)
{
- static int prev_nid;
struct page *page;
int nid;
+ int start_nid = interleave_nodes(policy);
- /*
- * Copy static prev_nid to local nid, work on that, then copy it
- * back to prev_nid afterwards: otherwise there's a window in which
- * a racer might pass invalid nid MAX_NUMNODES to alloc_pages_node.
- * But we don't need to use a spin_lock here: it really doesn't
- * matter if occasionally a racer chooses the same nid as we do.
- */
- nid = next_node(prev_nid, node_online_map);
- if (nid == MAX_NUMNODES)
- nid = first_node(node_online_map);
- prev_nid = nid;
+ nid = start_nid;
+
+ do {
+ page = alloc_pages_node(nid,
+ htlb_alloc_mask|__GFP_COMP|GFP_THISNODE,
+ HUGETLB_PAGE_ORDER);
+ if (page)
+ break;
+ nid = interleave_nodes(policy);
+ } while (nid != start_nid);
- page = alloc_pages_node(nid, htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
- HUGETLB_PAGE_ORDER);
if (page) {
set_compound_page_dtor(page, free_huge_page);
spin_lock(&hugetlb_lock);
@@ -162,18 +159,30 @@ fail:
static int __init hugetlb_init(void)
{
unsigned long i;
+ struct mempolicy *pol;
if (HPAGE_SHIFT == 0)
return 0;
- for (i = 0; i < MAX_NUMNODES; ++i)
+ for_each_node_state(i, N_HIGH_MEMORY)
INIT_LIST_HEAD(&hugepage_freelists[i]);
+ pol = mpol_new(MPOL_INTERLEAVE, &node_states[N_HIGH_MEMORY]);
+ if (IS_ERR(pol))
+ goto quit;
+ /*
+ * since the mempolicy we are using was not specified by a
+ * process, we need to make sure il_next has a good starting
+ * value
+ */
+ set_first_interleave_node(node_states[N_HIGH_MEMORY]);
for (i = 0; i < max_huge_pages; ++i) {
- if (!alloc_fresh_huge_page())
+ if (!alloc_fresh_huge_page(pol))
break;
}
+ mpol_free(pol);
max_huge_pages = free_huge_pages = nr_huge_pages = i;
+quit:
printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages);
return 0;
}
@@ -219,7 +228,7 @@ static void try_to_free_low(unsigned long count)
{
int i;
- for (i = 0; i < MAX_NUMNODES; ++i) {
+	for_each_node_state(i, N_HIGH_MEMORY) {
struct page *page, *next;
list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
if (PageHighMem(page))
@@ -241,10 +250,22 @@ static inline void try_to_free_low(unsigned long count)
static unsigned long set_max_huge_pages(unsigned long count)
{
+ struct mempolicy *pol;
+
+ pol = mpol_new(MPOL_INTERLEAVE, &node_states[N_HIGH_MEMORY]);
+ if (IS_ERR(pol))
+ return nr_huge_pages;
+ /*
+ * since the mempolicy we are using was not specified by a
+ * process, we need to make sure il_next has a good starting
+ * value
+ */
+ set_first_interleave_node(node_states[N_HIGH_MEMORY]);
while (count > nr_huge_pages) {
- if (!alloc_fresh_huge_page())
- return nr_huge_pages;
+ if (!alloc_fresh_huge_page(pol))
+ break;
}
+ mpol_free(pol);
if (count >= nr_huge_pages)
return nr_huge_pages;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 87eb69e..c069891 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -171,7 +171,7 @@ static struct zonelist *bind_zonelist(nodemask_t *nodes)
}
/* Create a new policy */
-static struct mempolicy *mpol_new(int mode, nodemask_t *nodes)
+struct mempolicy *mpol_new(int mode, nodemask_t *nodes)
{
struct mempolicy *policy;
@@ -1125,8 +1125,13 @@ static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
return NODE_DATA(nd)->node_zonelists + gfp_zone(gfp);
}
+void set_first_interleave_node(nodemask_t mask)
+{
+ current->il_next = first_node(mask);
+}
+
/* Do dynamic interleaving for a process */
-static unsigned interleave_nodes(struct mempolicy *policy)
+unsigned interleave_nodes(struct mempolicy *policy)
{
unsigned nid, next;
struct task_struct *me = current;
--
Nishanth Aravamudan <nacc@us.ibm.com>
IBM Linux Technology Center
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org">email@kvack.org</a>
next prev parent reply other threads:[~2007-08-06 16:37 UTC|newest]
Thread overview: 24+ messages / expand[flat|nested] mbox.gz Atom feed top
2007-08-06 16:32 [RFC][PATCH 0/5] hugetlb NUMA improvements Nishanth Aravamudan
2007-08-06 16:37 ` Nishanth Aravamudan [this message]
2007-08-06 16:38 ` [RFC][PATCH 2/5] hugetlb: numafy several functions Nishanth Aravamudan
2007-08-06 16:40 ` [RFC][PATCH 3/5] hugetlb: add per-node nr_hugepages sysfs attribute Nishanth Aravamudan
2007-08-06 16:44 ` [RFC][PATCH 4/5] hugetlb: fix cpuset-constrained pool resizing Nishanth Aravamudan
2007-08-06 16:45 ` Nishanth Aravamudan
2007-08-06 16:48 ` [RFC][PATCH 5/5] hugetlb: interleave dequeueing of huge pages Nishanth Aravamudan
2007-08-06 18:04 ` [RFC][PATCH 4/5] hugetlb: fix cpuset-constrained pool resizing Christoph Lameter
2007-08-06 18:26 ` Nishanth Aravamudan
2007-08-06 18:41 ` Christoph Lameter
2007-08-07 0:03 ` Nishanth Aravamudan
2007-08-06 19:37 ` Lee Schermerhorn
2007-08-08 1:50 ` Nishanth Aravamudan
2007-08-08 13:26 ` Lee Schermerhorn
2007-08-06 17:59 ` [RFC][PATCH 2/5] hugetlb: numafy several functions Christoph Lameter
2007-08-06 18:15 ` Nishanth Aravamudan
2007-08-07 0:34 ` Nishanth Aravamudan
2007-08-06 18:00 ` [RFC][PATCH 1/5] Fix hugetlb pool allocation with empty nodes V9 Christoph Lameter
2007-08-06 18:19 ` Nishanth Aravamudan
2007-08-06 18:37 ` Christoph Lameter
2007-08-06 19:52 ` Lee Schermerhorn
2007-08-06 20:15 ` Christoph Lameter
2007-08-07 0:04 ` Nishanth Aravamudan
2007-08-06 16:39 ` [RFC][PATCH 0/5] hugetlb NUMA improvements Nishanth Aravamudan
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20070806163726.GK15714@us.ibm.com \
--to=nacc@us.ibm.com \
--cc=agl@us.ibm.com \
--cc=akpm@linux-foundation.org \
--cc=anton@samba.org \
--cc=clameter@sgi.com \
--cc=lee.schermerhorn@hp.com \
--cc=linux-mm@kvack.org \
--cc=melgor@ie.ibm.com \
--cc=wli@holomorphy.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).