From: Anshuman Khandual <khandual@linux.vnet.ibm.com>
To: linux-kernel@vger.kernel.org, linux-mm@kvack.org
Cc: mhocko@suse.com, vbabka@suse.cz, mgorman@suse.de,
	minchan@kernel.org, aneesh.kumar@linux.vnet.ibm.com,
	bsingharora@gmail.com, srikar@linux.vnet.ibm.com,
	haren@linux.vnet.ibm.com, jglisse@redhat.com,
	dave.hansen@intel.com
Subject: [RFC 3/4] mm/hugetlb: Restrict HugeTLB page allocations only to system ram nodemask
Date: Tue, 22 Nov 2016 19:49:39 +0530
Message-ID: <1479824388-30446-4-git-send-email-khandual@linux.vnet.ibm.com>
In-Reply-To: <1479824388-30446-1-git-send-email-khandual@linux.vnet.ibm.com>

HugeTLB allocation, release and accounting currently span all the nodes
in the N_MEMORY node mask. Coherent device memory nodes should not take
part in these operations. So fetch the system-RAM-only nodemask with the
ram_nodemask() helper and use it instead of N_MEMORY wherever HugeTLB
allocates, frees or registers pages. This isolates coherent device
memory nodes from HugeTLB allocations.
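
For reference, ram_nodemask() itself is introduced earlier in this
series. A minimal sketch of how such a helper could be built, assuming
a node state (here called N_COHERENT_DEVICE) that marks coherent device
memory nodes, might look like:

	/*
	 * Sketch only, not necessarily the helper added by this series.
	 * Assumes N_COHERENT_DEVICE marks coherent device memory nodes.
	 */
	static inline nodemask_t ram_nodemask(void)
	{
		nodemask_t mask;

		/* System RAM nodes = memory nodes minus device nodes. */
		nodes_andnot(mask, node_states[N_MEMORY],
			     node_states[N_COHERENT_DEVICE]);
		return mask;
	}

The hunks below then simply substitute this mask wherever
node_states[N_MEMORY] was used before.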

Signed-off-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
---
 mm/hugetlb.c | 32 +++++++++++++++++++++++---------
 1 file changed, 23 insertions(+), 9 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 418bf01..f7236e1 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1782,6 +1782,9 @@ static void return_unused_surplus_pages(struct hstate *h,
 					unsigned long unused_resv_pages)
 {
 	unsigned long nr_pages;
+	nodemask_t nodes;
+
+	nodes = ram_nodemask();
 
 	/* Uncommit the reservation */
 	h->resv_huge_pages -= unused_resv_pages;
@@ -1801,7 +1804,7 @@ static void return_unused_surplus_pages(struct hstate *h,
 	 * on-line nodes with memory and will handle the hstate accounting.
 	 */
 	while (nr_pages--) {
-		if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
+		if (!free_pool_huge_page(h, &nodes, 1))
 			break;
 		cond_resched_lock(&hugetlb_lock);
 	}
@@ -2088,8 +2091,10 @@ int __weak alloc_bootmem_huge_page(struct hstate *h)
 {
 	struct huge_bootmem_page *m;
 	int nr_nodes, node;
+	nodemask_t nodes;
 
-	for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
+	nodes = ram_nodemask();
+	for_each_node_mask_to_alloc(h, nr_nodes, node, &nodes) {
 		void *addr;
 
 		addr = memblock_virt_alloc_try_nid_nopanic(
@@ -2158,13 +2163,15 @@ static void __init gather_bootmem_prealloc(void)
 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
 {
 	unsigned long i;
+	nodemask_t nodes;
+
 
+	nodes = ram_nodemask();
 	for (i = 0; i < h->max_huge_pages; ++i) {
 		if (hstate_is_gigantic(h)) {
 			if (!alloc_bootmem_huge_page(h))
 				break;
-		} else if (!alloc_fresh_huge_page(h,
-					 &node_states[N_MEMORY]))
+		} else if (!alloc_fresh_huge_page(h, &nodes))
 			break;
 	}
 	h->max_huge_pages = i;
@@ -2401,8 +2408,11 @@ static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
 					   unsigned long count, size_t len)
 {
 	int err;
+	nodemask_t ram_nodes;
+
 	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
 
+	ram_nodes = ram_nodemask();
 	if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
 		err = -EINVAL;
 		goto out;
@@ -2415,7 +2425,7 @@ static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
 		if (!(obey_mempolicy &&
 				init_nodemask_of_mempolicy(nodes_allowed))) {
 			NODEMASK_FREE(nodes_allowed);
-			nodes_allowed = &node_states[N_MEMORY];
+			nodes_allowed = &ram_nodes;
 		}
 	} else if (nodes_allowed) {
 		/*
@@ -2425,11 +2435,11 @@ static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
 		count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
 		init_nodemask_of_node(nodes_allowed, nid);
 	} else
-		nodes_allowed = &node_states[N_MEMORY];
+		nodes_allowed = &ram_nodes;
 
 	h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
 
-	if (nodes_allowed != &node_states[N_MEMORY])
+	if (nodes_allowed != &ram_nodes)
 		NODEMASK_FREE(nodes_allowed);
 
 	return len;
@@ -2726,9 +2736,11 @@ static void hugetlb_register_node(struct node *node)
  */
 static void __init hugetlb_register_all_nodes(void)
 {
+	nodemask_t nodes;
 	int nid;
 
-	for_each_node_state(nid, N_MEMORY) {
+	nodes = ram_nodemask();
+	for_each_node_mask(nid, nodes) {
 		struct node *node = node_devices[nid];
 		if (node->dev.id == nid)
 			hugetlb_register_node(node);
@@ -2998,13 +3010,15 @@ int hugetlb_report_node_meminfo(int nid, char *buf)
 
 void hugetlb_show_meminfo(void)
 {
+	nodemask_t nodes;
 	struct hstate *h;
 	int nid;
 
 	if (!hugepages_supported())
 		return;
 
-	for_each_node_state(nid, N_MEMORY)
+	nodes = ram_nodemask();
+	for_each_node_mask(nid, nodes)
 		for_each_hstate(h)
 			pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
 				nid,
-- 
1.8.3.1

Thread overview: 20+ messages
2016-11-22 14:19 [RFC 0/4] Define coherent device memory node Anshuman Khandual
2016-11-22 14:19 ` [RFC 1/4] mm: " Anshuman Khandual
2016-11-29 17:57   ` Dave Hansen
2016-11-30 11:46     ` Anshuman Khandual
2016-11-22 14:19 ` [RFC 2/4] mm/cpuset: Exclude coherent device memory nodes from mems_allowed Anshuman Khandual
2016-11-22 14:19 ` Anshuman Khandual [this message]
2016-11-22 14:19 ` [RFC 4/4] mm: Ignore cpuset enforcement when allocation flag has __GFP_THISNODE Anshuman Khandual
2016-11-28 21:12   ` Dave Hansen
2016-11-29  6:51     ` Anshuman Khandual
2016-11-29 16:52       ` Dave Hansen
2016-11-30 11:17         ` Anshuman Khandual
2016-11-30 19:43           ` Dave Hansen
2016-11-22 14:19 ` [DEBUG 05/12] powerpc/mm: Identify coherent device memory nodes during platform init Anshuman Khandual
2016-11-22 14:19 ` [DEBUG 06/12] powerpc/mm: Create numa nodes for hotplug memory Anshuman Khandual
2016-11-22 14:19 ` [DEBUG 07/12] powerpc/mm: Allow memory hotplug into a memory less node Anshuman Khandual
2016-11-22 14:19 ` [DEBUG 08/12] mm: Enable CONFIG_MOVABLE_NODE on powerpc Anshuman Khandual
2016-11-22 14:19 ` [DEBUG 09/12] powerpc: Enable CONFIG_MOVABLE_NODE for PPC64 platform Anshuman Khandual
2016-11-22 14:19 ` [DEBUG 10/12] mm: Add a new migration function migrate_virtual_range() Anshuman Khandual
2016-11-22 14:19 ` [DEBUG 11/12] drivers: Add two drivers for coherent device memory tests Anshuman Khandual
2016-11-22 14:19 ` [DEBUG 12/12] test: Add a script to perform random VMA migrations across nodes Anshuman Khandual
