From: Mel Gorman <mel@csn.ul.ie>
To: Christoph Lameter <cl@linux-foundation.org>
Cc: Linux Memory Management List <linux-mm@kvack.org>,
	Pekka Enberg <penberg@cs.helsinki.fi>,
	Rik van Riel <riel@redhat.com>,
	KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>,
	Johannes Weiner <hannes@cmpxchg.org>,
	Nick Piggin <npiggin@suse.de>,
	Linux Kernel Mailing List <linux-kernel@vger.kernel.org>,
	Lin Ming <ming.m.lin@intel.com>,
	Zhang Yanmin <yanmin_zhang@linux.intel.com>,
	Peter Zijlstra <peterz@infradead.org>
Subject: Re: [PATCH 20/35] Use a pre-calculated value for num_online_nodes()
Date: Thu, 19 Mar 2009 22:21:06 +0000
Message-ID: <20090319222106.GD24586@csn.ul.ie>
In-Reply-To: <alpine.DEB.1.10.0903191642160.22425@qirst.com>

On Thu, Mar 19, 2009 at 04:43:55PM -0400, Christoph Lameter wrote:
> Trying to do the same in the style of nr_node_ids etc.
> 

Because of some issues with the patch and how it handles possible
nodes, I reworked it slightly into the following, which is what I'm
actually testing.

commit 58652d22798591d4df5421a90d65cbd7e60b9048
Author: Christoph Lameter <cl@linux-foundation.org>
Date:   Thu Mar 19 21:34:17 2009 +0000

    Use a pre-calculated value for num_online_nodes()
    
    num_online_nodes() is called in a number of places, but most often by the
    page allocator when deciding whether the zonelist needs to be filtered
    based on cpusets or the zonelist cache.  Despite its name, it is a
    relatively heavy function: it counts the bits set in the online-node mask
    and so touches a number of cache lines.  This patch stores the number of
    online nodes at boot time and updates the value as nodes are onlined and
    offlined.
    
    Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
    Signed-off-by: Mel Gorman <mel@csn.ul.ie>

diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index 848025c..474e73e 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -408,6 +408,19 @@ static inline int num_node_state(enum node_states state)
 #define next_online_node(nid)	next_node((nid), node_states[N_ONLINE])
 
 extern int nr_node_ids;
+extern int nr_online_nodes;
+
+static inline void node_set_online(int nid)
+{
+	node_set_state(nid, N_ONLINE);
+	nr_online_nodes = num_node_state(N_ONLINE);
+}
+
+static inline void node_set_offline(int nid)
+{
+	node_clear_state(nid, N_ONLINE);
+	nr_online_nodes = num_node_state(N_ONLINE);
+}
 #else
 
 static inline int node_state(int node, enum node_states state)
@@ -434,7 +447,7 @@ static inline int num_node_state(enum node_states state)
 #define first_online_node	0
 #define next_online_node(nid)	(MAX_NUMNODES)
 #define nr_node_ids		1
-
+#define nr_online_nodes		1
 #endif
 
 #define node_online_map 	node_states[N_ONLINE]
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 1e99997..210e28c 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -875,7 +875,7 @@ static void return_unused_surplus_pages(struct hstate *h,
 	 * can no longer free unreserved surplus pages. This occurs when
 	 * the nodes with surplus pages have no free pages.
 	 */
-	unsigned long remaining_iterations = num_online_nodes();
+	unsigned long remaining_iterations = nr_online_nodes;
 
 	/* Uncommit the reservation */
 	h->resv_huge_pages -= unused_resv_pages;
@@ -904,7 +904,7 @@ static void return_unused_surplus_pages(struct hstate *h,
 			h->surplus_huge_pages--;
 			h->surplus_huge_pages_node[nid]--;
 			nr_pages--;
-			remaining_iterations = num_online_nodes();
+			remaining_iterations = nr_online_nodes;
 		}
 	}
 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 18f0b56..4131ff9 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -164,7 +164,9 @@ static unsigned long __meminitdata dma_reserve;
 
 #if MAX_NUMNODES > 1
 int nr_node_ids __read_mostly = MAX_NUMNODES;
+int nr_online_nodes __read_mostly = 1;
 EXPORT_SYMBOL(nr_node_ids);
+EXPORT_SYMBOL(nr_online_nodes);
 #endif
 
 int page_group_by_mobility_disabled __read_mostly;
@@ -1445,7 +1447,7 @@ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
 	/* Determine in advance if the zonelist needs filtering */
 	if ((alloc_flags & ALLOC_CPUSET) && unlikely(number_of_cpusets > 1))
 		zonelist_filter = 1;
-	if (num_online_nodes() > 1)
+	if (nr_online_nodes > 1)
 		zonelist_filter = 1;
 
 zonelist_scan:
@@ -1486,7 +1488,7 @@ this_zone_full:
 			zlc_mark_zone_full(zonelist, z);
 try_next_zone:
 		if (NUMA_BUILD && zonelist_filter) {
-			if (!did_zlc_setup && num_online_nodes() > 1) {
+			if (!did_zlc_setup && nr_online_nodes > 1) {
 				/*
 				 * do zlc_setup after the first zone is tried
 				 * but only if there are multiple nodes to make
@@ -2285,7 +2287,7 @@ int numa_zonelist_order_handler(ctl_table *table, int write,
 }
 
 
-#define MAX_NODE_LOAD (num_online_nodes())
+#define MAX_NODE_LOAD (nr_online_nodes)
 static int node_load[MAX_NUMNODES];
 
 /**
@@ -2494,7 +2496,7 @@ static void build_zonelists(pg_data_t *pgdat)
 
 	/* NUMA-aware ordering of nodes */
 	local_node = pgdat->node_id;
-	load = num_online_nodes();
+	load = nr_online_nodes;
 	prev_node = local_node;
 	nodes_clear(used_mask);
 
@@ -2645,7 +2647,7 @@ void build_all_zonelists(void)
 
 	printk("Built %i zonelists in %s order, mobility grouping %s.  "
 		"Total pages: %ld\n",
-			num_online_nodes(),
+			nr_online_nodes,
 			zonelist_order_name[current_zonelist_order],
 			page_group_by_mobility_disabled ? "off" : "on",
 			vm_total_pages);
diff --git a/mm/slab.c b/mm/slab.c
index e7f1ded..e6157a0 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3526,7 +3526,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
 	 * variable to skip the call, which is mostly likely to be present in
 	 * the cache.
 	 */
-	if (numa_platform && cache_free_alien(cachep, objp))
+	if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
 		return;
 
 	if (likely(ac->avail < ac->limit)) {
diff --git a/mm/slub.c b/mm/slub.c
index 0280eee..8ce6be8 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3648,7 +3648,7 @@ static int list_locations(struct kmem_cache *s, char *buf,
 						 to_cpumask(l->cpus));
 		}
 
-		if (num_online_nodes() > 1 && !nodes_empty(l->nodes) &&
+		if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
 				len < PAGE_SIZE - 60) {
 			len += sprintf(buf + len, " nodes=");
 			len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index c51fed4..ba9eb8f 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -124,7 +124,7 @@ svc_pool_map_choose_mode(void)
 {
 	unsigned int node;
 
-	if (num_online_nodes() > 1) {
+	if (nr_online_nodes > 1) {
 		/*
 		 * Actually have multiple NUMA nodes,
 		 * so split pools on NUMA node boundaries

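For reference, the pattern the patch introduces is just a bitmap and a
cached population count maintained side by side: the rare online/offline
path pays for a recount so that hot paths read a plain integer instead
of recomputing a hamming weight over the node mask each time. Below is a
minimal userspace sketch of that pattern (standalone C; all names here
are made-up stand-ins, not the kernel API):

#include <stdio.h>

/* Stand-in for node_states[N_ONLINE]: one bit per node. */
static unsigned long online_map;

/* Stand-in for nr_online_nodes: the cached count hot paths read. */
static int nr_online = 1;

/* The "heavy" operation num_online_nodes() performs on every call. */
static int count_online(void)
{
	return __builtin_popcountl(online_map);
}

/* Writers keep the cache coherent, as node_set_online() does above. */
static void set_online(int nid)
{
	online_map |= 1UL << nid;
	nr_online = count_online();
}

static void set_offline(int nid)
{
	online_map &= ~(1UL << nid);
	nr_online = count_online();
}

int main(void)
{
	set_online(0);
	set_online(1);
	set_offline(1);

	/* Readers in the fastpath just load an integer. */
	printf("online nodes: %d\n", nr_online);	/* prints 1 */
	return 0;
}

The tradeoff is deliberate: node online/offline events are rare, so
making them slightly more expensive is a cheap price for removing a
bitmap walk from the allocator fastpath.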

Thread overview: 94+ messages
2009-03-16  9:45 [PATCH 00/35] Cleanup and optimise the page allocator V3 Mel Gorman
2009-03-16  9:45 ` [PATCH 01/35] Replace __alloc_pages_internal() with __alloc_pages_nodemask() Mel Gorman
2009-03-16 15:49   ` Christoph Lameter
2009-03-16  9:45 ` [PATCH 02/35] Do not sanity check order in the fast path Mel Gorman
2009-03-16 15:52   ` Christoph Lameter
2009-03-16  9:45 ` [PATCH 03/35] Do not check NUMA node ID when the caller knows the node is valid Mel Gorman
2009-03-16  9:45 ` [PATCH 04/35] Check only once if the zonelist is suitable for the allocation Mel Gorman
2009-03-16  9:46 ` [PATCH 05/35] Break up the allocator entry point into fast and slow paths Mel Gorman
2009-03-16  9:46 ` [PATCH 06/35] Move check for disabled anti-fragmentation out of fastpath Mel Gorman
2009-03-16 15:54   ` Christoph Lameter
2009-03-16  9:46 ` [PATCH 07/35] Check in advance if the zonelist needs additional filtering Mel Gorman
2009-03-16  9:46 ` [PATCH 08/35] Calculate the preferred zone for allocation only once Mel Gorman
2009-03-16  9:46 ` [PATCH 09/35] Calculate the migratetype " Mel Gorman
2009-03-16  9:46 ` [PATCH 10/35] Calculate the alloc_flags " Mel Gorman
2009-03-16  9:46 ` [PATCH 11/35] Calculate the cold parameter " Mel Gorman
2009-03-16  9:46 ` [PATCH 12/35] Remove a branch by assuming __GFP_HIGH == ALLOC_HIGH Mel Gorman
2009-03-16  9:46 ` [PATCH 13/35] Inline __rmqueue_smallest() Mel Gorman
2009-03-16  9:46 ` [PATCH 14/35] Inline buffered_rmqueue() Mel Gorman
2009-03-16  9:46 ` [PATCH 15/35] Inline __rmqueue_fallback() Mel Gorman
2009-03-16 15:57   ` Christoph Lameter
2009-03-16 16:25     ` Mel Gorman
2009-03-16  9:46 ` [PATCH 16/35] Save text by reducing call sites of __rmqueue() Mel Gorman
2009-03-16  9:46 ` [PATCH 17/35] Do not call get_pageblock_migratetype() more than necessary Mel Gorman
2009-03-16 16:00   ` Christoph Lameter
2009-03-16  9:46 ` [PATCH 18/35] Do not disable interrupts in free_page_mlock() Mel Gorman
2009-03-16 16:05   ` Christoph Lameter
2009-03-16 16:29     ` Mel Gorman
2009-03-16  9:46 ` [PATCH 19/35] Do not setup zonelist cache when there is only one node Mel Gorman
2009-03-16 16:06   ` Christoph Lameter
2009-03-16  9:46 ` [PATCH 20/35] Use a pre-calculated value for num_online_nodes() Mel Gorman
2009-03-16 11:42   ` Nick Piggin
2009-03-16 11:46     ` Nick Piggin
2009-03-16 16:08   ` Christoph Lameter
2009-03-16 16:36     ` Mel Gorman
2009-03-16 16:47       ` Christoph Lameter
2009-03-18 15:08         ` Mel Gorman
2009-03-18 16:58           ` Christoph Lameter
2009-03-18 18:01             ` Mel Gorman
2009-03-18 19:10               ` Christoph Lameter
2009-03-19 20:43                 ` Christoph Lameter
2009-03-19 21:29                   ` Mel Gorman
2009-03-19 22:22                     ` Christoph Lameter
2009-03-19 22:33                       ` Mel Gorman
2009-03-19 22:42                         ` Christoph Lameter
2009-03-19 22:52                           ` Mel Gorman
2009-03-19 22:06                   ` Mel Gorman
2009-03-19 22:39                     ` Christoph Lameter
2009-03-19 22:21                   ` Mel Gorman [this message]
2009-03-19 22:24                     ` Christoph Lameter
2009-03-19 23:04                       ` Mel Gorman
2009-03-16  9:46 ` [PATCH 21/35] Do not check for compound pages during the page allocator sanity checks Mel Gorman
2009-03-16 16:09   ` Christoph Lameter
2009-03-16  9:46 ` [PATCH 22/35] Use allocation flags as an index to the zone watermark Mel Gorman
2009-03-16 16:11   ` Christoph Lameter
2009-03-16  9:46 ` [PATCH 23/35] Update NR_FREE_PAGES only as necessary Mel Gorman
2009-03-16 16:17   ` Christoph Lameter
2009-03-16 16:42     ` Mel Gorman
2009-03-16 16:48       ` Christoph Lameter
2009-03-16 16:58         ` Mel Gorman
2009-03-16  9:46 ` [PATCH 24/35] Convert gfp_zone() to use a table of precalculated values Mel Gorman
2009-03-16 16:19   ` Christoph Lameter
2009-03-16 16:45     ` Mel Gorman
2009-03-16  9:46 ` [PATCH 25/35] Re-sort GFP flags and fix whitespace alignment for easier reading Mel Gorman
2009-03-16  9:46 ` [PATCH 26/35] Use the per-cpu allocator for orders up to PAGE_ALLOC_COSTLY_ORDER Mel Gorman
2009-03-16 16:26   ` Christoph Lameter
2009-03-16 16:47     ` Mel Gorman
2009-03-16  9:46 ` [PATCH 27/35] Split per-cpu list into one-list-per-migrate-type Mel Gorman
2009-03-16  9:46 ` [PATCH 28/35] Batch free pages from migratetype per-cpu lists Mel Gorman
2009-03-16  9:46 ` [PATCH 29/35] Do not store the PCP high and batch watermarks in the per-cpu structure Mel Gorman
2009-03-16 16:30   ` Christoph Lameter
2009-03-16  9:46 ` [PATCH 30/35] Skip the PCP list search by counting the order and type of pages on list Mel Gorman
2009-03-16 16:31   ` Christoph Lameter
2009-03-16 16:51     ` Mel Gorman
2009-03-16  9:46 ` [PATCH 31/35] Optimistically check the first page on the PCP free list is suitable Mel Gorman
2009-03-16 16:33   ` Christoph Lameter
2009-03-16 16:52     ` Mel Gorman
2009-03-16  9:46 ` [PATCH 32/35] Inline next_zones_zonelist() of the zonelist scan in the fastpath Mel Gorman
2009-03-16  9:46 ` [PATCH 33/35] Do not merge buddies until they are needed by a high-order allocation or anti-fragmentation Mel Gorman
2009-03-16  9:46 ` [PATCH 34/35] Allow compound pages to be stored on the PCP lists Mel Gorman
2009-03-16 16:47   ` Christoph Lameter
2009-03-16  9:46 ` [PATCH 35/35] Allow up to 4MB PCP lists due to compound pages Mel Gorman
2009-03-16 10:40 ` [PATCH 00/35] Cleanup and optimise the page allocator V3 Nick Piggin
2009-03-16 11:19   ` Mel Gorman
2009-03-16 11:33     ` Nick Piggin
2009-03-16 12:02       ` Mel Gorman
2009-03-16 12:25         ` Nick Piggin
2009-03-16 13:32           ` Mel Gorman
2009-03-16 15:53             ` Nick Piggin
2009-03-16 16:56               ` Mel Gorman
2009-03-16 17:05                 ` Nick Piggin
2009-03-18 15:07                   ` Mel Gorman
2009-03-16 11:45 ` Nick Piggin
2009-03-16 12:11   ` Mel Gorman
2009-03-16 12:28     ` Nick Piggin
