public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
From: Christoph Lameter <cl@linux-foundation.org>
To: Tejun Heo <tj@kernel.org>
Cc: linux-kernel@vger.kernel.org, Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Subject: [this_cpu_xx V8 06/16] Make slub statistics use this_cpu_inc
Date: Fri, 18 Dec 2009 16:26:23 -0600	[thread overview]
Message-ID: <20091218222650.676681649@quilx.com> (raw)
In-Reply-To: 20091218222617.384355422@quilx.com

[-- Attachment #1: percpu_slub_cleanup_stat --]
[-- Type: text/plain, Size: 5136 bytes --]

this_cpu_inc() translates into a single instruction on x86 and does not
need any register. So use it in stat(). We also want to avoid the
calculation of the per cpu kmem_cache_cpu structure pointer. So pass
a kmem_cache pointer instead of a kmem_cache_cpu pointer.

Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Christoph Lameter <cl@linux-foundation.org>

---
 mm/slub.c |   43 ++++++++++++++++++++-----------------------
 1 file changed, 20 insertions(+), 23 deletions(-)

Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2009-09-29 11:44:35.000000000 -0500
+++ linux-2.6/mm/slub.c	2009-09-29 11:44:49.000000000 -0500
@@ -217,10 +217,10 @@ static inline void sysfs_slab_remove(str
 
 #endif
 
-static inline void stat(struct kmem_cache_cpu *c, enum stat_item si)
+static inline void stat(struct kmem_cache *s, enum stat_item si)
 {
 #ifdef CONFIG_SLUB_STATS
-	c->stat[si]++;
+	__this_cpu_inc(s->cpu_slab->stat[si]);
 #endif
 }
 
@@ -1108,7 +1108,7 @@ static struct page *allocate_slab(struct
 		if (!page)
 			return NULL;
 
-		stat(this_cpu_ptr(s->cpu_slab), ORDER_FALLBACK);
+		stat(s, ORDER_FALLBACK);
 	}
 
 	if (kmemcheck_enabled
@@ -1406,23 +1406,22 @@ static struct page *get_partial(struct k
 static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 {
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
-	struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);
 
 	__ClearPageSlubFrozen(page);
 	if (page->inuse) {
 
 		if (page->freelist) {
 			add_partial(n, page, tail);
-			stat(c, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
+			stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
 		} else {
-			stat(c, DEACTIVATE_FULL);
+			stat(s, DEACTIVATE_FULL);
 			if (SLABDEBUG && PageSlubDebug(page) &&
 						(s->flags & SLAB_STORE_USER))
 				add_full(n, page);
 		}
 		slab_unlock(page);
 	} else {
-		stat(c, DEACTIVATE_EMPTY);
+		stat(s, DEACTIVATE_EMPTY);
 		if (n->nr_partial < s->min_partial) {
 			/*
 			 * Adding an empty slab to the partial slabs in order
@@ -1438,7 +1437,7 @@ static void unfreeze_slab(struct kmem_ca
 			slab_unlock(page);
 		} else {
 			slab_unlock(page);
-			stat(__this_cpu_ptr(s->cpu_slab), FREE_SLAB);
+			stat(s, FREE_SLAB);
 			discard_slab(s, page);
 		}
 	}
@@ -1453,7 +1452,7 @@ static void deactivate_slab(struct kmem_
 	int tail = 1;
 
 	if (page->freelist)
-		stat(c, DEACTIVATE_REMOTE_FREES);
+		stat(s, DEACTIVATE_REMOTE_FREES);
 	/*
 	 * Merge cpu freelist into slab freelist. Typically we get here
 	 * because both freelists are empty. So this is unlikely
@@ -1479,7 +1478,7 @@ static void deactivate_slab(struct kmem_
 
 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 {
-	stat(c, CPUSLAB_FLUSH);
+	stat(s, CPUSLAB_FLUSH);
 	slab_lock(c->page);
 	deactivate_slab(s, c);
 }
@@ -1619,7 +1618,7 @@ static void *__slab_alloc(struct kmem_ca
 	if (unlikely(!node_match(c, node)))
 		goto another_slab;
 
-	stat(c, ALLOC_REFILL);
+	stat(s, ALLOC_REFILL);
 
 load_freelist:
 	object = c->page->freelist;
@@ -1634,7 +1633,7 @@ load_freelist:
 	c->node = page_to_nid(c->page);
 unlock_out:
 	slab_unlock(c->page);
-	stat(c, ALLOC_SLOWPATH);
+	stat(s, ALLOC_SLOWPATH);
 	return object;
 
 another_slab:
@@ -1644,7 +1643,7 @@ new_slab:
 	new = get_partial(s, gfpflags, node);
 	if (new) {
 		c->page = new;
-		stat(c, ALLOC_FROM_PARTIAL);
+		stat(s, ALLOC_FROM_PARTIAL);
 		goto load_freelist;
 	}
 
@@ -1658,7 +1657,7 @@ new_slab:
 
 	if (new) {
 		c = __this_cpu_ptr(s->cpu_slab);
-		stat(c, ALLOC_SLAB);
+		stat(s, ALLOC_SLAB);
 		if (c->page)
 			flush_slab(s, c);
 		slab_lock(new);
@@ -1713,7 +1712,7 @@ static __always_inline void *slab_alloc(
 
 	else {
 		c->freelist = get_freepointer(s, object);
-		stat(c, ALLOC_FASTPATH);
+		stat(s, ALLOC_FASTPATH);
 	}
 	local_irq_restore(flags);
 
@@ -1780,10 +1779,8 @@ static void __slab_free(struct kmem_cach
 {
 	void *prior;
 	void **object = (void *)x;
-	struct kmem_cache_cpu *c;
 
-	c = __this_cpu_ptr(s->cpu_slab);
-	stat(c, FREE_SLOWPATH);
+	stat(s, FREE_SLOWPATH);
 	slab_lock(page);
 
 	if (unlikely(SLABDEBUG && PageSlubDebug(page)))
@@ -1796,7 +1793,7 @@ checks_ok:
 	page->inuse--;
 
 	if (unlikely(PageSlubFrozen(page))) {
-		stat(c, FREE_FROZEN);
+		stat(s, FREE_FROZEN);
 		goto out_unlock;
 	}
 
@@ -1809,7 +1806,7 @@ checks_ok:
 	 */
 	if (unlikely(!prior)) {
 		add_partial(get_node(s, page_to_nid(page)), page, 1);
-		stat(c, FREE_ADD_PARTIAL);
+		stat(s, FREE_ADD_PARTIAL);
 	}
 
 out_unlock:
@@ -1822,10 +1819,10 @@ slab_empty:
 		 * Slab still on the partial list.
 		 */
 		remove_partial(s, page);
-		stat(c, FREE_REMOVE_PARTIAL);
+		stat(s, FREE_REMOVE_PARTIAL);
 	}
 	slab_unlock(page);
-	stat(c, FREE_SLAB);
+	stat(s, FREE_SLAB);
 	discard_slab(s, page);
 	return;
 
@@ -1863,7 +1860,7 @@ static __always_inline void slab_free(st
 	if (likely(page == c->page && c->node >= 0)) {
 		set_freepointer(s, object, c->freelist);
 		c->freelist = object;
-		stat(c, FREE_FASTPATH);
+		stat(s, FREE_FASTPATH);
 	} else
 		__slab_free(s, page, x, addr);
 

-- 

  parent reply	other threads:[~2009-12-18 22:29 UTC|newest]

Thread overview: 38+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2009-12-18 22:26 [this_cpu_xx V8 00/16] Per cpu atomics in core allocators, cleanup and more this_cpu_ops Christoph Lameter
2009-12-18 22:26 ` [this_cpu_xx V8 01/16] this_cpu_ops: page allocator conversion Christoph Lameter
2009-12-18 22:26 ` [this_cpu_xx V8 02/16] this_cpu ops: Remove pageset_notifier Christoph Lameter
2009-12-18 22:26 ` [this_cpu_xx V8 03/16] Use this_cpu operations in slub Christoph Lameter
2009-12-20  9:11   ` Pekka Enberg
2009-12-18 22:26 ` [this_cpu_xx V8 04/16] SLUB: Get rid of dynamic DMA kmalloc cache allocation Christoph Lameter
2009-12-18 22:26 ` [this_cpu_xx V8 05/16] this_cpu: Remove slub kmem_cache fields Christoph Lameter
2009-12-18 22:26 ` Christoph Lameter [this message]
2009-12-18 22:26 ` [this_cpu_xx V8 07/16] Module handling: Use this_cpu_xx to dynamically allocate counters Christoph Lameter
2009-12-21  7:47   ` Tejun Heo
2009-12-21  7:59     ` Tejun Heo
2009-12-21  8:19       ` Tejun Heo
2009-12-21 23:28       ` Rusty Russell
2009-12-22  0:02         ` Tejun Heo
2009-12-22 16:17           ` Christoph Lameter
2009-12-22 15:58       ` Christoph Lameter
2009-12-22 15:57     ` Christoph Lameter
2009-12-23  2:07       ` Tejun Heo
2010-01-04 17:22         ` Christoph Lameter
2010-01-04 17:51           ` Mathieu Desnoyers
2009-12-18 22:26 ` [this_cpu_xx V8 08/16] Remove cpu_local_xx macros Christoph Lameter
2009-12-18 22:26 ` [this_cpu_xx V8 09/16] Allow arch to provide inc/dec functionality for each size separately Christoph Lameter
2009-12-21  7:25   ` Tejun Heo
2009-12-22 15:56     ` Christoph Lameter
2009-12-23  2:08       ` Tejun Heo
2009-12-18 22:26 ` [this_cpu_xx V8 10/16] Support generating inc/dec for this_cpu_inc/dec Christoph Lameter
2009-12-18 22:26 ` [this_cpu_xx V8 11/16] Generic support for this_cpu_cmpxchg Christoph Lameter
2009-12-19 14:45   ` Mathieu Desnoyers
2009-12-22 15:54     ` Christoph Lameter
2009-12-22 17:24       ` Mathieu Desnoyers
2010-01-04 17:21         ` Christoph Lameter
2010-01-05 22:29           ` Mathieu Desnoyers
2010-01-05 22:35             ` Christoph Lameter
2009-12-18 22:26 ` [this_cpu_xx V8 12/16] Add percpu cmpxchg operations Christoph Lameter
2009-12-18 22:26 ` [this_cpu_xx V8 13/16] Generic support for this_cpu_xchg Christoph Lameter
2009-12-18 22:26 ` [this_cpu_xx V8 14/16] x86 percpu xchg operation Christoph Lameter
2009-12-18 22:26 ` [this_cpu_xx V8 15/16] Generic support for this_cpu_add_return() Christoph Lameter
2009-12-18 22:26 ` [this_cpu_xx V8 16/16] x86 support for this_cpu_add_return Christoph Lameter

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20091218222650.676681649@quilx.com \
    --to=cl@linux-foundation.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=penberg@cs.helsinki.fi \
    --cc=tj@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox