From: Christoph Lameter <clameter@sgi.com>
To: Matthew Wilcox <matthew@wil.cx>
Cc: akpm@linux-foundation.org, linux-kernel@vger.kernel.org,
	linux-mm@kvack.org, Pekka Enberg <penberg@cs.helsinki.fi>
Subject: [patch 03/10] SLUB: Move kmem_cache_node determination into add_full and add_partial
Date: Sat, 27 Oct 2007 20:31:59 -0700
Message-ID: <20071028033259.021882647@sgi.com>
In-Reply-To: <20071028033156.022983073@sgi.com>

The kmem_cache_node determination can be moved into add_full()
and add_partial(). This removes code from the slab_free()
slow path and reduces the register pressure that the slow path
has to manage. As a side effect, the SlabDebug()/SLAB_STORE_USER
check that used to guard the add_full() call in unfreeze_slab()
now lives inside add_full() itself.
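
For illustration, the calling convention changes roughly as follows
(a minimal sketch distilled from the diff below; locking and list
handling elided):

	/* Before: every caller looked up the per-node structure itself */
	add_partial(get_node(s, page_to_nid(page)), page, 0);

	/* After: callers pass the kmem_cache and the callee resolves
	 * the node once, internally */
	add_partial(s, page, 0);

	static void add_partial(struct kmem_cache *s,
					struct page *page, int tail)
	{
		struct kmem_cache_node *n = get_node(s, page_to_nid(page));
		/* ... take n->list_lock and queue page on n->partial ... */
	}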

Signed-off-by: Christoph Lameter <clameter@sgi.com>

---
 mm/slub.c |   29 +++++++++++++++++------------
 1 file changed, 17 insertions(+), 12 deletions(-)

Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2007-10-25 19:36:59.000000000 -0700
+++ linux-2.6/mm/slub.c	2007-10-25 19:37:38.000000000 -0700
@@ -800,8 +800,12 @@ static void trace(struct kmem_cache *s, 
 /*
  * Tracking of fully allocated slabs for debugging purposes.
  */
-static void add_full(struct kmem_cache_node *n, struct page *page)
+static void add_full(struct kmem_cache *s, struct page *page)
 {
+	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+
+	if (!SlabDebug(page) || !(s->flags & SLAB_STORE_USER))
+		return;
 	spin_lock(&n->list_lock);
 	list_add(&page->lru, &n->full);
 	spin_unlock(&n->list_lock);
@@ -1025,7 +1029,7 @@ static inline int slab_pad_check(struct 
 			{ return 1; }
 static inline int check_object(struct kmem_cache *s, struct page *page,
 			void *object, int active) { return 1; }
-static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
+static inline void add_full(struct kmem_cache *s, struct page *page) {}
 static inline unsigned long kmem_cache_flags(unsigned long objsize,
 	unsigned long flags, const char *name,
 	void (*ctor)(struct kmem_cache *, void *))
@@ -1198,9 +1202,11 @@ static __always_inline int slab_trylock(
 /*
  * Management of partially allocated slabs
  */
-static void add_partial(struct kmem_cache_node *n,
+static void add_partial(struct kmem_cache *s,
 				struct page *page, int tail)
 {
+	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+
 	spin_lock(&n->list_lock);
 	n->nr_partial++;
 	if (tail)
@@ -1336,19 +1342,18 @@ static struct page *get_partial(struct k
  */
 static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 {
-	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
-
 	ClearSlabFrozen(page);
 	if (page->inuse) {
 
 		if (page->freelist)
-			add_partial(n, page, tail);
-		else if (SlabDebug(page) && (s->flags & SLAB_STORE_USER))
-			add_full(n, page);
+			add_partial(s, page, tail);
+		else
+			add_full(s, page);
 		slab_unlock(page);
 
 	} else {
-		if (n->nr_partial < MIN_PARTIAL) {
+		if (get_node(s, page_to_nid(page))->nr_partial
+							< MIN_PARTIAL) {
 			/*
 			 * Adding an empty slab to the partial slabs in order
 			 * to avoid page allocator overhead. This slab needs
@@ -1357,7 +1362,7 @@ static void unfreeze_slab(struct kmem_ca
 			 * partial list stays small. kmem_cache_shrink can
 			 * reclaim empty slabs from the partial list.
 			 */
-			add_partial(n, page, 1);
+			add_partial(s, page, 1);
 			slab_unlock(page);
 		} else {
 			slab_unlock(page);
@@ -1633,7 +1638,7 @@ checks_ok:
 	 * then add it.
 	 */
 	if (unlikely(!prior))
-		add_partial(get_node(s, page_to_nid(page)), page, 0);
+		add_partial(s, page, 0);
 
 out_unlock:
 	slab_unlock(page);
@@ -2041,7 +2046,7 @@ static struct kmem_cache_node *early_kme
 #endif
 	init_kmem_cache_node(n);
 	atomic_long_inc(&n->nr_slabs);
-	add_partial(n, page, 0);
+	add_partial(kmalloc_caches, page, 0);
 	return n;
 }
 

-- 

Thread overview: 35+ messages
2007-10-28  3:31 [patch 00/10] SLUB: SMP regression tests on Dual Xeon E5345 (8p) and new performance patches Christoph Lameter
2007-10-28  3:31 ` [patch 01/10] SLUB: Consolidate add_partial and add_partial_tail to one function Christoph Lameter
2007-10-28 13:07   ` Pekka J Enberg
2007-10-28  3:31 ` [patch 02/10] SLUB: Noinline some functions to avoid them being folded into alloc/free Christoph Lameter
2007-10-28 13:08   ` Pekka J Enberg
2007-10-29 23:25   ` Matt Mackall
2007-10-28  3:31 ` Christoph Lameter [this message]
2007-10-28 13:09   ` [patch 03/10] SLUB: Move kmem_cache_node determination into add_full and add_partial Pekka J Enberg
2007-10-28  3:32 ` [patch 04/10] SLUB: Avoid checking for a valid object before zeroing on the fast path Christoph Lameter
2007-10-28 13:10   ` Pekka J Enberg
2007-10-28  3:32 ` [patch 05/10] SLUB: __slab_alloc() exit path consolidation Christoph Lameter
2007-10-28 13:11   ` Pekka J Enberg
2007-10-28  3:32 ` [patch 06/10] SLUB: Provide unique end marker for each slab Christoph Lameter
2007-10-28  3:32 ` [patch 07/10] SLUB: Avoid referencing kmem_cache structure in __slab_alloc Christoph Lameter
2007-10-28 13:12   ` Pekka J Enberg
2007-10-30 18:38   ` Andrew Morton
2007-10-28  3:32 ` [patch 08/10] SLUB: Optional fast path using cmpxchg_local Christoph Lameter
2007-10-28 13:05   ` Pekka J Enberg
2007-10-29  2:59     ` Christoph Lameter
2007-10-29  3:34     ` Christoph Lameter
2007-10-30 18:30     ` Andrew Morton
2007-10-30 18:49   ` Andrew Morton
2007-10-30 18:58     ` Christoph Lameter
2007-10-30 19:12       ` Mathieu Desnoyers
2007-10-31  1:52       ` [PATCH] local_t Documentation update 2 Mathieu Desnoyers
2007-10-31  2:28   ` [patch 08/10] SLUB: Optional fast path using cmpxchg_local Mathieu Desnoyers
2007-10-28  3:32 ` [patch 09/10] SLUB: Do our own locking via slab_lock and slab_unlock Christoph Lameter
2007-10-28 15:10   ` Pekka J Enberg
2007-10-28 15:14     ` Pekka J Enberg
2007-10-29  3:03     ` Christoph Lameter
2007-10-29  6:30       ` Pekka Enberg
2007-10-30  4:50   ` Nick Piggin
2007-10-30 18:32     ` Christoph Lameter
2007-10-31  1:17       ` Nick Piggin
2007-10-28  3:32 ` [patch 10/10] SLUB: Restructure slab alloc Christoph Lameter
