From: Christoph Lameter <clameter@sgi.com>
To: Andy Whitcroft <apw@shadowen.org>
Cc: linux-kernel@vger.kernel.org, linux-mm@kvack.org,
	linux-fsdevel@vger.kernel.org, Christoph Hellwig <hch@lst.de>,
	Mel Gorman <mel@skynet.ie>, David Chinner <dgc@sgi.com>
Subject: [RFC 08/26] SLUB: Consolidate add_partial and add_partial_tail to one function
Date: Fri, 31 Aug 2007 18:41:15 -0700
Message-ID: <20070901014221.148426921@sgi.com>
In-Reply-To: <20070901014107.719506437@sgi.com>

[-- Attachment #1: 0008-slab_defrag_add_partial_tail.patch --]
[-- Type: text/plain, Size: 3926 bytes --]

Add a parameter to add_partial() instead of having two separate
functions. That allows detailed control, from multiple call sites, over
where a slab is put back on the partial list. If we put slabs back at
the front of the list, they are likely to be used immediately for
allocations. If we put them at the end, we maximize the time that the
partial slabs spend without allocations.

When deactivating a slab, we can put slabs that had remote objects freed
to them at the end of the list, so that their cachelines can cool down.
Slabs that had objects freed to them by the local cpu are put at the
front of the list, to be reused ASAP.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
---
 mm/slub.c |   31 +++++++++++++++----------------
 1 file changed, 15 insertions(+), 16 deletions(-)

Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2007-08-28 20:03:16.000000000 -0700
+++ linux-2.6/mm/slub.c	2007-08-28 20:21:55.000000000 -0700
@@ -1173,19 +1173,15 @@ static __always_inline int slab_trylock(
 /*
  * Management of partially allocated slabs
  */
-static void add_partial_tail(struct kmem_cache_node *n, struct page *page)
+static void add_partial(struct kmem_cache_node *n,
+				struct page *page, int tail)
 {
 	spin_lock(&n->list_lock);
 	n->nr_partial++;
-	list_add_tail(&page->lru, &n->partial);
-	spin_unlock(&n->list_lock);
-}
-
-static void add_partial(struct kmem_cache_node *n, struct page *page)
-{
-	spin_lock(&n->list_lock);
-	n->nr_partial++;
-	list_add(&page->lru, &n->partial);
+	if (tail)
+		list_add_tail(&page->lru, &n->partial);
+	else
+		list_add(&page->lru, &n->partial);
 	spin_unlock(&n->list_lock);
 }
 
@@ -1314,7 +1310,7 @@ static struct page *get_partial(struct k
  *
  * On exit the slab lock will have been dropped.
  */
-static void unfreeze_slab(struct kmem_cache *s, struct page *page)
+static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 {
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
 
@@ -1322,7 +1318,7 @@ static void unfreeze_slab(struct kmem_ca
 	if (page->inuse) {
 
 		if (page->freelist)
-			add_partial(n, page);
+			add_partial(n, page, tail);
 		else if (SlabDebug(page) && (s->flags & SLAB_STORE_USER))
 			add_full(n, page);
 		slab_unlock(page);
@@ -1337,7 +1333,7 @@ static void unfreeze_slab(struct kmem_ca
 			 * partial list stays small. kmem_cache_shrink can
 			 * reclaim empty slabs from the partial list.
 			 */
-			add_partial_tail(n, page);
+			add_partial(n, page, 1);
 			slab_unlock(page);
 		} else {
 			slab_unlock(page);
@@ -1352,6 +1348,7 @@ static void unfreeze_slab(struct kmem_ca
 static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 {
 	struct page *page = c->page;
+	int tail = 1;
 	/*
 	 * Merge cpu freelist into freelist. Typically we get here
 	 * because both freelists are empty. So this is unlikely
@@ -1360,6 +1357,8 @@ static void deactivate_slab(struct kmem_
 	while (unlikely(c->freelist)) {
 		void **object;
 
+		tail = 0;	/* Hot objects. Put the slab first */
+
 		/* Retrieve object from cpu_freelist */
 		object = c->freelist;
 		c->freelist = c->freelist[c->offset];
@@ -1370,7 +1369,7 @@ static void deactivate_slab(struct kmem_
 		page->inuse--;
 	}
 	c->page = NULL;
-	unfreeze_slab(s, page);
+	unfreeze_slab(s, page, tail);
 }
 
 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
@@ -1603,7 +1602,7 @@ checks_ok:
 	 * then add it.
 	 */
 	if (unlikely(!prior))
-		add_partial(get_node(s, page_to_nid(page)), page);
+		add_partial(get_node(s, page_to_nid(page)), page, 0);
 
 out_unlock:
 	slab_unlock(page);
@@ -2012,7 +2011,7 @@ static struct kmem_cache_node * __init e
 #endif
 	init_kmem_cache_node(n);
 	atomic_long_inc(&n->nr_slabs);
-	add_partial(n, page);
+	add_partial(n, page, 0);
 
 	/*
 	 * new_slab() disables interupts. If we do not reenable interrupts here

-- 


Thread overview: 34+ messages
2007-09-01  1:41 [RFC 00/26] Slab defragmentation V5 Christoph Lameter
2007-09-01  1:41 ` [RFC 01/26] SLUB: Extend slabinfo to support -D and -C options Christoph Lameter
2007-09-01  1:41 ` [RFC 02/26] SLUB: Move count_partial() Christoph Lameter
2007-09-01  1:41 ` [RFC 03/26] SLUB: Rename NUMA defrag_ratio to remote_node_defrag_ratio Christoph Lameter
2007-09-01  1:41 ` [RFC 04/26] SLUB: Add defrag_ratio field and sysfs support Christoph Lameter
2007-09-01  1:41 ` [RFC 05/26] SLUB: Replace ctor field with ops field in /sys/slab/* Christoph Lameter
2007-09-01  1:41 ` [RFC 06/26] SLUB: Add get() and kick() methods Christoph Lameter
2007-09-01  1:41 ` [RFC 07/26] SLUB: Sort slab cache list and establish maximum objects for defrag slabs Christoph Lameter
2007-09-01  1:41 ` Christoph Lameter [this message]
2007-09-01  1:41 ` [RFC 09/26] SLUB: Slab defrag core Christoph Lameter
2007-09-01  1:41 ` [RFC 10/26] SLUB: Trigger defragmentation from memory reclaim Christoph Lameter
2007-09-01  1:41 ` [RFC 11/26] VM: Allow get_page_unless_zero on compound pages Christoph Lameter
2007-09-01  1:41 ` [RFC 12/26] SLUB: Slab reclaim through Lumpy reclaim Christoph Lameter
2007-09-01  1:41 ` [RFC 13/26] SLUB: Add SlabReclaimable() to avoid repeated reclaim attempts Christoph Lameter
2007-09-19 15:08   ` Rik van Riel
2007-09-19 18:00     ` Christoph Lameter
2007-09-01  1:41 ` [RFC 14/26] SLUB: __GFP_MOVABLE and SLAB_TEMPORARY support Christoph Lameter
2007-09-01  2:04   ` KAMEZAWA Hiroyuki
2007-09-01  2:07     ` Christoph Lameter
2007-09-01  1:41 ` [RFC 15/26] bufferhead: Revert constructor removal Christoph Lameter
2007-09-01  1:41 ` [RFC 16/26] Buffer heads: Support slab defrag Christoph Lameter
2007-09-01  1:41 ` [RFC 17/26] inodes: Support generic defragmentation Christoph Lameter
2007-09-01  1:41 ` [RFC 18/26] FS: ExtX filesystem defrag Christoph Lameter
2007-09-01  9:48   ` Jeff Garzik
2007-09-02 11:37     ` Christoph Lameter
2007-09-01  1:41 ` [RFC 19/26] FS: XFS slab defragmentation Christoph Lameter
2007-09-01  1:41 ` [RFC 20/26] FS: Proc filesystem support for slab defrag Christoph Lameter
2007-09-01  1:41 ` [RFC 21/26] FS: Slab defrag: Reiserfs support Christoph Lameter
2007-09-01  1:41 ` [RFC 22/26] FS: Socket inode defragmentation Christoph Lameter
2007-09-01  1:41 ` [RFC 23/26] dentries: Extract common code to remove dentry from lru Christoph Lameter
2007-09-01  1:41 ` [RFC 24/26] dentries: Add constructor Christoph Lameter
2007-09-01  1:41 ` [RFC 25/26] dentries: dentry defragmentation Christoph Lameter
2007-09-01  1:41 ` [RFC 26/26] SLUB: Add debugging for slab defrag Christoph Lameter
2007-09-06 20:34 ` [RFC 00/26] Slab defragmentation V5 Jörn Engel
