From: Christoph Lameter <clameter@sgi.com>
To: Andy Whitcroft <apw@shadowen.org>
Cc: linux-kernel@vger.kernel.org
Cc: linux-mm@kvack.org
Cc: linux-fsdevel@vger.kernel.org
Cc: Christoph Hellwig <hch@lst.de>, Mel Gorman <mel@skynet.ie>
Cc: David Chinner <dgc@sgi.com>
Subject: [RFC 10/26] SLUB: Trigger defragmentation from memory reclaim
Date: Fri, 31 Aug 2007 18:41:17 -0700
Message-ID: <20070901014221.611405363@sgi.com>
In-Reply-To: <20070901014107.719506437@sgi.com>

[-- Attachment #1: 0010-slab_defrag_trigger_defrag_from_reclaim.patch --]
[-- Type: text/plain, Size: 5672 bytes --]

This patch triggers slab defragmentation from memory reclaim.
The logical point for this is right after slab shrinking has been
performed in vmscan.c: at that point objects have just been freed,
increasing the fragmentation of the remaining slabs. So we call
kmem_cache_defrag() from there.
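
A minimal sketch of the hook as placed at the tail of shrink_slab()
(this mirrors the shrink_slab() hunk below; the __GFP_FS test is
presumably there because defragmenting filesystem caches such as
inodes and dentries may recurse into the filesystem):

	/* After the shrinkers have run and freed objects: */
	if (gfp_mask & __GFP_FS)
		/* NULL zone == global shrink: pass -1 to defrag all nodes */
		kmem_cache_defrag(zone ? zone_to_nid(zone) : -1);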

shrink_slab() in vmscan.c is called in some contexts to do global
shrinking of slabs and in others to shrink slabs for a particular
zone. Pass the zone to shrink_slab() so that it can call
kmem_cache_defrag() and restrict the defragmentation to the node
that is under memory pressure.
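
For illustration, the two resulting call patterns (kmem_cache_defrag()
itself is introduced by the "Slab defrag core" patch earlier in this
series):

	/* Global reclaim, e.g. try_to_free_pages(): no target zone */
	shrink_slab(sc.nr_scanned, gfp_mask, lru_pages, NULL);

	/* Zone reclaim, e.g. balance_pgdat(): defrag only the zone's node */
	shrink_slab(sc.nr_scanned, GFP_KERNEL, lru_pages, zone);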

Signed-off-by: Christoph Lameter <clameter@sgi.com>
---
 fs/drop_caches.c     |    2 +-
 include/linux/mm.h   |    2 +-
 include/linux/slab.h |    1 +
 mm/vmscan.c          |   27 ++++++++++++++++++++-------
 4 files changed, 23 insertions(+), 9 deletions(-)

diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index 59375ef..fb58e63 100644
--- a/fs/drop_caches.c
+++ b/fs/drop_caches.c
@@ -50,7 +50,7 @@ void drop_slab(void)
 	int nr_objects;
 
 	do {
-		nr_objects = shrink_slab(1000, GFP_KERNEL, 1000);
+		nr_objects = shrink_slab(1000, GFP_KERNEL, 1000, NULL);
 	} while (nr_objects > 10);
 }
 
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a396aac..9fbb6ba 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1202,7 +1202,7 @@ int in_gate_area_no_task(unsigned long addr);
 int drop_caches_sysctl_handler(struct ctl_table *, int, struct file *,
 					void __user *, size_t *, loff_t *);
 unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
-			unsigned long lru_pages);
+			unsigned long lru_pages, struct zone *zone);
 void drop_pagecache(void);
 void drop_slab(void);
 
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 848e9a7..7d8ec17 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -61,6 +61,7 @@ void kmem_cache_free(struct kmem_cache *, void *);
 unsigned int kmem_cache_size(struct kmem_cache *);
 const char *kmem_cache_name(struct kmem_cache *);
 int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);
+int kmem_cache_defrag(int node);
 
 /*
  * Please use this macro to create slab caches. Simply specify the
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d419e10..c6882d8 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -150,10 +150,18 @@ EXPORT_SYMBOL(unregister_shrinker);
  * are eligible for the caller's allocation attempt.  It is used for balancing
  * slab reclaim versus page reclaim.
  *
+ * zone is the zone for which we are shrinking the slabs. If the intent
+ * is to do a global shrink then zone may be NULL. Specification of a
+ * zone is currently only used to limit slab defragmentation to a NUMA node.
+ * The performance of shrink_slab would be better (in particular under NUMA)
+ * if it could be targeted as a whole to the zone that is under memory
+ * pressure but the VFS infrastructure does not allow that at the present
+ * time.
+ *
  * Returns the number of slab objects which we shrunk.
  */
 unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
-			unsigned long lru_pages)
+			unsigned long lru_pages, struct zone *zone)
 {
 	struct shrinker *shrinker;
 	unsigned long ret = 0;
@@ -210,6 +218,8 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
 		shrinker->nr += total_scan;
 	}
 	up_read(&shrinker_rwsem);
+	if (gfp_mask & __GFP_FS)
+		kmem_cache_defrag(zone ? zone_to_nid(zone) : -1);
 	return ret;
 }
 
@@ -1151,7 +1161,8 @@ unsigned long try_to_free_pages(struct zone **zones, int order, gfp_t gfp_mask)
 		if (!priority)
 			disable_swap_token();
 		nr_reclaimed += shrink_zones(priority, zones, &sc);
-		shrink_slab(sc.nr_scanned, gfp_mask, lru_pages);
+		shrink_slab(sc.nr_scanned, gfp_mask, lru_pages,
+						NULL);
 		if (reclaim_state) {
 			nr_reclaimed += reclaim_state->reclaimed_slab;
 			reclaim_state->reclaimed_slab = 0;
@@ -1321,7 +1332,7 @@ loop_again:
 			nr_reclaimed += shrink_zone(priority, zone, &sc);
 			reclaim_state->reclaimed_slab = 0;
 			nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
-						lru_pages);
+						lru_pages, zone);
 			nr_reclaimed += reclaim_state->reclaimed_slab;
 			total_scanned += sc.nr_scanned;
 			if (zone->all_unreclaimable)
@@ -1559,7 +1570,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 	/* If slab caches are huge, it's better to hit them first */
 	while (nr_slab >= lru_pages) {
 		reclaim_state.reclaimed_slab = 0;
-		shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
+		shrink_slab(nr_pages, sc.gfp_mask, lru_pages, NULL);
 		if (!reclaim_state.reclaimed_slab)
 			break;
 
@@ -1597,7 +1608,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 
 			reclaim_state.reclaimed_slab = 0;
 			shrink_slab(sc.nr_scanned, sc.gfp_mask,
-					count_lru_pages());
+					count_lru_pages(), NULL);
 			ret += reclaim_state.reclaimed_slab;
 			if (ret >= nr_pages)
 				goto out;
@@ -1614,7 +1625,8 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 	if (!ret) {
 		do {
 			reclaim_state.reclaimed_slab = 0;
-			shrink_slab(nr_pages, sc.gfp_mask, count_lru_pages());
+			shrink_slab(nr_pages, sc.gfp_mask,
+					count_lru_pages(), NULL);
 			ret += reclaim_state.reclaimed_slab;
 		} while (ret < nr_pages && reclaim_state.reclaimed_slab > 0);
 	}
@@ -1774,7 +1786,8 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 		 * Note that shrink_slab will free memory on all zones and may
 		 * take a long time.
 		 */
-		while (shrink_slab(sc.nr_scanned, gfp_mask, order) &&
+		while (shrink_slab(sc.nr_scanned, gfp_mask, order,
+						zone) &&
 			zone_page_state(zone, NR_SLAB_RECLAIMABLE) >
 				slab_reclaimable - nr_pages)
 			;
-- 
1.5.2.4

Thread overview: 34+ messages
2007-09-01  1:41 [RFC 00/26] Slab defragmentation V5 Christoph Lameter
2007-09-01  1:41 ` [RFC 01/26] SLUB: Extend slabinfo to support -D and -C options Christoph Lameter
2007-09-01  1:41 ` [RFC 02/26] SLUB: Move count_partial() Christoph Lameter
2007-09-01  1:41 ` [RFC 03/26] SLUB: Rename NUMA defrag_ratio to remote_node_defrag_ratio Christoph Lameter
2007-09-01  1:41 ` [RFC 04/26] SLUB: Add defrag_ratio field and sysfs support Christoph Lameter
2007-09-01  1:41 ` [RFC 05/26] SLUB: Replace ctor field with ops field in /sys/slab/:0000008 /sys/slab/:0000016 /sys/slab/:0000024 /sys/slab/:0000032 /sys/slab/:0000040 /sys/slab/:0000048 /sys/slab/:0000056 /sys/slab/:0000064 /sys/slab/:0000072 /sys/slab/:0000080 /sys/slab/:0000088 /sys/slab/:0000096 /sys/slab/:0000104 /sys/slab/:0000128 /sys/slab/:0000144 /sys/slab/:0000184 /sys/slab/:0000192 /sys/slab/:0000216 /sys/slab/:0000256 /sys/slab/:0000344 /sys/slab/:0000384 /sys/slab/:0000448 /sys/slab/:0000512 /sys/slab/:0000768 /sys/slab/:0000920 /sys/slab/:0001024 /sys/slab/:0001152 /sys/slab/:0001344 /sys/slab/:0001536 /sys/slab/:0002048 /sys/slab/:0003072 /sys/slab/:0004096 /sys/slab/:a-0000056 /sys/slab/:a-0000080 /sys/slab/:a-0000128 /sys/slab/Acpi-Namespace /sys/slab/Acpi-Operand /sys/slab/Acpi-Parse /sys/slab/Acpi-ParseExt /sys/slab/Acpi-State /sys/slab/RAW /sys/slab/TCP /sys/slab/UDP /sys/sl Christoph Lameter
2007-09-01  1:41 ` [RFC 06/26] SLUB: Add get() and kick() methods Christoph Lameter
2007-09-01  1:41 ` [RFC 07/26] SLUB: Sort slab cache list and establish maximum objects for defrag slabs Christoph Lameter
2007-09-01  1:41 ` [RFC 08/26] SLUB: Consolidate add_partial and add_partial_tail to one function Christoph Lameter
2007-09-01  1:41 ` [RFC 09/26] SLUB: Slab defrag core Christoph Lameter
2007-09-01  1:41 ` Christoph Lameter [this message]
2007-09-01  1:41 ` [RFC 11/26] VM: Allow get_page_unless_zero on compound pages Christoph Lameter
2007-09-01  1:41 ` [RFC 12/26] SLUB: Slab reclaim through Lumpy reclaim Christoph Lameter
2007-09-01  1:41 ` [RFC 13/26] SLUB: Add SlabReclaimable() to avoid repeated reclaim attempts Christoph Lameter
2007-09-19 15:08   ` Rik van Riel
2007-09-19 18:00     ` Christoph Lameter
2007-09-01  1:41 ` [RFC 14/26] SLUB: __GFP_MOVABLE and SLAB_TEMPORARY support Christoph Lameter
2007-09-01  2:04   ` KAMEZAWA Hiroyuki
2007-09-01  2:07     ` Christoph Lameter
2007-09-01  1:41 ` [RFC 15/26] bufferhead: Revert constructor removal Christoph Lameter
2007-09-01  1:41 ` [RFC 16/26] Buffer heads: Support slab defrag Christoph Lameter
2007-09-01  1:41 ` [RFC 17/26] inodes: Support generic defragmentation Christoph Lameter
2007-09-01  1:41 ` [RFC 18/26] FS: ExtX filesystem defrag Christoph Lameter
2007-09-01  9:48   ` Jeff Garzik
2007-09-02 11:37     ` Christoph Lameter
2007-09-01  1:41 ` [RFC 19/26] FS: XFS slab defragmentation Christoph Lameter
2007-09-01  1:41 ` [RFC 20/26] FS: Proc filesystem support for slab defrag Christoph Lameter
2007-09-01  1:41 ` [RFC 21/26] FS: Slab defrag: Reiserfs support Christoph Lameter
2007-09-01  1:41 ` [RFC 22/26] FS: Socket inode defragmentation Christoph Lameter
2007-09-01  1:41 ` [RFC 23/26] dentries: Extract common code to remove dentry from lru Christoph Lameter
2007-09-01  1:41 ` [RFC 24/26] dentries: Add constructor Christoph Lameter
2007-09-01  1:41 ` [RFC 25/26] dentries: dentry defragmentation Christoph Lameter
2007-09-01  1:41 ` [RFC 26/26] SLUB: Add debugging for slab defrag Christoph Lameter
2007-09-06 20:34 ` [RFC 00/26] Slab defragmentation V5 Jörn Engel
