linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
From: Christoph Lameter <cl@linux.com>
To: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: linux-mm@kvack.org, linux-kernel@vger.kernel.org,
	David Rientjes <rientjes@google.com>
Subject: [UnifiedV4 15/16] slub: Detailed reports on validate
Date: Tue, 05 Oct 2010 13:57:40 -0500	[thread overview]
Message-ID: <20101005185820.497596028@linux.com> (raw)
In-Reply-To: 20101005185725.088808842@linux.com

[-- Attachment #1: unified_detail_object_reports --]
[-- Type: text/plain, Size: 4718 bytes --]

Provide some more detail on what is going on with the various types of
objects in slabs. This is mainly useful for debugging the queueing operations.

Signed-off-by: Christoph Lameter <cl@linux.com>

---
 mm/slub.c |   86 +++++++++++++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 74 insertions(+), 12 deletions(-)

Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2010-10-05 13:40:08.000000000 -0500
+++ linux-2.6/mm/slub.c	2010-10-05 13:40:11.000000000 -0500
@@ -4151,12 +4151,24 @@ static int count_total(struct page *page
 #endif
 
 #ifdef CONFIG_SLUB_DEBUG
-static int validate_slab(struct kmem_cache *s, struct page *page)
+
+struct validate_counters {
+	int objects;
+	int available;
+	int queue;
+	int checked;
+	int unchecked;
+	int hist[];
+};
+
+static int validate_slab(struct kmem_cache *s, struct page *page,
+		int partial, struct validate_counters *v)
 {
 	void *p;
 	void *addr = page_address(page);
 	unsigned long *m = map(page);
 	unsigned long errors = 0;
+	unsigned long inuse = 0;
 
 	if (!check_slab(s, page) || !verify_slab(s, page))
 		return 0;
@@ -4168,7 +4180,10 @@ static int validate_slab(struct kmem_cac
 			/* Available */
 			if (!check_object(s, page, p, SLUB_RED_INACTIVE))
 				errors++;
+			else
+				v->available++;
 		} else {
+			inuse++;
 #ifdef CONFIG_SLUB_DEBUG
 			/*
 			 * We cannot check if the object is on a queue without
@@ -4178,24 +4193,45 @@ static int validate_slab(struct kmem_cac
 			if (s->flags & SLAB_RED_ZONE) {
 				u8 *q = p + s->objsize;
 
-				if (*q != SLUB_RED_QUEUE)
+				if (*q != SLUB_RED_QUEUE) {
 					if (!check_object(s, page, p, SLUB_RED_ACTIVE))
 						errors++;
-			}
+					else
+						v->checked++;
+				} else
+					v->queue++;
+			} else
+				/*
+				 * Allocated object that cannot be verified
+				 * since red zoning is disabled. The object
+				 * may be free after all if it's on a queue.
+				 */
 #endif
+				v->unchecked++;
 		}
 	}
 
+	v->hist[inuse]++;
+
+	if (inuse < page->objects) {
+		if (!partial)
+			slab_err(s, page, "Objects available but not on partial list");
+	} else {
+		if (partial)
+			slab_err(s, page, "On partial list but no object available");
+	}
+	v->objects += page->objects;
 	return errors;
 }
 
-static unsigned long validate_slab_slab(struct kmem_cache *s, struct page *page)
+static unsigned long validate_slab_slab(struct kmem_cache *s,
+	struct page *page, int partial, struct validate_counters *v)
 {
-	return validate_slab(s, page);
+	return validate_slab(s, page, partial, v);
 }
 
 static int validate_slab_node(struct kmem_cache *s,
-		struct kmem_cache_node *n)
+	struct kmem_cache_node *n, struct validate_counters *v)
 {
 	unsigned long count = 0;
 	struct page *page;
@@ -4206,7 +4242,7 @@ static int validate_slab_node(struct kme
 
 	list_for_each_entry(page, &n->partial, lru) {
 		if (get_node(s, page_to_nid(page)) == n)
-			errors += validate_slab_slab(s, page);
+			errors += validate_slab_slab(s, page, 1, v);
 		else
 			printk(KERN_ERR "SLUB %s: Partial list page from wrong node\n", s->name);
 		count++;
@@ -4219,7 +4255,7 @@ static int validate_slab_node(struct kme
 		goto out;
 
 	list_for_each_entry(page, &n->full, lru) {
-		validate_slab_slab(s, page);
+		errors += validate_slab_slab(s, page, 0, v);
 		count++;
 	}
 	if (count != atomic_long_read(&n->nr_slabs))
@@ -4235,15 +4271,41 @@ out:
 static long validate_slab_cache(struct kmem_cache *s)
 {
 	int node;
-	unsigned long count = 0;
+	int i;
+	struct validate_counters *v;
+	unsigned long errors = 0;
+	int maxobj = oo_objects(s->max);
+
+	v = kzalloc(offsetof(struct validate_counters, hist) + maxobj * sizeof(int), GFP_KERNEL);
+	if (!v)
+		return -ENOMEM;
 
-	flush_all(s);
 	for_each_node_state(node, N_NORMAL_MEMORY) {
 		struct kmem_cache_node *n = get_node(s, node);
 
-		count += validate_slab_node(s, n);
+		errors += validate_slab_node(s, n, v);
 	}
-	return count;
+
+	printk(KERN_DEBUG "Validation of slab %s: total=%d available=%d checked=%d",
+			s->name, v->objects, v->available, v->checked);
+
+	if (v->unchecked)
+		printk(" unchecked=%d", v->unchecked);
+
+	if (v->queue)
+		printk(" onqueue=%d", v->queue);
+
+	if (errors)
+		printk(" errors=%lu", errors);
+
+	for (i = 0; i < maxobj; i++)
+		if (v->hist[i])
+			printk(" p<%d>=%d", i, v->hist[i]);
+
+	printk("\n");
+	kfree(v);
+
+	return errors;
 }
 /*
  * Generate lists of code addresses where slabcache objects are allocated

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>

  parent reply	other threads:[~2010-10-05 19:00 UTC|newest]

Thread overview: 48+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2010-10-05 18:57 [UnifiedV4 00/16] The Unified slab allocator (V4) Christoph Lameter
2010-10-05 18:57 ` [UnifiedV4 01/16] slub: Enable sysfs support for !CONFIG_SLUB_DEBUG Christoph Lameter
2010-10-06 14:02   ` Pekka Enberg
2010-10-05 18:57 ` [UnifiedV4 02/16] slub: Move functions to reduce #ifdefs Christoph Lameter
2010-10-06 14:02   ` Pekka Enberg
2010-10-05 18:57 ` [UnifiedV4 03/16] slub: Add per cpu queueing Christoph Lameter
2010-10-05 18:57 ` [UnifiedV4 04/16] slub: Allow resizing of per cpu queues Christoph Lameter
2010-10-05 18:57 ` [UnifiedV4 05/16] slub: Remove MAX_OBJS limitation Christoph Lameter
2010-10-05 18:57 ` [UnifiedV4 06/16] slub: Drop allocator announcement Christoph Lameter
2010-10-05 18:57 ` [UnifiedV4 07/16] slub: Object based NUMA policies Christoph Lameter
2010-10-05 18:57 ` [UnifiedV4 08/16] slub: Get rid of page lock and rely on per node lock Christoph Lameter
2010-10-05 18:57 ` [UnifiedV4 09/16] slub: Shared cache to exploit cross cpu caching abilities Christoph Lameter
2010-10-05 18:57 ` [UnifiedV4 10/16] slub: Support Alien Caches Christoph Lameter
2010-10-05 18:57 ` [UnifiedV4 11/16] slub: Add a "touched" state to queues and partial lists Christoph Lameter
2010-10-05 18:57 ` [UnifiedV4 12/16] slub: Cached object expiration Christoph Lameter
2010-10-05 18:57 ` [UnifiedV4 13/16] vmscan: Tie slub object expiration into page reclaim Christoph Lameter
2010-10-05 18:57 ` [UnifiedV4 14/16] slub: Reduce size of not performance critical slabs Christoph Lameter
2010-10-05 18:57 ` Christoph Lameter [this message]
2010-10-05 18:57 ` [UnifiedV4 16/16] slub: Add stats for alien allocation slowpath Christoph Lameter
2010-10-06  8:01 ` [UnifiedV4 00/16] The Unified slab allocator (V4) Pekka Enberg
2010-10-06 11:03   ` Richard Kennedy
2010-10-06 11:19     ` Pekka Enberg
2010-10-06 15:46       ` Richard Kennedy
2010-10-06 16:21         ` [UnifiedV4 slabinfo 1/2] Move slabinfo.c to tools/slub/slabinfo.c Christoph Lameter
2010-10-06 16:21         ` [UnifiedV4 slabinfo 2/2] slub: update slabinfo.c for queuing Christoph Lameter
2010-10-06 20:56         ` [UnifiedV4 00/16] The Unified slab allocator (V4) Christoph Lameter
2010-10-06 16:00     ` Christoph Lameter
2010-10-06 12:37   ` Wu Fengguang
2010-10-13  2:21     ` Alex,Shi
2010-10-18 18:00       ` Christoph Lameter
2010-10-19  0:01         ` Alex,Shi
2010-10-06 15:56   ` Christoph Lameter
2010-10-13 14:14     ` Mel Gorman
2010-10-18 18:13       ` Christoph Lameter
2010-10-19  9:23         ` Mel Gorman
2010-10-12 18:25   ` Mel Gorman
2010-10-13  7:16     ` Pekka Enberg
2010-10-13 13:46       ` Mel Gorman
2010-10-13 16:10     ` Christoph Lameter
2010-10-06 10:47 ` Andi Kleen
2010-10-06 15:59   ` Christoph Lameter
2010-10-06 16:25     ` Andi Kleen
2010-10-06 16:37       ` Christoph Lameter
2010-10-06 16:43         ` Andi Kleen
2010-10-06 16:49           ` Christoph Lameter
2010-10-06 16:52           ` Christoph Lameter
2010-10-19 20:39 ` David Rientjes
2010-10-20 13:47   ` Christoph Lameter

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20101005185820.497596028@linux.com \
    --to=cl@linux.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=penberg@cs.helsinki.fi \
    --cc=rientjes@google.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).