public inbox for linux-kernel@vger.kernel.org
From: Christoph Lameter <clameter@sgi.com>
To: linux-kernel@vger.kernel.org
Cc: Christoph Lameter <clameter@sgi.com>
Subject: [PATCH 7/7] Simple Performance Counters: SLUB instrumentation
Date: Tue, 31 Jul 2007 16:25:12 -0700	[thread overview]
Message-ID: <1185924312685-git-send-email-clameter@sgi.com> (raw)
In-Reply-To: simple-perf-counters

With this patch SLUB runs a series of kmalloc/kfree, kmem_cache_alloc/kmem_cache_free and kmem_cache_zalloc timing tests on bootup and prints the results. Like the existing resiliency test, the code is compiled out by default and is only built when SLUB_PERFORMANCE_TEST is defined.
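
To run the tests, change the #undef that this patch adds into a #define before building, e.g. (a minimal sketch, editing mm/slub.c directly):

	/* Enable the boot-time SLUB performance measurements */
	#define SLUB_PERFORMANCE_TEST

Each test brackets TEST_COUNT (10000) operations with a pc_start()/pc_stop_printk() pair on an on-stack struct pc, presumably the simple performance counter interface from <linux/perf.h> introduced earlier in this series; pc_stop_printk() stops the counter and prints the measured cost of the bracketed region.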

Signed-off-by: Christoph Lameter <clameter@sgi.com>
---
 mm/slub.c |   97 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++---
 1 files changed, 92 insertions(+), 5 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 6c6d74f..568b16a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -20,6 +20,7 @@
 #include <linux/mempolicy.h>
 #include <linux/ctype.h>
 #include <linux/kallsyms.h>
+#include <linux/perf.h>
 
 /*
  * Lock order:
@@ -152,6 +153,7 @@ static inline void ClearSlabDebug(struct page *page)
 
 /* Enable to test recovery from slab corruption on boot */
 #undef SLUB_RESILIENCY_TEST
+#undef SLUB_PERFORMANCE_TEST
 
 #if PAGE_SHIFT <= 12
 
@@ -2870,9 +2872,97 @@ static long validate_slab_cache(struct kmem_cache *s)
 	return count;
 }
 
-#ifdef SLUB_RESILIENCY_TEST
 static void resiliency_test(void)
 {
+#ifdef SLUB_PERFORMANCE_TEST
+#define TEST_COUNT 10000
+	int size, i;
+	struct pc x;
+	void **v = kmalloc(TEST_COUNT * sizeof(void *), GFP_KERNEL);
+
+	printk(KERN_INFO "SLUB Performance testing\n");
+	printk(KERN_INFO "========================\n");
+	printk(KERN_INFO "1. Kmalloc: Repeatedly allocate then free test\n");
+	for (size = 8; size <= PAGE_SIZE << 2; size <<= 1) {
+		pc_start(&x);
+		for(i = 0; i < TEST_COUNT; i++) {
+			v[i] = kmalloc(size, GFP_KERNEL);
+		}
+		printk(KERN_INFO "%i times kmalloc(%d) = ", i, size);
+		pc_stop_printk(&x);
+		pc_start(&x);
+		for(i = 0; i < TEST_COUNT; i++)
+			kfree(v[i]);
+		printk(" kfree() = ");
+		pc_stop_printk(&x);
+		printk("\n");
+	}
+
+	printk(KERN_INFO "2. Kmalloc: alloc/free test\n");
+	for (size = 8; size <= PAGE_SIZE << 2; size <<= 1) {
+		pc_start(&x);
+		for(i = 0; i < TEST_COUNT; i++)
+			kfree(kmalloc(size, GFP_KERNEL));
+		printk(KERN_INFO "%i times kmalloc(%d)/kfree = ", i, size);
+		pc_stop_printk(&x);
+		printk("\n");
+	}
+	printk(KERN_INFO "3. kmem_cache_alloc: Repeatedly allocate then free test\n");
+	for (size = 3; size <= PAGE_SHIFT; size++) {
+		pc_start(&x);
+		for(i = 0; i < TEST_COUNT; i++) {
+			v[i] = kmem_cache_alloc(kmalloc_caches + size, GFP_KERNEL);
+		}
+		printk(KERN_INFO "%d times kmem_cache_alloc(%d) = ", i, 1 << size);
+		pc_stop_printk(&x);
+		pc_start(&x);
+		for(i = 0; i < TEST_COUNT; i++)
+			kmem_cache_free(kmalloc_caches + size, v[i]);
+		printk(" kmem_cache_free() = ");
+		pc_stop_printk(&x);
+		printk("\n");
+	}
+
+	printk(KERN_INFO "4. kmem_cache_alloc: alloc/free test\n");
+	for (size = 3; size <= PAGE_SHIFT; size++) {
+		pc_start(&x);
+		for(i = 0; i < TEST_COUNT; i++)
+			kmem_cache_free(kmalloc_caches + size,
+				kmem_cache_alloc(kmalloc_caches + size,
+							GFP_KERNEL));
+		printk(KERN_INFO "%d times kmem_cache_alloc(%d)/kmem_cache_free = ", i, 1 << size);
+		pc_stop_printk(&x);
+		printk("\n");
+	}
+	printk(KERN_INFO "5. kmem_cache_zalloc: Repeatedly allocate then free test\n");
+	for (size = 3; size <= PAGE_SHIFT; size++) {
+		pc_start(&x);
+		for(i = 0; i < TEST_COUNT; i++) {
+			v[i] = kmem_cache_zalloc(kmalloc_caches + size, GFP_KERNEL);
+		}
+		printk(KERN_INFO "%d times kmem_cache_zalloc(%d) = ", i, 1 << size);
+		pc_stop_printk(&x);
+		pc_start(&x);
+		for(i = 0; i < TEST_COUNT; i++)
+			kmem_cache_free(kmalloc_caches + size, v[i]);
+		printk(" kmem_cache_free() = ");
+		pc_stop_printk(&x);
+		printk("\n");
+	}
+
+	printk(KERN_INFO "6. kmem_cache_zalloc: alloc/free test\n");
+	for (size = 3; size <= PAGE_SHIFT; size++) {
+		pc_start(&x);
+		for(i = 0; i < TEST_COUNT; i++)
+			kmem_cache_free(kmalloc_caches + size,
+				kmem_cache_zalloc(kmalloc_caches + size,
+							GFP_KERNEL));
+		printk(KERN_INFO "%d times kmem_cache_zalloc(%d)/kmem_cache_free = ", i, 1 << size);
+		pc_stop_printk(&x);
+		printk("\n");
+	}
+#endif
+#ifdef SLUB_RESILIENCY_TEST
 	u8 *p;
 
 	printk(KERN_ERR "SLUB resiliency testing\n");
@@ -2920,11 +3010,8 @@ static void resiliency_test(void)
 	p[512] = 0xab;
 	printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
 	validate_slab_cache(kmalloc_caches + 9);
-}
-#else
-static void resiliency_test(void) {};
 #endif
-
+}
 /*
  * Generate lists of code addresses where slabcache objects are allocated
  * and freed.
-- 
1.5.2.4


Thread overview: 5+ messages
2007-07-31 23:25 Christoph Lameter [this message]
2007-08-17 16:21 ` [PATCH 7/7] Simple Performance Counters: SLUB instrumentation Mathieu Desnoyers
2007-08-17 20:39   ` Christoph Lameter
2007-08-18  3:20     ` Mathieu Desnoyers
2007-08-20 19:41       ` Christoph Lameter
