public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH] do not create 0-sized shared arrays
@ 2005-07-02 14:43 Manfred Spraul
  0 siblings, 0 replies; only message in thread
From: Manfred Spraul @ 2005-07-02 14:43 UTC (permalink / raw)
  To: Linux Kernel Mailing List; +Cc: bharata

[-- Attachment #1: Type: text/plain, Size: 599 bytes --]

Hi,

the slab allocator supports system-wide arrays with object pointers for 
fast memory allocations. The arrays are optional: for caches with large 
objects they are not used, because the arrays could use too much memory.
Bharata noticed a bug in the implementation: s_show accessed the shared 
array without checking that it's not NULL.
And do_tune_cpucache allocated an array, even if 0 entries were 
requested, thus s_show only oopses if the system runs out of memory...
The attached patch (against 2.6.12) fixes both bugs. It's tested on i386.

Signed-Off-By: Manfred Spraul <manfred@colorfullife.com>

[-- Attachment #2: patch-slab-01-no_0_shared --]
[-- Type: text/plain, Size: 2282 bytes --]

// $Header$
// Kernel Version:
//  VERSION = 2
//  PATCHLEVEL = 6
//  SUBLEVEL = 12
//  EXTRAVERSION =
--- 2.6/mm/slab.c	2005-06-18 15:00:24.000000000 +0200
+++ build-2.6/mm/slab.c	2005-07-02 16:37:52.000000000 +0200
@@ -2642,7 +2642,7 @@
 				int shared)
 {
 	struct ccupdate_struct new;
-	struct array_cache *new_shared;
+	struct array_cache *new_shared, *old;
 	int i;
 
 	memset(&new.new,0,sizeof(new.new));
@@ -2677,19 +2677,26 @@
 		spin_unlock_irq(&cachep->spinlock);
 		kfree(ccold);
 	}
-	new_shared = alloc_arraycache(-1, batchcount*shared, 0xbaadf00d);
-	if (new_shared) {
-		struct array_cache *old;
-
-		spin_lock_irq(&cachep->spinlock);
-		old = cachep->lists.shared;
-		cachep->lists.shared = new_shared;
-		if (old)
-			free_block(cachep, ac_entry(old), old->avail);
-		spin_unlock_irq(&cachep->spinlock);
-		kfree(old);
+	if (shared > 0) {
+		new_shared = alloc_arraycache(-1, batchcount*shared, 0xbaadf00d);
+		/*
+		 * Memory allocation failed - keep shared as it was
+		 */
+		if (!new_shared)
+			goto keep_shared;
+	} else {
+		new_shared = NULL;
 	}
 
+	spin_lock_irq(&cachep->spinlock);
+	old = cachep->lists.shared;
+	cachep->lists.shared = new_shared;
+	if (old)
+		free_block(cachep, ac_entry(old), old->avail);
+	spin_unlock_irq(&cachep->spinlock);
+	kfree(old);
+
+keep_shared:
 	return 0;
 }
 
@@ -2908,6 +2915,7 @@
 	unsigned long	num_objs;
 	unsigned long	active_slabs = 0;
 	unsigned long	num_slabs;
+	unsigned int	shared_sz, shared_avail;
 	const char *name; 
 	char *error = NULL;
 
@@ -2949,11 +2957,17 @@
 	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
 		name, active_objs, num_objs, cachep->objsize,
 		cachep->num, (1<<cachep->gfporder));
+	if (cachep->lists.shared) {
+		shared_sz = cachep->lists.shared->limit/cachep->batchcount;
+		shared_avail = cachep->lists.shared->avail;
+	} else {
+		shared_sz = 0;
+		shared_avail = 0;
+	}
 	seq_printf(m, " : tunables %4u %4u %4u",
-			cachep->limit, cachep->batchcount,
-			cachep->lists.shared->limit/cachep->batchcount);
+			cachep->limit, cachep->batchcount, shared_sz);
 	seq_printf(m, " : slabdata %6lu %6lu %6u",
-			active_slabs, num_slabs, cachep->lists.shared->avail);
+			active_slabs, num_slabs, shared_avail);
 #if STATS
 	{	/* list3 stats */
 		unsigned long high = cachep->high_mark;

^ permalink raw reply	[flat|nested] only message in thread

only message in thread, other threads:[~2005-07-02 14:44 UTC | newest]

Thread overview: (only message) (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2005-07-02 14:43 [PATCH] do not create 0-sized shared arrays Manfred Spraul

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox