public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
From: Ravikiran G Thirumalai <kiran@scalex86.org>
To: linux-kernel@vger.kernel.org
Cc: Andrew Morton <akpm@osdl.org>,
	Manfred Spraul <manfred@colorfullife.com>,
	"Shai Fultheim (Shai@scalex86.org)" <shai@scalex86.org>,
	Christoph Lameter <clameter@engr.sgi.com>,
	Alok Kataria <alok.kataria@calsoftinc.com>,
	sonny@burdell.org
Subject: [patch 2/3] NUMA slab locking fixes -- slab locking irq optimizations
Date: Fri, 3 Feb 2006 12:56:15 -0800	[thread overview]
Message-ID: <20060203205615.GE3653@localhost.localdomain> (raw)
In-Reply-To: <20060203205341.GC3653@localhost.localdomain>

Earlier, we had to disable on-chip interrupts while taking the cachep->spinlock
because, at cache_grow, on every addition of a slab to a slab cache, we
incremented colour_next, which was protected by the cachep->spinlock, and
cache_grow could occur in interrupt context.  Since we now protect the
per-node colour_next with the node's list_lock, we no longer need to disable
on-chip interrupts while taking the per-cache spinlock; we only need to
disable interrupts when taking the per-node kmem_list3 list_lock.

Signed-off-by: Alok N Kataria <alokk@calsoftinc.com>
Signed-off-by: Ravikiran Thirumalai <kiran@scalex86.org>
Signed-off-by: Shai Fultheim <shai@scalex86.org>

Index: linux-2.6.16-rc1mm4/mm/slab.c
===================================================================
--- linux-2.6.16-rc1mm4.orig/mm/slab.c	2006-01-29 20:23:28.000000000 -0800
+++ linux-2.6.16-rc1mm4/mm/slab.c	2006-01-29 23:13:14.000000000 -0800
@@ -995,7 +995,7 @@ static int __devinit cpuup_callback(stru
 			cpumask_t mask;
 
 			mask = node_to_cpumask(node);
-			spin_lock_irq(&cachep->spinlock);
+			spin_lock(&cachep->spinlock);
 			/* cpu is dead; no one can alloc from it. */
 			nc = cachep->array[cpu];
 			cachep->array[cpu] = NULL;
@@ -1004,7 +1004,7 @@ static int __devinit cpuup_callback(stru
 			if (!l3)
 				goto unlock_cache;
 
-			spin_lock(&l3->list_lock);
+			spin_lock_irq(&l3->list_lock);
 
 			/* Free limit for this kmem_list3 */
 			l3->free_limit -= cachep->batchcount;
@@ -1012,7 +1012,7 @@ static int __devinit cpuup_callback(stru
 				free_block(cachep, nc->entry, nc->avail, node);
 
 			if (!cpus_empty(mask)) {
-				spin_unlock(&l3->list_lock);
+				spin_unlock_irq(&l3->list_lock);
 				goto unlock_cache;
 			}
 
@@ -1031,13 +1031,13 @@ static int __devinit cpuup_callback(stru
 			/* free slabs belonging to this node */
 			if (__node_shrink(cachep, node)) {
 				cachep->nodelists[node] = NULL;
-				spin_unlock(&l3->list_lock);
+				spin_unlock_irq(&l3->list_lock);
 				kfree(l3);
 			} else {
-				spin_unlock(&l3->list_lock);
+				spin_unlock_irq(&l3->list_lock);
 			}
 		      unlock_cache:
-			spin_unlock_irq(&cachep->spinlock);
+			spin_unlock(&cachep->spinlock);
 			kfree(nc);
 		}
 		mutex_unlock(&cache_chain_mutex);
@@ -2021,18 +2021,18 @@ static void drain_cpu_caches(struct kmem
 
 	smp_call_function_all_cpus(do_drain, cachep);
 	check_irq_on();
-	spin_lock_irq(&cachep->spinlock);
+	spin_lock(&cachep->spinlock);
 	for_each_online_node(node) {
 		l3 = cachep->nodelists[node];
 		if (l3) {
-			spin_lock(&l3->list_lock);
+			spin_lock_irq(&l3->list_lock);
 			drain_array_locked(cachep, l3->shared, 1, node);
-			spin_unlock(&l3->list_lock);
+			spin_unlock_irq(&l3->list_lock);
 			if (l3->alien)
 				drain_alien_cache(cachep, l3);
 		}
 	}
-	spin_unlock_irq(&cachep->spinlock);
+	spin_unlock(&cachep->spinlock);
 }
 
 static int __node_shrink(struct kmem_cache *cachep, int node)
@@ -2348,7 +2348,6 @@ static int cache_grow(struct kmem_cache 
 
 	offset *= cachep->colour_off;
 
-	check_irq_off();
 	if (local_flags & __GFP_WAIT)
 		local_irq_enable();
 
@@ -2744,6 +2743,7 @@ static void *__cache_alloc_node(struct k
 	BUG_ON(!l3);
 
       retry:
+	check_irq_off();
 	spin_lock(&l3->list_lock);
 	entry = l3->slabs_partial.next;
 	if (entry == &l3->slabs_partial) {
@@ -3323,11 +3323,11 @@ static int do_tune_cpucache(struct kmem_
 	smp_call_function_all_cpus(do_ccupdate_local, (void *)&new);
 
 	check_irq_on();
-	spin_lock_irq(&cachep->spinlock);
+	spin_lock(&cachep->spinlock);
 	cachep->batchcount = batchcount;
 	cachep->limit = limit;
 	cachep->shared = shared;
-	spin_unlock_irq(&cachep->spinlock);
+	spin_unlock(&cachep->spinlock);
 
 	for_each_online_cpu(i) {
 		struct array_cache *ccold = new.new[i];
@@ -3584,8 +3584,7 @@ static int s_show(struct seq_file *m, vo
 	int node;
 	struct kmem_list3 *l3;
 
-	check_irq_on();
-	spin_lock_irq(&cachep->spinlock);
+	spin_lock(&cachep->spinlock);
 	active_objs = 0;
 	num_slabs = 0;
 	for_each_online_node(node) {
@@ -3593,7 +3592,8 @@ static int s_show(struct seq_file *m, vo
 		if (!l3)
 			continue;
 
-		spin_lock(&l3->list_lock);
+		check_irq_on();
+		spin_lock_irq(&l3->list_lock);
 
 		list_for_each(q, &l3->slabs_full) {
 			slabp = list_entry(q, struct slab, list);
@@ -3620,7 +3620,7 @@ static int s_show(struct seq_file *m, vo
 		free_objects += l3->free_objects;
 		shared_avail += l3->shared->avail;
 
-		spin_unlock(&l3->list_lock);
+		spin_unlock_irq(&l3->list_lock);
 	}
 	num_slabs += active_slabs;
 	num_objs = num_slabs * cachep->num;
@@ -3670,7 +3670,7 @@ static int s_show(struct seq_file *m, vo
 			shrinker_stat_read(cachep->shrinker, nr_freed));
 	}
 	seq_putc(m, '\n');
-	spin_unlock_irq(&cachep->spinlock);
+	spin_unlock(&cachep->spinlock);
 	return 0;
 }
 
@@ -3702,10 +3702,10 @@ static void do_dump_slabp(kmem_cache_t *
 	int node;
 
 	check_irq_on();
-	spin_lock_irq(&cachep->spinlock);
+	spin_lock(&cachep->spinlock);
 	for_each_online_node(node) {
 		struct kmem_list3 *rl3 = cachep->nodelists[node];
-		spin_lock(&rl3->list_lock);
+		spin_lock_irq(&rl3->list_lock);
 
 		list_for_each(q, &rl3->slabs_full) {
 			int i;
@@ -3719,9 +3719,9 @@ static void do_dump_slabp(kmem_cache_t *
 				printk("\n");
 			}
 		}
-		spin_unlock(&rl3->list_lock);
+		spin_unlock_irq(&rl3->list_lock);
 	}
-	spin_unlock_irq(&cachep->spinlock);
+	spin_unlock(&cachep->spinlock);
 #endif
 }
 

  parent reply	other threads:[~2006-02-03 20:56 UTC|newest]

Thread overview: 20+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2006-02-03 20:53 [patch 0/3] NUMA slab locking fixes Ravikiran G Thirumalai
2006-02-03 20:55 ` [patch 1/3] NUMA slab locking fixes -- slab-colour-next fix Ravikiran G Thirumalai
2006-02-03 20:56 ` Ravikiran G Thirumalai [this message]
2006-02-03 20:57 ` [patch 3/3] NUMA slab locking fixes -- slab cpu hotplug fix Ravikiran G Thirumalai
2006-02-03 22:07 ` [patch 0/3] NUMA slab locking fixes Andrew Morton
2006-02-03 23:06   ` Christoph Lameter
2006-02-04  1:08     ` Ravikiran G Thirumalai
2006-02-04  1:15       ` [patch 1/3] NUMA slab locking fixes -- move color_next to l3 Ravikiran G Thirumalai
2006-02-04  1:22       ` [patch 0/3] NUMA slab locking fixes Andrew Morton
2006-02-04  1:28       ` [patch 2/3] NUMA slab locking fixes - move irq disabling from cahep->spinlock to l3 lock Ravikiran G Thirumalai
2006-02-04  9:48         ` Andrew Morton
2006-02-06 22:51           ` Ravikiran G Thirumalai
2006-02-06 23:30             ` Andrew Morton
2006-02-07  0:21               ` Christoph Lameter
2006-02-07  7:36                 ` Pekka J Enberg
2006-02-07  7:50                   ` Ravikiran G Thirumalai
2006-02-07  7:55                     ` Pekka J Enberg
2006-02-04  1:29       ` [patch 3/3] NUMA slab locking fixes -- fix cpu down and up locking Ravikiran G Thirumalai
2006-02-04 10:03         ` Andrew Morton
2006-02-04 10:05           ` Andrew Morton

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20060203205615.GE3653@localhost.localdomain \
    --to=kiran@scalex86.org \
    --cc=akpm@osdl.org \
    --cc=alok.kataria@calsoftinc.com \
    --cc=clameter@engr.sgi.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=manfred@colorfullife.com \
    --cc=shai@scalex86.org \
    --cc=sonny@burdell.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox