public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH] Fix early request_irq
@ 2004-10-29 14:05 Pantelis Antoniou
  2004-10-29 14:16 ` Ingo Molnar
  2004-10-29 14:21 ` Christoph Hellwig
  0 siblings, 2 replies; 8+ messages in thread
From: Pantelis Antoniou @ 2004-10-29 14:05 UTC (permalink / raw)
  To: Ingo Molnar, Tom Rini, Linuxppc-Embedded, Linux Kernel list

[-- Attachment #1: Type: text/plain, Size: 623 bytes --]

Hi there

The recent consolidation of the IRQ code has caused
a number of PPC embedded cpus to stop working.

The problem is that on init_IRQ these platforms call
request_irq very early, which in turn calls kmalloc
without the memory subsystem being initialized.

The following patch fixes it by keeping a small static
array of irqactions just for this purpose.

This is still not enough to get these platforms working,
since I crash on the first interrupt, but at least it is
a start.

Regards

Pantelis

Signed-off-by: Pantelis Antoniou <panto@intracom.gr>

---------------------------------------------------------------




[-- Attachment #2: early-irq-fix.patch --]
[-- Type: text/x-patch, Size: 2263 bytes --]

--- linux-2.5/kernel/irq/manage.c	2004-10-29 16:39:08.496715752 +0300
+++ linuxppc_2.5-public/kernel/irq/manage.c	2004-10-29 16:44:13.361369280 +0300
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/interrupt.h>
+#include <linux/bitops.h>
 
 #include "internals.h"
 
@@ -144,6 +145,53 @@
 	return !action;
 }
 
+/* we must support request_irqs before mem init */
+
+#define IRQ_CACHE_ENTRIES 8	/* 8 should be enough */
+
+static spinlock_t irqa_lock = SPIN_LOCK_UNLOCKED;
+static unsigned int irqa_cache_map = (1 << IRQ_CACHE_ENTRIES) - 1;
+static struct irqaction irqa_cache[IRQ_CACHE_ENTRIES];
+
+/*
+ * Allocate a struct irqaction.  Once the memory subsystem is up this
+ * is a plain kmalloc(); before that, hand out a slot from the static
+ * irqa_cache[] pool so that request_irq() can be called from early
+ * init code (e.g. init_IRQ on embedded PPC).
+ *
+ * @pri: gfp flags forwarded to kmalloc() in the post-init path.
+ * Returns NULL when the static pool is exhausted or kmalloc fails.
+ *
+ * NOTE(review): mem_init_done is arch-provided (PPC) -- confirm every
+ * architecture building this file defines it.
+ */
+static struct irqaction *irqaction_alloc(int pri)
+{
+	extern int mem_init_done;
+	int i;
+	unsigned long flags;
+	struct irqaction *irqa;
+
+	if (mem_init_done)
+		return kmalloc(sizeof(struct irqaction), pri);
+
+	spin_lock_irqsave(&irqa_lock, flags);
+	/* ffs() is 1-based; 0 means no free slot in the bitmap */
+	i = ffs(irqa_cache_map);
+	if (i > 0 && --i < IRQ_CACHE_ENTRIES) {
+		irqa_cache_map &= ~(1 << i);
+		irqa = irqa_cache + i;
+	} else
+		irqa = NULL;
+	spin_unlock_irqrestore(&irqa_lock, flags);
+
+	return irqa;
+}
+
+/*
+ * Free an irqaction previously obtained from irqaction_alloc().
+ * Pointers outside the static irqa_cache[] pool came from kmalloc()
+ * and are kfree()d; pool entries are returned by setting their bit
+ * in irqa_cache_map.
+ */
+static void irqaction_free(void *ptr)
+{
+	struct irqaction *irqa = ptr;
+	unsigned long flags;
+	int i;
+
+	if (irqa < irqa_cache || irqa >= irqa_cache + IRQ_CACHE_ENTRIES) {
+		kfree(ptr);
+		return;
+	}
+
+	spin_lock_irqsave(&irqa_lock, flags);
+	/* slot index: irqa points into irqa_cache[], so subtract the base
+	 * (the original had the operands reversed, yielding a negative
+	 * index and undefined behavior in the shift for slots > 0) */
+	i = irqa - irqa_cache;
+	irqa_cache_map |= 1 << i;
+	spin_unlock_irqrestore(&irqa_lock, flags);
+}
+
 /*
  * Internal function to register an irqaction - typically used to
  * allocate special interrupts that are part of the architecture.
@@ -265,7 +313,7 @@
 
 			/* Make sure it's not being used on another CPU */
 			synchronize_irq(irq);
-			kfree(action);
+			irqaction_free(action);
 			return;
 		}
 		printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
@@ -325,7 +373,7 @@
 	if (!handler)
 		return -EINVAL;
 
-	action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
+	action = irqaction_alloc(GFP_ATOMIC);
 	if (!action)
 		return -ENOMEM;
 
@@ -337,10 +385,12 @@
 	action->dev_id = dev_id;
 
 	retval = setup_irq(irq, action);
-	if (retval)
-		kfree(action);
+	if (retval) {
+		irqaction_free(action);
+		return retval;
+	}
 
-	return retval;
+	return 0;
 }
 
 EXPORT_SYMBOL(request_irq);

^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2004-11-02  7:53 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2004-10-29 14:05 [PATCH] Fix early request_irq Pantelis Antoniou
2004-10-29 14:16 ` Ingo Molnar
2004-10-29 14:20   ` Pantelis Antoniou
2004-10-29 14:21 ` Christoph Hellwig
2004-10-29 16:11   ` Tom Rini
2004-11-01 17:41     ` Kumar Gala
2004-11-01 19:59       ` Tom Rini
2004-11-02  7:51         ` Ingo Molnar

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox