* [PATCH 14/27] ia64: Move holding of vector_lock to __setup_vector_irq()
[not found] <20120601090952.31979.24799.stgit@srivatsabhat.in.ibm.com>
@ 2012-06-01 9:25 ` Srivatsa S. Bhat
2012-06-01 9:25 ` [PATCH 15/27] ia64, smpboot: Use generic SMP booting infrastructure Srivatsa S. Bhat
1 sibling, 0 replies; 2+ messages in thread
From: Srivatsa S. Bhat @ 2012-06-01 9:25 UTC (permalink / raw)
To: tglx, peterz, paulmck
Cc: rusty, mingo, yong.zhang0, akpm, vatsa, rjw, linux-arch,
linux-kernel, srivatsa.bhat, nikunj, Tony Luck, Fenghua Yu,
Andrew Morton, David Howells, Mike Frysinger, linux-ia64
__setup_vector_irq() expects that its caller holds the vector_lock.
As of now there is only one caller - smp_callin(); and acquiring the lock
in smp_callin() around the call to __setup_vector_irq() obstructs the
conversion of ia64 to generic smp booting code. So move the lock acquisition
to __setup_vector_irq() itself (mimicking what x86 does).
Cc: Tony Luck <tony.luck@intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: David Howells <dhowells@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Mike Frysinger <vapier@gentoo.org>
Cc: linux-ia64@vger.kernel.org
Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
---
arch/ia64/kernel/irq_ia64.c | 5 +++--
arch/ia64/kernel/smpboot.c | 2 +-
2 files changed, 4 insertions(+), 3 deletions(-)
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 5c3e088..6ac99c8 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -241,13 +241,13 @@ reserve_irq_vector (int vector)
}
/*
- * Initialize vector_irq on a new cpu. This function must be called
- * with vector_lock held.
+ * Initialize vector_irq on a new cpu.
*/
void __setup_vector_irq(int cpu)
{
int irq, vector;
+ spin_lock(&vector_lock);
/* Clear vector_irq */
for (vector = 0; vector < IA64_NUM_VECTORS; ++vector)
per_cpu(vector_irq, cpu)[vector] = -1;
@@ -258,6 +258,7 @@ void __setup_vector_irq(int cpu)
vector = irq_to_vector(irq);
per_cpu(vector_irq, cpu)[vector] = irq;
}
+ spin_unlock(&vector_lock);
}
#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index df00a3c..709ce07 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -378,10 +378,10 @@ smp_callin (void)
set_numa_node(cpu_to_node_map[cpuid]);
set_numa_mem(local_memory_node(cpu_to_node_map[cpuid]));
- spin_lock(&vector_lock);
/* Setup the per cpu irq handling data structures */
__setup_vector_irq(cpuid);
notify_cpu_starting(cpuid);
+ spin_lock(&vector_lock);
set_cpu_online(cpuid, true);
per_cpu(cpu_state, cpuid) = CPU_ONLINE;
spin_unlock(&vector_lock);
^ permalink raw reply related [flat|nested] 2+ messages in thread
* [PATCH 15/27] ia64, smpboot: Use generic SMP booting infrastructure
[not found] <20120601090952.31979.24799.stgit@srivatsabhat.in.ibm.com>
2012-06-01 9:25 ` [PATCH 14/27] ia64: Move holding of vector_lock to __setup_vector_irq() Srivatsa S. Bhat
@ 2012-06-01 9:25 ` Srivatsa S. Bhat
1 sibling, 0 replies; 2+ messages in thread
From: Srivatsa S. Bhat @ 2012-06-01 9:25 UTC (permalink / raw)
To: tglx, peterz, paulmck
Cc: rusty, mingo, yong.zhang0, akpm, vatsa, rjw, linux-arch,
linux-kernel, srivatsa.bhat, nikunj, Nikunj A. Dadhania,
Fenghua Yu, Andrew Morton, David Howells, Mike Frysinger,
linux-ia64
From: Nikunj A. Dadhania <nikunj@linux.vnet.ibm.com>
Convert ia64 to use the generic framework to boot secondary CPUs.
Notes:
1. ia64 manipulates the cpu_online_mask under vector_lock. So, while
converting it over to the generic smp booting code, we override
arch_vector_lock() and arch_vector_unlock() appropriately.
2. ia64 needs to enable interrupts before fully completing the booting of
the secondary CPU (as explicitly mentioned in the comment above the call to
ia64_sync_itc()). Luckily, this won't pose much of a problem because it
is in the post-online stage (and hence CPU_STARTING notifications have been
sent and cpu_online_mask is already set up), and moreover, we were going to
enable the interrupts shortly anyway.
Signed-off-by: Nikunj A. Dadhania <nikunj@linux.vnet.ibm.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: David Howells <dhowells@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Mike Frysinger <vapier@gentoo.org>
Cc: linux-ia64@vger.kernel.org
Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
---
arch/ia64/kernel/irq_ia64.c | 16 ++++++++++++++++
arch/ia64/kernel/smpboot.c | 40 ++++++++++++++++++++++------------------
2 files changed, 38 insertions(+), 18 deletions(-)
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 6ac99c8..6a724f9 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -196,6 +196,22 @@ static void clear_irq_vector(int irq)
spin_unlock_irqrestore(&vector_lock, flags);
}
+/*
+ * We need to hold vector_lock while manipulating cpu_online_mask so that the
+ * set of online cpus does not change while we are assigning vectors to cpus
+ * (assign_irq_vector()). Holding this lock ensures we don't half assign or
+ * remove an irq from a cpu.
+ */
+void arch_vector_lock(void)
+{
+ spin_lock(&vector_lock);
+}
+
+void arch_vector_unlock(void)
+{
+ spin_unlock(&vector_lock);
+}
+
int
ia64_native_assign_irq_vector (int irq)
{
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 709ce07..0f00dc6 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -351,18 +351,10 @@ smp_setup_percpu_timer (void)
static void __cpuinit
smp_callin (void)
{
- int cpuid, phys_id, itc_master;
- struct cpuinfo_ia64 *last_cpuinfo, *this_cpuinfo;
- extern void ia64_init_itm(void);
- extern volatile int time_keeper_id;
-
-#ifdef CONFIG_PERFMON
- extern void pfm_init_percpu(void);
-#endif
+ unsigned int cpuid, phys_id;
cpuid = smp_processor_id();
phys_id = hard_smp_processor_id();
- itc_master = time_keeper_id;
if (cpu_online(cpuid)) {
printk(KERN_ERR "huh, phys CPU#0x%x, CPU#0x%x already present??\n",
@@ -380,11 +372,21 @@ smp_callin (void)
/* Setup the per cpu irq handling data structures */
__setup_vector_irq(cpuid);
- notify_cpu_starting(cpuid);
- spin_lock(&vector_lock);
- set_cpu_online(cpuid, true);
- per_cpu(cpu_state, cpuid) = CPU_ONLINE;
- spin_unlock(&vector_lock);
+}
+
+void __cpuinit __cpu_post_online(void *unused)
+{
+ unsigned int cpuid, itc_master;
+ struct cpuinfo_ia64 *last_cpuinfo, *this_cpuinfo;
+ extern void ia64_init_itm(void);
+ extern volatile int time_keeper_id;
+
+#ifdef CONFIG_PERFMON
+ extern void pfm_init_percpu(void);
+#endif
+
+ cpuid = smp_processor_id();
+ itc_master = time_keeper_id;
smp_setup_percpu_timer();
@@ -442,6 +444,12 @@ smp_callin (void)
int __cpuinit
start_secondary (void *unused)
{
+ smpboot_start_secondary(unused);
+ return 0;
+}
+
+void __cpuinit __cpu_pre_starting(void *unused)
+{
/* Early console may use I/O ports */
ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
#ifndef CONFIG_PRINTK_TIME
@@ -449,11 +457,7 @@ start_secondary (void *unused)
#endif
efi_map_pal_code();
cpu_init();
- preempt_disable();
smp_callin();
-
- cpu_idle();
- return 0;
}
struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
^ permalink raw reply related [flat|nested] 2+ messages in thread